From d0aac71ccdfceef37be4943ed6551bdd59f76d93 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Thu, 1 Feb 2024 12:18:01 +0000
Subject: [PATCH 01/93] Generalize multiframe database manager

---
 src/highdicom/_module_utils.py |  23 +
 src/highdicom/_multiframe.py   | 882 +++++++++++++++++++++++++++++++++
 src/highdicom/content.py       |   8 +-
 src/highdicom/pm/content.py    |   5 +-
 src/highdicom/pm/sop.py        |   3 +-
 src/highdicom/pr/content.py    |  13 +-
 src/highdicom/seg/content.py   |  10 +-
 src/highdicom/seg/sop.py       | 719 ++-------------------------
 src/highdicom/spatial.py       | 199 +++++++-
 src/highdicom/sr/content.py    |   3 +-
 10 files changed, 1184 insertions(+), 681 deletions(-)
 create mode 100644 src/highdicom/_multiframe.py

diff --git a/src/highdicom/_module_utils.py b/src/highdicom/_module_utils.py
index f73f246a..376547e4 100644
--- a/src/highdicom/_module_utils.py
+++ b/src/highdicom/_module_utils.py
@@ -281,3 +281,26 @@ def does_iod_have_pixel_data(sop_class_uid: str) -> bool:
     return any(
         is_attribute_in_iod(attr, sop_class_uid) for attr in pixel_attrs
     )
+
+
+def is_multiframe_image(dataset: Dataset) -> bool:
+    """Determine whether an image is a multiframe image.
+
+    The definition used is whether the IOD allows for multiple frames, not
+    whether this particular instance has more than one frame.
+
+    Parameters
+    ----------
+    dataset: pydicom.Dataset
+        A dataset to check.
+
+    Returns
+    -------
+    bool:
+        Whether the image belongs to a multiframe IOD.
+
+    """
+    return is_attribute_in_iod(
+        'PerFrameFunctionalGroupsSequence',
+        dataset.SOPClassUID,
+    )
diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py
new file mode 100644
index 00000000..57766034
--- /dev/null
+++ b/src/highdicom/_multiframe.py
@@ -0,0 +1,882 @@
+"""Tools for working with multiframe DICOM images."""
+from collections import Counter
+from contextlib import contextmanager
+import logging
+import sqlite3
+from typing import (
+    Any,
+    Iterable,
+    Dict,
+    Generator,
+    List,
+    Optional,
+    Set,
+    Sequence,
+    Tuple,
+    Union,
+)
+import numpy as np
+from pydicom import Dataset
+from pydicom.tag import BaseTag
+from pydicom.datadict import get_entry, tag_for_keyword
+from pydicom.multival import MultiValue
+
+from highdicom.enum import CoordinateSystemNames
+from highdicom.seg.enum import SpatialLocationsPreservedValues
+from highdicom.spatial import (
+    DEFAULT_SPACING_TOLERANCE,
+    get_coordinate_system,
+    get_regular_slice_spacing,
+)
+from highdicom.uid import UID as hd_UID
+from highdicom.utils import (
+    iter_tiled_full_frame_data,
+)
+
+
+_NO_FRAME_REF_VALUE = -1
+
+
+logger = logging.getLogger(__name__)
+
+
+class MultiFrameDBManager:
+
+    """Database manager for frame information in a multiframe image."""
+
+    # Dictionary mapping DCM VRs to appropriate SQLite types
+    _DCM_SQL_TYPE_MAP = {
+        'CS': 'VARCHAR',
+        'DS': 'REAL',
+        'FD': 'REAL',
+        'FL': 'REAL',
+        'IS': 'INTEGER',
+        'LO': 'TEXT',
+        'LT': 'TEXT',
+        'PN': 'TEXT',
+        'SH': 'TEXT',
+        'SL': 'INTEGER',
+        'SS': 'INTEGER',
+        'ST': 'TEXT',
+        'UI': 'TEXT',
+        'UL': 'INTEGER',
+        'UR': 'TEXT',
+        'US or SS': 'INTEGER',
+        'US': 'INTEGER',
+        'UT': 'TEXT',
+    }
+
+    def __init__(
+        self,
+        dataset: Dataset,
+    ):
+        """
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Dataset of a multi-frame image.
+
+        """
+        self._coordinate_system = get_coordinate_system(dataset)
+        referenced_uids = self._get_ref_instance_uids(dataset)
+        all_referenced_sops = {uids[2] for uids in referenced_uids}
+
+        self._is_tiled_full = (
+            hasattr(dataset, 'DimensionOrganizationType') and
+            dataset.DimensionOrganizationType == 'TILED_FULL'
+        )
+
+        self._dim_ind_pointers = [
+            dim_ind.DimensionIndexPointer
+            for dim_ind in dataset.DimensionIndexSequence
+        ]
+        func_grp_pointers = {}
+        for dim_ind in dataset.DimensionIndexSequence:
+            ptr = dim_ind.DimensionIndexPointer
+            if ptr in self._dim_ind_pointers:
+                grp_ptr = getattr(dim_ind, "FunctionalGroupPointer", None)
+                func_grp_pointers[ptr] = grp_ptr
+
+        # We may want to gather additional information that is not one of the
+        # indices
+        extra_collection_pointers = []
+        extra_collection_func_pointers = {}
+        if self._coordinate_system == CoordinateSystemNames.PATIENT:
+            image_position_tag = tag_for_keyword('ImagePositionPatient')
+            plane_pos_seq_tag = tag_for_keyword('PlanePositionSequence')
+            # Include the image position if it is not an index
+            if image_position_tag not in self._dim_ind_pointers:
+                extra_collection_pointers.append(image_position_tag)
+                extra_collection_func_pointers[
+                    image_position_tag
+                ] = plane_pos_seq_tag
+
+        dim_ind_positions = {
+            dim_ind.DimensionIndexPointer: i
+            for i, dim_ind in enumerate(dataset.DimensionIndexSequence)
+        }
+        dim_indices: Dict[int, List[int]] = {
+            ptr: [] for ptr in self._dim_ind_pointers
+        }
+        dim_values: Dict[int, List[Any]] = {
+            ptr: [] for ptr in self._dim_ind_pointers
+        }
+
+        extra_collection_values: Dict[int, List[Any]] = {
+            ptr: [] for ptr in extra_collection_pointers
+        }
+
+        self.shared_image_orientation = self._get_shared_image_orientation(
+            dataset
+        )
+
+        self._single_source_frame_per_frame = True
+
+        if self._is_tiled_full:
+            # With TILED_FULL, there is no PerFrameFunctionalGroupsSequence,
+            # so we have to deduce the per-frame information
+            row_tag = tag_for_keyword('RowPositionInTotalImagePixelMatrix')
+            col_tag = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix')
+            x_tag = tag_for_keyword('XOffsetInSlideCoordinateSystem')
+            y_tag = tag_for_keyword('YOffsetInSlideCoordinateSystem')
+            z_tag = tag_for_keyword('ZOffsetInSlideCoordinateSystem')
+            tiled_full_dim_indices = {row_tag, col_tag}
+            if len(tiled_full_dim_indices - set(dim_indices.keys())) > 0:
+                raise RuntimeError(
+                    'Expected images with '
+                    '"DimensionOrganizationType" of "TILED_FULL" '
+                    'to have the following dimension index pointers: '
+                    'RowPositionInTotalImagePixelMatrix, '
+                    'ColumnPositionInTotalImagePixelMatrix.'
+                )
+            self._single_source_frame_per_frame = False
+            (
+                channel_numbers,
+                _,
+                dim_values[col_tag],
+                dim_values[row_tag],
+                dim_values[x_tag],
+                dim_values[y_tag],
+                dim_values[z_tag],
+            ) = zip(*iter_tiled_full_frame_data(dataset))
+
+            if hasattr(dataset, 'SegmentSequence'):
+                segment_tag = tag_for_keyword('ReferencedSegmentNumber')
+                dim_values[segment_tag] = channel_numbers
+            elif hasattr(dataset, 'OpticalPathSequence'):
+                op_tag = tag_for_keyword('OpticalPathIdentifier')
+                dim_values[op_tag] = channel_numbers
+
+            # Create indices for each of the dimensions
+            for ptr, vals in dim_values.items():
+                _, indices = np.unique(vals, return_inverse=True)
+                dim_indices[ptr] = (indices + 1).tolist()
+
+            # There is no way to deduce whether the spatial locations are
+            # preserved in the tiled full case
+            self._locations_preserved = None
+
+            referenced_instances = None
+            referenced_frames = None
+        else:
+            referenced_instances: Optional[List[str]] = []
+            referenced_frames: Optional[List[int]] = []
+
+            # Create a list of source images and check for spatial locations
+            # preserved
+            locations_list_type = List[
+                Optional[SpatialLocationsPreservedValues]
+            ]
+            locations_preserved: locations_list_type = []
+
+            for frame_item in dataset.PerFrameFunctionalGroupsSequence:
+                # Get dimension indices for this frame
+                content_seq = frame_item.FrameContentSequence[0]
+                indices = content_seq.DimensionIndexValues
+                if not isinstance(indices, (MultiValue, list)):
+                    # In case there is a single dimension index
+                    indices = [indices]
+                if len(indices) != len(self._dim_ind_pointers):
+                    raise RuntimeError(
+                        'Unexpected mismatch between dimension index values '
+                        'in the per-frame functional groups sequence and '
+                        'items in the dimension index sequence.'
+                    )
+                for ptr in self._dim_ind_pointers:
+                    dim_indices[ptr].append(indices[dim_ind_positions[ptr]])
+                    grp_ptr = func_grp_pointers[ptr]
+                    if grp_ptr is not None:
+                        dim_val = frame_item[grp_ptr][0][ptr].value
+                    else:
+                        dim_val = frame_item[ptr].value
+                    dim_values[ptr].append(dim_val)
+                for ptr in extra_collection_pointers:
+                    grp_ptr = extra_collection_func_pointers[ptr]
+                    if grp_ptr is not None:
+                        dim_val = frame_item[grp_ptr][0][ptr].value
+                    else:
+                        dim_val = frame_item[ptr].value
+                    extra_collection_values[ptr].append(dim_val)
+
+                frame_source_instances = []
+                frame_source_frames = []
+                for der_im in getattr(
+                    frame_item,
+                    'DerivationImageSequence',
+                    []
+                ):
+                    for src_im in getattr(
+                        der_im,
+                        'SourceImageSequence',
+                        []
+                    ):
+                        frame_source_instances.append(
+                            src_im.ReferencedSOPInstanceUID
+                        )
+                        if hasattr(src_im, 'SpatialLocationsPreserved'):
+                            locations_preserved.append(
+                                SpatialLocationsPreservedValues(
+                                    src_im.SpatialLocationsPreserved
+                                )
+                            )
+                        else:
+                            locations_preserved.append(
+                                None
+                            )
+
+                        if hasattr(src_im, 'ReferencedFrameNumber'):
+                            if isinstance(
+                                src_im.ReferencedFrameNumber,
+                                MultiValue
+                            ):
+                                frame_source_frames.extend(
+                                    [
+                                        int(f)
+                                        for f in src_im.ReferencedFrameNumber
+                                    ]
+                                )
+                            else:
+                                frame_source_frames.append(
+                                    int(src_im.ReferencedFrameNumber)
+                                )
+                        else:
+                            frame_source_frames.append(_NO_FRAME_REF_VALUE)
+
+                if (
+                    len(set(frame_source_instances)) != 1 or
+                    len(set(frame_source_frames)) != 1
+                ):
+                    self._single_source_frame_per_frame = False
+                else:
+                    ref_instance_uid = frame_source_instances[0]
+                    if ref_instance_uid not in all_referenced_sops:
+                        raise AttributeError(
+                            f'SOP instance {ref_instance_uid} referenced in '
+                            'the source image sequence is not included in the '
+                            'Referenced Series Sequence or Studies Containing '
+                            'Other Referenced Instances Sequence. This is an '
+                            'error with the integrity of the Segmentation '
+                            'object.'
+                        )
+                    referenced_instances.append(ref_instance_uid)
+                    referenced_frames.append(frame_source_frames[0])
+
+            # Summarise
+            if any(
+                isinstance(v, SpatialLocationsPreservedValues) and
+                v == SpatialLocationsPreservedValues.NO
+                for v in locations_preserved
+            ):
+                self._locations_preserved: Optional[
+                    SpatialLocationsPreservedValues
+                ] = SpatialLocationsPreservedValues.NO
+            elif all(
+                isinstance(v, SpatialLocationsPreservedValues) and
+                v == SpatialLocationsPreservedValues.YES
+                for v in locations_preserved
+            ):
+                self._locations_preserved = SpatialLocationsPreservedValues.YES
+            else:
+                self._locations_preserved = None
+
+            if not self._single_source_frame_per_frame:
+                referenced_instances = None
+                referenced_frames = None
+
+        self._db_con: sqlite3.Connection = sqlite3.connect(":memory:")
+
+        self._create_ref_instance_table(referenced_uids)
+
+        self._number_of_frames = dataset.NumberOfFrames
+
+        # Construct the columns and values to put into a frame look-up table
+        # within sqlite. There will be one row per frame in the
+        # segmentation instance
+        col_defs = []  # SQL column definitions
+        col_data = []  # lists of column data
+
+        # Frame number column
+        col_defs.append('FrameNumber INTEGER PRIMARY KEY')
+        col_data.append(list(range(1, self._number_of_frames + 1)))
+
+        self._dim_ind_col_names = {}
+        for i, t in enumerate(dim_indices.keys()):
+            vr, vm_str, _, _, kw = get_entry(t)
+            if kw == '':
+                kw = f'UnknownDimensionIndex{i}'
+            ind_col_name = kw + '_DimensionIndexValues'
+            self._dim_ind_col_names[t] = ind_col_name
+
+            # Add column for dimension index
+            col_defs.append(f'{ind_col_name} INTEGER NOT NULL')
+            col_data.append(dim_indices[t])
+
+            # Add column for dimension value
+            # For this to be possible, must have a fixed VM
+            # and a VR that we can map to a sqlite type
+            # Otherwise, we just omit the data from the db
+            if kw == 'ReferencedSegmentNumber':
+                # Special case since this tag technically has VM 1-n
+                vm = 1
+            else:
+                try:
+                    vm = int(vm_str)
+                except ValueError:
+                    continue
+            try:
+                sql_type = self._DCM_SQL_TYPE_MAP[vr]
+            except KeyError:
+                continue
+
+            if vm > 1:
+                for d in range(vm):
+                    data = [el[d] for el in dim_values[t]]
+                    col_defs.append(f'{kw}_{d} {sql_type} NOT NULL')
+                    col_data.append(data)
+            else:
+                # Single column
+                col_defs.append(f'{kw} {sql_type} NOT NULL')
+                col_data.append(dim_values[t])
+
+        for i, t in enumerate(extra_collection_pointers):
+            vr, vm_str, _, _, kw = get_entry(t)
+
+            # Add column for dimension value
+            # For this to be possible, must have a fixed VM
+            # and a VR that we can map to a sqlite type
+            # Otherwise, we just omit the data from the db
+            vm = int(vm_str)
+            sql_type = self._DCM_SQL_TYPE_MAP[vr]
+
+            if vm > 1:
+                for d in range(vm):
+                    data = [el[d] for el in extra_collection_values[t]]
+                    col_defs.append(f'{kw}_{d} {sql_type} NOT NULL')
+                    col_data.append(data)
+            else:
+                # Single column
+                col_defs.append(f'{kw} {sql_type} NOT NULL')
+                col_data.append(extra_collection_values[t])
+
+        # Columns related to source frames, if they are usable for indexing
+        if (referenced_frames is None) != (referenced_instances is None):
+            raise TypeError(
+                "'referenced_frames' and 'referenced_instances' should be "
+                "provided together or not at all."
+            )
+        if referenced_instances is not None:
+            col_defs.append('ReferencedFrameNumber INTEGER')
+            col_defs.append('ReferencedSOPInstanceUID VARCHAR NOT NULL')
+            col_defs.append(
+                'FOREIGN KEY(ReferencedSOPInstanceUID) '
+                'REFERENCES InstanceUIDs(SOPInstanceUID)'
+            )
+            col_data += [
+                referenced_frames,
+                referenced_instances,
+            ]
+
+        # Build LUT from columns
+        all_defs = ", ".join(col_defs)
+        cmd = f'CREATE TABLE FrameLUT({all_defs})'
+        placeholders = ', '.join(['?'] * len(col_data))
+        with self._db_con:
+            self._db_con.execute(cmd)
+            self._db_con.executemany(
+                f'INSERT INTO FrameLUT VALUES({placeholders})',
+                zip(*col_data),
+            )
+
+    def _get_ref_instance_uids(
+        self,
+        dataset: Dataset,
+    ) -> List[Tuple[str, str, str]]:
+        """List all instances referenced in the image.
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Dataset of the image whose references are to be listed.
+
+        Returns
+        -------
+        List[Tuple[str, str, str]]
+            List of all instances referenced in the image in the format
+            (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID).
+
+        """
+        instance_data = []
+        if hasattr(dataset, 'ReferencedSeriesSequence'):
+            for ref_series in dataset.ReferencedSeriesSequence:
+                for ref_ins in ref_series.ReferencedInstanceSequence:
+                    instance_data.append(
+                        (
+                            dataset.StudyInstanceUID,
+                            ref_series.SeriesInstanceUID,
+                            ref_ins.ReferencedSOPInstanceUID
+                        )
+                    )
+        other_studies_kw = 'StudiesContainingOtherReferencedInstancesSequence'
+        if hasattr(dataset, other_studies_kw):
+            for ref_study in getattr(dataset, other_studies_kw):
+                for ref_series in ref_study.ReferencedSeriesSequence:
+                    for ref_ins in ref_series.ReferencedInstanceSequence:
+                        instance_data.append(
+                            (
+                                ref_study.StudyInstanceUID,
+                                ref_series.SeriesInstanceUID,
+                                ref_ins.ReferencedSOPInstanceUID,
+                            )
+                        )
+
+        # There shouldn't be duplicates here, but there's no explicit rule
+        # preventing it.
+        # Since dictionary ordering is preserved, this trick deduplicates
+        # the list without changing the order
+        unique_instance_data = list(dict.fromkeys(instance_data))
+        if len(unique_instance_data) != len(instance_data):
+            counts = Counter(instance_data)
+            duplicate_sop_uids = [
+                f"'{key[2]}'" for key, value in counts.items() if value > 1
+            ]
+            display_str = ', '.join(duplicate_sop_uids)
+            logger.warning(
+                'Duplicate entries found in the ReferencedSeriesSequence. '
+                f"SOP Instance UID: '{dataset.SOPInstanceUID}', "
+                f'duplicated referenced SOP Instance UID items: {display_str}.'
+            )
+
+        return unique_instance_data
+
+    def _check_indexing_with_source_frames(
+        self,
+        ignore_spatial_locations: bool = False
+    ) -> None:
+        """Check if indexing by source frames is possible.
+
+        Raise exceptions with useful messages otherwise.
+
+        Possible problems include:
+        * Spatial locations are not preserved.
+        * The dataset does not specify that spatial locations are preserved
+          and the user has not asserted that they are.
+        * At least one frame in the segmentation lists multiple
+          source frames.
+
+        Parameters
+        ----------
+        ignore_spatial_locations: bool
+            Allows the user to ignore whether spatial locations are preserved
+            in the frames.
+
+        """
+        # Checks that it is possible to index using source frames in this
+        # dataset
+        if self._is_tiled_full:
+            raise RuntimeError(
+                'Indexing via source frames is not possible when a '
+                'segmentation is stored using the DimensionOrganizationType '
+                '"TILED_FULL".'
+            )
+        elif self._locations_preserved is None:
+            if not ignore_spatial_locations:
+                raise RuntimeError(
+                    'Indexing via source frames is not permissible since this '
+                    'image does not specify that spatial locations are '
+                    'preserved in the course of deriving the segmentation '
+                    'from the source image. If you are confident that spatial '
+                    'locations are preserved, or do not require that spatial '
+                    'locations are preserved, you may override this behavior '
+                    "with the 'ignore_spatial_locations' parameter."
+                )
+        elif self._locations_preserved == SpatialLocationsPreservedValues.NO:
+            if not ignore_spatial_locations:
+                raise RuntimeError(
+                    'Indexing via source frames is not permissible since this '
+                    'image specifies that spatial locations are not preserved '
+                    'in the course of deriving the segmentation from the '
+                    'source image. If you do not require that spatial '
+                    'locations are preserved, you may override this behavior '
+                    "with the 'ignore_spatial_locations' parameter."
+                )
+        if not self._single_source_frame_per_frame:
+            raise RuntimeError(
+                'Indexing via source frames is not permissible since some '
+                'frames in the segmentation specify multiple source frames.'
+            )
+
+    @property
+    def dimension_index_pointers(self) -> List[BaseTag]:
+        """List[pydicom.tag.BaseTag]:
+            List of tags used as dimension indices.
+        """
+        return [BaseTag(t) for t in self._dim_ind_pointers]
+
+    def _create_ref_instance_table(
+        self,
+        referenced_uids: List[Tuple[str, str, str]],
+    ) -> None:
+        """Create a table of referenced instances.
+
+        The resulting table (called InstanceUIDs) contains Study, Series and
+        SOP instance UIDs for each instance referenced by the segmentation
+        image.
+
+        Parameters
+        ----------
+        referenced_uids: List[Tuple[str, str, str]]
+            List of UIDs for each instance referenced in the segmentation.
+            Each tuple should be in the format
+            (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID).
+
+        """
+        with self._db_con:
+            self._db_con.execute(
+                """
+                CREATE TABLE InstanceUIDs(
+                    StudyInstanceUID VARCHAR NOT NULL,
+                    SeriesInstanceUID VARCHAR NOT NULL,
+                    SOPInstanceUID VARCHAR PRIMARY KEY
+                )
+                """
+            )
+            self._db_con.executemany(
+                "INSERT INTO InstanceUIDs "
+                "(StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID) "
+                "VALUES(?, ?, ?)",
+                referenced_uids,
+            )
+
+    def _get_shared_image_orientation(
+        self,
+        dataset: Dataset
+    ) -> Optional[List[float]]:
+        """Get image orientation if it is shared between frames.
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Dataset for which to get the image orientation.
+
+        Returns
+        -------
+        List[float]:
+            Image orientation attribute (list of 6 floats containing direction
+            cosines) if this is shared between frames in the image. Otherwise
+            returns None.
+
+        """
+        if hasattr(dataset, 'ImageOrientationSlide'):
+            return dataset.ImageOrientationSlide
+
+        if hasattr(dataset, 'SharedFunctionalGroupsSequence'):
+            sfgs = dataset.SharedFunctionalGroupsSequence[0]
+            if hasattr(sfgs, 'PlaneOrientationSequence'):
+                return sfgs.PlaneOrientationSequence[0].ImageOrientationPatient
+
+        if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'):
+            pfg1 = dataset.PerFrameFunctionalGroupsSequence[0]
+            if hasattr(pfg1, 'PlaneOrientationSequence'):
+                iop = pfg1.PlaneOrientationSequence[0].ImageOrientationPatient
+
+                if len(dataset.PerFrameFunctionalGroupsSequence) == 1:
+                    return iop
+                else:
+                    for pfg in dataset.PerFrameFunctionalGroupsSequence[1:]:
+                        frame_iop = (
+                            pfg.PlaneOrientationSequence[0].
+                            ImageOrientationPatient
+                        )
+                        if frame_iop != iop:
+                            break
+                    else:
+                        return iop
+
+        return None
+
+    def are_dimension_indices_unique(
+        self,
+        dimension_index_pointers: Sequence[Union[int, BaseTag]],
+    ) -> bool:
+        """Check if a list of index pointers uniquely identifies frames.
+
+        For a given list of dimension index pointers, check whether every
+        combination of index values for these pointers identifies a unique
+        frame image. This is a pre-requisite for indexing using this list of
+        dimension index pointers.
+
+        Parameters
+        ----------
+        dimension_index_pointers: Sequence[Union[int, pydicom.tag.BaseTag]]
+            Sequence of tags serving as dimension index pointers.
+
+        Returns
+        -------
+        bool
+            True if dimension indices are unique.
+
+        """
+        column_names = []
+        for ptr in dimension_index_pointers:
+            column_names.append(self._dim_ind_col_names[ptr])
+        col_str = ", ".join(column_names)
+        cur = self._db_con.cursor()
+        n_unique_combos = cur.execute(
+            f"SELECT COUNT(*) FROM (SELECT 1 FROM FrameLUT GROUP BY {col_str})"
+        ).fetchone()[0]
+        return n_unique_combos == self._number_of_frames
+
+    def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]:
+        """Get UIDs of source image instances referenced in the image.
+
+        Returns
+        -------
+        List[Tuple[highdicom.UID, highdicom.UID, highdicom.UID]]
+            (Study Instance UID, Series Instance UID, SOP Instance UID)
+            triplet for every image instance referenced in the segmentation.
+
+        """
+        cur = self._db_con.cursor()
+        res = cur.execute(
+            'SELECT StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID '
+            'FROM InstanceUIDs'
+        )
+
+        return [
+            (hd_UID(a), hd_UID(b), hd_UID(c)) for a, b, c in res.fetchall()
+        ]
+
+    def get_unique_referenced_sop_instance_uids(self) -> Set[str]:
+        """Get set of unique Referenced SOP Instance UIDs.
+
+        Returns
+        -------
+        Set[str]
+            Set of unique Referenced SOP Instance UIDs.
+
+        """
+        cur = self._db_con.cursor()
+        return {
+            r[0] for r in
+            cur.execute(
+                'SELECT DISTINCT(SOPInstanceUID) from InstanceUIDs'
+            )
+        }
+
+    def get_max_referenced_frame_number(self) -> int:
+        """Get highest frame number of any referenced frame.
+
+        Absent access to the referenced dataset itself, being less than this
+        value is a sufficient condition for the existence of a frame number
+        in the source image.
+
+        Returns
+        -------
+        int
+            Highest frame number referenced in the segmentation image.
+
+        """
+        cur = self._db_con.cursor()
+        return cur.execute(
+            'SELECT MAX(ReferencedFrameNumber) FROM FrameLUT'
+        ).fetchone()[0]
+
+    def is_indexable_as_total_pixel_matrix(self) -> bool:
+        """Whether the image can be indexed as a total pixel matrix.
+
+        Returns
+        -------
+        bool:
+            True if the segmentation may be indexed using row and column
+            positions in the total pixel matrix. False otherwise.
+
+        """
+        row_pos_kw = tag_for_keyword('RowPositionInTotalImagePixelMatrix')
+        col_pos_kw = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix')
+        return (
+            row_pos_kw in self._dim_ind_col_names and
+            col_pos_kw in self._dim_ind_col_names
+        )
+
+    def get_unique_dim_index_values(
+        self,
+        dimension_index_pointers: Sequence[int],
+    ) -> Set[Tuple[int, ...]]:
+        """Get set of unique dimension index value combinations.
+
+        Parameters
+        ----------
+        dimension_index_pointers: Sequence[int]
+            List of dimension index pointers for which to find unique
+            combinations of values.
+
+        Returns
+        -------
+        Set[Tuple[int, ...]]
+            Set of unique dimension index value combinations for the given
+            input dimension index pointers.
+
+        """
+        cols = [self._dim_ind_col_names[p] for p in dimension_index_pointers]
+        cols_str = ', '.join(cols)
+        cur = self._db_con.cursor()
+        return {
+            r for r in
+            cur.execute(
+                f'SELECT DISTINCT {cols_str} FROM FrameLUT'
+            )
+        }
+
+    def get_slice_spacing(
+        self,
+        tol: float = DEFAULT_SPACING_TOLERANCE,
+        split_dimensions: Optional[Sequence[BaseTag]] = None,
+    ) -> Optional[float]:
+        """Get slice spacing, if any, for the image.
+
+        First determines whether the multiframe image represents a 3D volume.
+        A 3D volume consists of regularly spaced slices with orthogonal axes,
+        i.e. the slices are spaced equally along the direction orthogonal to
+        the in-plane image orientation cosines.
+
+        If the image does represent a volume, returns the absolute value of
+        the slice spacing. If the image does not represent a volume, returns
+        None.
+
+        Note that we stipulate that an image with a single frame in the
+        patient coordinate system is a 3D volume for the purposes of this
+        function. In this case the returned slice spacing will be 0.0 if it
+        cannot be deduced from the metadata.
+
+        Note also that this function checks the image position and image
+        orientation metadata found in the file and ignores any
+        SpacingBetweenSlices or DimensionOrganizationType found in the
+        dataset. Therefore it does not rely upon the creator having populated
+        these attributes, or upon their values being correct.
+
+        Parameters
+        ----------
+        tol: float, optional
+            Tolerance for determining spacing regularity. If slice spacings
+            vary by less than this tolerance, they are considered to be
+            regular.
+        split_dimensions: Union[Sequence[pydicom.tag.BaseTag], None], optional
+            Split the frames along these dimension indices and determine
+            whether a 3D volume of frames exists for each value of the split
+            dimensions, and whether the same 3D volume geometry is shared
+            across those values. For example, if time were included as a
+            split dimension, this function would check whether a 3D volume
+            exists at each timepoint (and that the volume is the same at each
+            time point). Each dimension index should be provided as a
+            BaseTag representing the Dimension Index Pointer.
+
+        Returns
+        -------
+        float:
+            Absolute value of the regular slice spacing if the series of
+            images meets the definition of a 3D volume, above. None
+            otherwise.
+
+        """
+        if self._coordinate_system is None:
+            return None
+        if self._coordinate_system != CoordinateSystemNames.PATIENT:
+            return None
+
+        if self.shared_image_orientation is None:
+            return None
+
+        if self._number_of_frames == 1:
+            # Stipulate that this does represent a volume
+            return 0.0
+
+        if split_dimensions is None:
+            cur = self._db_con.cursor()
+
+            query = """
+                SELECT
+                    ImagePositionPatient_0,
+                    ImagePositionPatient_1,
+                    ImagePositionPatient_2
+                FROM FrameLUT;
+            """
+
+            image_positions = np.array(
+                [r for r in cur.execute(query)]
+            )
+            spacing = get_regular_slice_spacing(
+                image_positions=image_positions,
+                image_orientation=np.array(self.shared_image_orientation),
+                sort=True,
+            )
+
+            return spacing
+
+    @contextmanager
+    def _generate_temp_table(
+        self,
+        table_name: str,
+        column_defs: Sequence[str],
+        column_data: Iterable[Sequence[Any]],
+    ) -> Generator[None, None, None]:
+        """Context manager that handles a temporary table.
+
+        The temporary table is created with the specified information. Control
+        flow then returns to code within the "with" block. After the "with"
+        block has completed, the cleanup of the table is automatically
+        handled.
+
+        Parameters
+        ----------
+        table_name: str
+            Name of the temporary table.
+ column_defs: Sequence[str] + SQL syntax strings defining each column in the temporary table, one + string per column. + column_data: Iterable[Sequence[Any]] + Column data to place into the table. + + Yields + ------ + None: + Yields control to the "with" block, with the temporary table + created. + + """ + defs_str = ', '.join(column_defs) + create_cmd = (f'CREATE TABLE {table_name}({defs_str})') + placeholders = ', '.join(['?'] * len(column_defs)) + + with self._db_con: + self._db_con.execute(create_cmd) + self._db_con.executemany( + f'INSERT INTO {table_name} VALUES({placeholders})', + column_data + ) + + # Return control flow to "with" block + yield + + # Clean up the table + cmd = (f'DROP TABLE {table_name}') + with self._db_con: + self._db_con.execute(cmd) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 4bbc53da..37b449b5 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -36,7 +36,8 @@ ) from highdicom._module_utils import ( check_required_attributes, - does_iod_have_pixel_data + does_iod_have_pixel_data, + is_multiframe_image, ) @@ -1754,7 +1755,10 @@ def __init__( 'Specifying "referenced_frame_number" is not supported ' 'with multiple referenced images.' ) - if not hasattr(referenced_images[0], 'NumberOfFrames'): + # note cannot use the highdicom.utils function here due to + # circular import issues + is_multiframe = is_multiframe_image(referenced_images[0]) + if not is_multiframe: raise TypeError( 'Specifying "referenced_frame_number" is not valid ' 'when the referenced image is not a multi-frame image.' diff --git a/src/highdicom/pm/content.py b/src/highdicom/pm/content.py index 2d50eee1..aea90057 100644 --- a/src/highdicom/pm/content.py +++ b/src/highdicom/pm/content.py @@ -5,6 +5,7 @@ from pydicom.dataset import Dataset from pydicom.sequence import Sequence as DataElementSequence from pydicom.sr.coding import Code +from highdicom._module_utils import is_multiframe_image from highdicom.content import PlanePositionSequence from highdicom.enum import CoordinateSystemNames @@ -281,7 +282,7 @@ def get_plane_positions_of_image( Plane position of each frame in the image """ - is_multiframe = hasattr(image, 'NumberOfFrames') + is_multiframe = is_multiframe_image(image) if not is_multiframe: raise ValueError('Argument "image" must be a multi-frame image.') @@ -322,7 +323,7 @@ def get_plane_positions_of_series( Plane position of each frame in the image """ - is_multiframe = any([hasattr(img, 'NumberOfFrames') for img in images]) + is_multiframe = any([is_multiframe_image(img) for img in images]) if is_multiframe: raise ValueError( 'Argument "images" must be a series of single-frame images.' 
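
The behavioral difference between the new IOD-level check and the
attribute-level `hasattr` checks it replaces can be seen in a minimal
sketch (contrived datasets, for illustration only; not part of the patch):

from pydicom.dataset import Dataset

from highdicom._module_utils import is_multiframe_image

# Contrived Enhanced CT header in which NumberOfFrames is not (yet) set
ds = Dataset()
ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2.1'  # Enhanced CT Image Storage

# New check: decided by the IOD (via the SOP Class UID) alone
assert is_multiframe_image(ds)

# Old check: keyed off the instance's attributes, so it misses this case
assert not hasattr(ds, 'NumberOfFrames')

# A classic single-frame IOD is never treated as multiframe
ct = Dataset()
ct.SOPClassUID = '1.2.840.10008.5.1.4.1.1.2'  # CT Image Storage
assert not is_multiframe_image(ct)

A single-frame instance of a multiframe IOD is thus classified correctly by
the SOP-class-based test even when NumberOfFrames is absent from the header.
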
diff --git a/src/highdicom/pm/sop.py b/src/highdicom/pm/sop.py index 2bc8a645..091c7278 100644 --- a/src/highdicom/pm/sop.py +++ b/src/highdicom/pm/sop.py @@ -17,6 +17,7 @@ from highdicom.pm.content import DimensionIndexSequence, RealWorldValueMapping from highdicom.pm.enum import DerivedPixelContrastValues, ImageFlavorValues from highdicom.valuerep import check_person_name, _check_code_string +from highdicom._module_utils import is_multiframe_image from pydicom import Dataset from pydicom.uid import ( UID, @@ -271,7 +272,7 @@ def __init__( ) src_img = self._source_images[0] - is_multiframe = hasattr(src_img, 'NumberOfFrames') + is_multiframe = is_multiframe_image(src_img) # TODO: Revisit, may be overly restrictive # Check Source Image Sequence attribute in General Reference module if is_multiframe: diff --git a/src/highdicom/pr/content.py b/src/highdicom/pr/content.py index 701e6cba..41af8d27 100644 --- a/src/highdicom/pr/content.py +++ b/src/highdicom/pr/content.py @@ -41,6 +41,8 @@ _check_long_string, _check_short_text ) +from highdicom._module_utils import is_multiframe_image + logger = logging.getLogger(__name__) @@ -553,7 +555,7 @@ def __init__( ) self.GraphicLayer = graphic_layer.GraphicLayer - is_multiframe = hasattr(referenced_images[0], 'NumberOfFrames') + is_multiframe = is_multiframe_image(referenced_images[0]) if is_multiframe and len(referenced_images) > 1: raise ValueError( 'If referenced images are multi-frame, only a single image ' @@ -1087,7 +1089,7 @@ def _get_modality_lut_transformation( """ # Multframe images - if any(hasattr(im, 'NumberOfFrames') for im in referenced_images): + if any(is_multiframe_image(im) for im in referenced_images): im = referenced_images[0] if len(referenced_images) > 1 and not is_tiled_image(im): raise ValueError( @@ -1277,10 +1279,7 @@ def _add_softcopy_voi_lut_attributes( 'included in "referenced_images".' 
) ref_im = ref_images_lut[uids] - is_multiframe = hasattr( - ref_im, - 'NumberOfFrames', - ) + is_multiframe = is_multiframe_image(ref_im) if uids in prev_ref_frames and not is_multiframe: raise ValueError( f'Instance with SOP Instance UID {uids[1]} ' @@ -1358,7 +1357,7 @@ def _get_softcopy_voi_lut_transformations( """ transformations = [] - if any(hasattr(im, 'NumberOfFrames') for im in referenced_images): + if any(is_multiframe_image(im) for im in referenced_images): if len(referenced_images) > 1: raise ValueError( "If multiple images are passed and any of them are multiframe, " diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py index 495f89d6..cf6672a4 100644 --- a/src/highdicom/seg/content.py +++ b/src/highdicom/seg/content.py @@ -18,7 +18,11 @@ from highdicom.sr.coding import CodedConcept from highdicom.uid import UID from highdicom.utils import compute_plane_position_slide_per_frame -from highdicom._module_utils import check_required_attributes +from highdicom._module_utils import ( + check_required_attributes, + is_multiframe_image, +) + class SegmentDescription(Dataset): @@ -470,7 +474,7 @@ def get_plane_positions_of_image( Plane position of each frame in the image """ - is_multiframe = hasattr(image, 'NumberOfFrames') + is_multiframe = is_multiframe_image(image) if not is_multiframe: raise ValueError('Argument "image" must be a multi-frame image.') @@ -515,7 +519,7 @@ def get_plane_positions_of_series( Plane position of each frame in the image """ - is_multiframe = any([hasattr(img, 'NumberOfFrames') for img in images]) + is_multiframe = any([is_multiframe_image(img) for img in images]) if is_multiframe: raise ValueError( 'Argument "images" must be a series of single-frame images.' diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index be933a3d..776a1576 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -5,18 +5,15 @@ from contextlib import contextmanager from copy import deepcopy from os import PathLike -import sqlite3 from typing import ( Any, BinaryIO, Dict, Generator, - Iterable, Iterator, List, Optional, Sequence, - Set, Tuple, Union, cast, @@ -25,9 +22,8 @@ import numpy as np from pydicom.dataset import Dataset -from pydicom.datadict import get_entry, keyword_for_tag, tag_for_keyword +from pydicom.datadict import keyword_for_tag, tag_for_keyword from pydicom.encaps import encapsulate -from pydicom.multival import MultiValue from pydicom.pixel_data_handlers.numpy_handler import pack_bits from pydicom.tag import BaseTag, Tag from pydicom.uid import ( @@ -43,7 +39,12 @@ from pydicom.sr.coding import Code from pydicom.filereader import dcmread -from highdicom._module_utils import ModuleUsageValues, get_module_usage +from highdicom._module_utils import ( + ModuleUsageValues, + get_module_usage, + is_multiframe_image, +) +from highdicom._multiframe import MultiFrameDBManager from highdicom.base import SOPClass, _check_little_endian from highdicom.content import ( ContentCreatorIdentificationCodeSequence, @@ -61,7 +62,6 @@ compute_plane_position_tiled_full, is_tiled_image, get_tile_array, - iter_tiled_full_frame_data, tile_pixel_matrix, ) from highdicom.seg.content import ( @@ -72,7 +72,6 @@ SegmentationFractionalTypeValues, SegmentationTypeValues, SegmentsOverlapValues, - SpatialLocationsPreservedValues, SegmentAlgorithmTypeValues, ) from highdicom.seg.utils import iter_segments @@ -89,8 +88,6 @@ logger = logging.getLogger(__name__) -_NO_FRAME_REF_VALUE = -1 - def _get_unsigned_dtype(max_val: Union[int, np.integer]) -> 
type: """Get the smallest unsigned NumPy datatype to accommodate a value. @@ -150,242 +147,10 @@ def _check_numpy_value_representation( ) -class _SegDBManager: +class _SegDBManager(MultiFrameDBManager): """Database manager for data associated with a segmentation image.""" - # Dictionary mapping DCM VRs to appropriate SQLite types - _DCM_SQL_TYPE_MAP = { - 'CS': 'VARCHAR', - 'DS': 'REAL', - 'FD': 'REAL', - 'FL': 'REAL', - 'IS': 'INTEGER', - 'LO': 'TEXT', - 'LT': 'TEXT', - 'PN': 'TEXT', - 'SH': 'TEXT', - 'SL': 'INTEGER', - 'SS': 'INTEGER', - 'ST': 'TEXT', - 'UI': 'TEXT', - 'UL': 'INTEGER', - 'UR': 'TEXT', - 'US or SS': 'INTEGER', - 'US': 'INTEGER', - 'UT': 'TEXT', - } - - def __init__( - self, - referenced_uids: List[Tuple[str, str, str]], - segment_numbers: List[int], - dim_indices: Dict[int, List[int]], - dim_values: Dict[int, List[Any]], - referenced_instances: Optional[List[str]], - referenced_frames: Optional[List[int]], - ): - """ - - Parameters - ---------- - referenced_uids: List[Tuple[str, str, str]] - Triplet of UIDs for each image instance (Study Instance UID, - Series Instance UID, SOP Instance UID) that is referenced - in the segmentation image. - segment_numbers: List[int] - Segment numbers for each frame in the segmentation image. - dim_indices: Dict[int, List[int]] - Dictionary mapping the integer tag value of each dimension index - pointer (excluding SegmentNumber) to a list of dimension indices - for each frame in the segmentation image. - dim_values: Dict[int, List[Values]] - Dictionary mapping the integer tag value of each dimension index - pointer (excluding SegmentNumber) to a list of dimension values - for each frame in the segmentation image. - referenced_instances: Optional[List[str]] - SOP Instance UID of each referenced image instance for each frame - in the segmentation image. Should be omitted if there is not a - single referenced image instance per segmentation image frame. - referenced_frames: Optional[List[int]] - Number of the corresponding frame in the referenced image - instance for each frame in the segmentation image. Should be - omitted if there is not a single referenced image instance per - segmentation image frame. - - """ - self._db_con: sqlite3.Connection = sqlite3.connect(":memory:") - - self._create_ref_instance_table(referenced_uids) - - self._number_of_frames = len(segment_numbers) - - # Construct the columns and values to put into a frame look-up table - # table within sqlite. 
There will be one row per frame in the - # segmentation instance - col_defs = [] # SQL column definitions - col_data = [] # lists of column data - - # Frame number column - col_defs.append('FrameNumber INTEGER PRIMARY KEY') - col_data.append(list(range(1, self._number_of_frames + 1))) - - # Segment number column - col_defs.append('SegmentNumber INTEGER NOT NULL') - col_data.append(segment_numbers) - - self._dim_ind_col_names = {} - for i, t in enumerate(dim_indices.keys()): - vr, vm_str, _, _, kw = get_entry(t) - if kw == '': - kw = f'UnknownDimensionIndex{i}' - ind_col_name = kw + '_DimensionIndexValues' - self._dim_ind_col_names[t] = ind_col_name - - # Add column for dimension index - col_defs.append(f'{ind_col_name} INTEGER NOT NULL') - col_data.append(dim_indices[t]) - - # Add column for dimension value - # For this to be possible, must have a fixed VM - # and a VR that we can map to a sqlite type - # Otherwise, we just omit the data from the db - try: - vm = int(vm_str) - except ValueError: - continue - try: - sql_type = self._DCM_SQL_TYPE_MAP[vr] - except KeyError: - continue - - if vm > 1: - for d in range(vm): - data = [el[d] for el in dim_values[t]] - col_defs.append(f'{kw}_{d} {sql_type} NOT NULL') - col_data.append(data) - else: - # Single column - col_defs.append(f'{kw} {sql_type} NOT NULL') - col_data.append(dim_values[t]) - - # Columns related to source frames, if they are usable for indexing - if (referenced_frames is None) != (referenced_instances is None): - raise TypeError( - "'referenced_frames' and 'referenced_instances' should be " - "provided together or not at all." - ) - if referenced_instances is not None: - col_defs.append('ReferencedFrameNumber INTEGER') - col_defs.append('ReferencedSOPInstanceUID VARCHAR NOT NULL') - col_defs.append( - 'FOREIGN KEY(ReferencedSOPInstanceUID) ' - 'REFERENCES InstanceUIDs(SOPInstanceUID)' - ) - col_data += [ - referenced_frames, - referenced_instances, - ] - - # Build LUT from columns - all_defs = ", ".join(col_defs) - cmd = f'CREATE TABLE FrameLUT({all_defs})' - placeholders = ', '.join(['?'] * len(col_data)) - with self._db_con: - self._db_con.execute(cmd) - self._db_con.executemany( - f'INSERT INTO FrameLUT VALUES({placeholders})', - zip(*col_data), - ) - - def _create_ref_instance_table( - self, - referenced_uids: List[Tuple[str, str, str]], - ) -> None: - """Create a table of referenced instances. - - The resulting table (called InstanceUIDs) contains Study, Series and - SOP instance UIDs for each instance referenced by the segmentation - image. - - Parameters - ---------- - referenced_uids: List[Tuple[str, str, str]] - List of UIDs for each instance referenced in the segmentation. - Each tuple should be in the format - (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID). - - """ - with self._db_con: - self._db_con.execute( - """ - CREATE TABLE InstanceUIDs( - StudyInstanceUID VARCHAR NOT NULL, - SeriesInstanceUID VARCHAR NOT NULL, - SOPInstanceUID VARCHAR PRIMARY KEY - ) - """ - ) - self._db_con.executemany( - "INSERT INTO InstanceUIDs " - "(StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID) " - "VALUES(?, ?, ?)", - referenced_uids, - ) - - def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]: - """Get UIDs of source image instances referenced in the segmentation. - - Returns - ------- - List[Tuple[highdicom.UID, highdicom.UID, highdicom.UID]] - (Study Instance UID, Series Instance UID, SOP Instance UID) triplet - for every image instance referenced in the segmentation. 
- - """ - cur = self._db_con.cursor() - res = cur.execute( - 'SELECT StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID ' - 'FROM InstanceUIDs' - ) - - return [ - (hd_UID(a), hd_UID(b), hd_UID(c)) for a, b, c in res.fetchall() - ] - - def are_dimension_indices_unique( - self, - dimension_index_pointers: Sequence[Union[int, BaseTag]], - ) -> bool: - """Check if a list of index pointers uniquely identifies frames. - - For a given list of dimension index pointers, check whether every - combination of index values for these pointers identifies a unique - frame per segment in the segmentation image. This is a pre-requisite - for indexing using this list of dimension index pointers in the - :meth:`Segmentation.get_pixels_by_dimension_index_values()` method. - - Parameters - ---------- - dimension_index_pointers: Sequence[Union[int, pydicom.tag.BaseTag]] - Sequence of tags serving as dimension index pointers. - - Returns - ------- - bool - True if dimension indices are unique. - - """ - column_names = ['SegmentNumber'] - for ptr in dimension_index_pointers: - column_names.append(self._dim_ind_col_names[ptr]) - col_str = ", ".join(column_names) - cur = self._db_con.cursor() - n_unique_combos = cur.execute( - f"SELECT COUNT(*) FROM (SELECT 1 FROM FrameLUT GROUP BY {col_str})" - ).fetchone()[0] - return n_unique_combos == self._number_of_frames - def are_referenced_sop_instances_unique(self) -> bool: """Check if Referenced SOP Instance UIDs uniquely identify frames. @@ -406,7 +171,7 @@ def are_referenced_sop_instances_unique(self) -> bool: n_unique_combos = cur.execute( 'SELECT COUNT(*) FROM ' '(SELECT 1 FROM FrameLUT GROUP BY ReferencedSOPInstanceUID, ' - 'SegmentNumber)' + 'ReferencedSegmentNumber)' ).fetchone()[0] return n_unique_combos == self._number_of_frames @@ -425,140 +190,10 @@ def are_referenced_frames_unique(self) -> bool: n_unique_combos = cur.execute( 'SELECT COUNT(*) FROM ' '(SELECT 1 FROM FrameLUT GROUP BY ReferencedFrameNumber, ' - 'SegmentNumber)' + 'ReferencedSegmentNumber)' ).fetchone()[0] return n_unique_combos == self._number_of_frames - def get_unique_sop_instance_uids(self) -> Set[str]: - """Get set of unique Referenced SOP Instance UIDs. - - Returns - ------- - Set[str] - Set of unique Referenced SOP Instance UIDs. - - """ - cur = self._db_con.cursor() - return { - r[0] for r in - cur.execute( - 'SELECT DISTINCT(SOPInstanceUID) from InstanceUIDs' - ) - } - - def get_max_frame_number(self) -> int: - """Get highest frame number of any referenced frame. - - Absent access to the referenced dataset itself, being less than this - value is a sufficient condition for the existence of a frame number - in the source image. - - Returns - ------- - int - Highest frame number referenced in the segmentation image. - - """ - cur = self._db_con.cursor() - return cur.execute( - 'SELECT MAX(ReferencedFrameNumber) FROM FrameLUT' - ).fetchone()[0] - - def get_unique_dim_index_values( - self, - dimension_index_pointers: Sequence[int], - ) -> Set[Tuple[int, ...]]: - """Get set of unique dimension index value combinations. - - Parameters - ---------- - dimension_index_pointers: Sequence[int] - List of dimension index pointers for which to find unique - combinations of values. - - Returns - ------- - Set[Tuple[int, ...]] - Set of unique dimension index value combinations for the given - input dimension index pointers. 
- - """ - cols = [self._dim_ind_col_names[p] for p in dimension_index_pointers] - cols_str = ', '.join(cols) - cur = self._db_con.cursor() - return { - r for r in - cur.execute( - f'SELECT DISTINCT {cols_str} FROM FrameLUT' - ) - } - - def is_indexable_as_total_pixel_matrix(self) -> bool: - """Whether the segmentation can be indexed as a total pixel matrix. - - Returns - ------- - bool: - True if the segmentation may be indexed using row and column - positions in the total pixel matrix. False otherwise. - - """ - row_pos_kw = tag_for_keyword('RowPositionInTotalImagePixelMatrix') - col_pos_kw = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix') - return ( - row_pos_kw in self._dim_ind_col_names and - col_pos_kw in self._dim_ind_col_names - ) - - @contextmanager - def _generate_temp_table( - self, - table_name: str, - column_defs: Sequence[str], - column_data: Iterable[Sequence[Any]], - ) -> Generator[None, None, None]: - """Context manager that handles a temporary table. - - The temporary table is created with the specified information. Control - flow then returns to code within the "with" block. After the "with" - block has completed, the cleanup of the table is automatically handled. - - Parameters - ---------- - table_name: str - Name of the temporary table. - column_defs: Sequence[str] - SQL syntax strings defining each column in the temporary table, one - string per column. - column_data: Iterable[Sequence[Any]] - Column data to place into the table. - - Yields - ------ - None: - Yields control to the "with" block, with the temporary table - created. - - """ - defs_str = ', '.join(column_defs) - create_cmd = (f'CREATE TABLE {table_name}({defs_str})') - placeholders = ', '.join(['?'] * len(column_defs)) - - with self._db_con: - self._db_con.execute(create_cmd) - self._db_con.executemany( - f'INSERT INTO {table_name} VALUES({placeholders})', - column_data - ) - - # Return control flow to "with" block - yield - - # Clean up the table - cmd = (f'DROP TABLE {table_name}') - with self._db_con: - self._db_con.execute(cmd) - @contextmanager def _generate_temp_segment_table( self, @@ -724,7 +359,7 @@ def iterate_indices_by_source_instance( 'INNER JOIN FrameLUT L' ' ON T.SourceSOPInstanceUID = L.ReferencedSOPInstanceUID ' 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.SegmentNumber = S.SegmentNumber ' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' 'ORDER BY T.OutputFrameIndex' ) @@ -844,7 +479,7 @@ def iterate_indices_by_source_frame( 'INNER JOIN FrameLUT L' ' ON F.SourceFrameNumber = L.ReferencedFrameNumber ' 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.SegmentNumber = S.SegmentNumber ' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' 'ORDER BY F.OutputFrameIndex' ) @@ -967,7 +602,7 @@ def iterate_indices_by_dimension_index_values( 'INNER JOIN FrameLUT L' f' ON {join_str} ' 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.SegmentNumber = S.SegmentNumber ' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' 'ORDER BY D.OutputFrameIndex' ) @@ -1088,7 +723,7 @@ def iterate_indices_for_tiled_region( ' S.OutputSegmentNumber ' 'FROM FrameLUT L ' 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.SegmentNumber = S.SegmentNumber ' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' 'WHERE (' ' L.RowPositionInTotalImagePixelMatrix >= ' f' {row_offset_start}' @@ -1435,7 +1070,7 @@ def __init__( ) src_img = source_images[0] - is_multiframe = hasattr(src_img, 'NumberOfFrames') + is_multiframe = is_multiframe_image(src_img) if is_multiframe and len(source_images) > 1: raise ValueError( 
'Only one source image should be provided in case images ' @@ -1870,9 +1505,7 @@ def __init__( dimension_organization_type = self._check_dimension_organization_type( dimension_organization_type=dimension_organization_type, is_tiled=is_tiled, - are_spatial_locations_preserved=are_spatial_locations_preserved, omit_empty_frames=omit_empty_frames, - source_image=src_img, plane_positions=plane_positions, rows=self.Rows, columns=self.Columns, @@ -2384,9 +2017,7 @@ def _check_dimension_organization_type( None, ], is_tiled: bool, - are_spatial_locations_preserved: bool, omit_empty_frames: bool, - source_image: Dataset, plane_positions: Sequence[PlanePositionSequence], rows: int, columns: int, @@ -2399,13 +2030,8 @@ def _check_dimension_organization_type( The specified DimensionOrganizationType for the output Segmentation. is_tiled: bool Whether the source image is a tiled image. - are_spatial_locations_preserved: bool - Whether spatial locations are preserved between the source image - and the segmentation pixel array. omit_empty_frames: bool Whether it was specified to omit empty frames. - source_image: pydicom.Dataset - Representative dataset of the source images. plane_positions: Sequence[highdicom.PlanePositionSequence] Plane positions of all frames. rows: int @@ -2454,8 +2080,8 @@ def _check_dimension_organization_type( ): raise ValueError( 'A value of "TILED_FULL" for parameter ' - '"dimension_organization_type" is not permitted unless ' - 'the "plane_positions" of the segmentation do not ' + '"dimension_organization_type" is not permitted because ' + 'the "plane_positions" of the segmentation ' 'do not follow the relevant requirements. See ' 'https://dicom.nema.org/medical/dicom/current/output/' 'chtml/part03/sect_C.7.6.17.3.html#sect_C.7.6.17.3.' @@ -2922,7 +2548,7 @@ def _get_pffg_item( ] derivation_src_img_item = Dataset() - if hasattr(source_images[0], 'NumberOfFrames'): + if is_multiframe_image(source_images[0]): # A single multi-frame source image src_img_item = source_images[0] # Frame numbers are one-based @@ -3164,209 +2790,7 @@ def _build_luts(self) -> None: index values. 
""" - referenced_uids = self._get_ref_instance_uids() - all_referenced_sops = {uids[2] for uids in referenced_uids} - - is_tiled_full = ( - hasattr(self, 'DimensionOrganizationType') and - self.DimensionOrganizationType == 'TILED_FULL' - ) - - segment_numbers = [] - - # Get list of all dimension index pointers, excluding the segment - # number, since this is treated differently - seg_num_tag = tag_for_keyword('ReferencedSegmentNumber') - self._dim_ind_pointers = [ - dim_ind.DimensionIndexPointer - for dim_ind in self.DimensionIndexSequence - if dim_ind.DimensionIndexPointer != seg_num_tag - ] - - func_grp_pointers = {} - for dim_ind in self.DimensionIndexSequence: - ptr = dim_ind.DimensionIndexPointer - if ptr in self._dim_ind_pointers: - grp_ptr = getattr(dim_ind, "FunctionalGroupPointer", None) - func_grp_pointers[ptr] = grp_ptr - - dim_ind_positions = { - dim_ind.DimensionIndexPointer: i - for i, dim_ind in enumerate(self.DimensionIndexSequence) - if dim_ind.DimensionIndexPointer != seg_num_tag - } - dim_indices: Dict[int, List[int]] = { - ptr: [] for ptr in self._dim_ind_pointers - } - dim_values: Dict[int, List[Any]] = { - ptr: [] for ptr in self._dim_ind_pointers - } - - self._single_source_frame_per_seg_frame = True - - if is_tiled_full: - # With TILED_FULL, there is no PerFrameFunctionalGroupsSequence, - # so we have to deduce the per-frame information - row_tag = tag_for_keyword('RowPositionInTotalImagePixelMatrix') - col_tag = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix') - x_tag = tag_for_keyword('XOffsetInSlideCoordinateSystem') - y_tag = tag_for_keyword('YOffsetInSlideCoordinateSystem') - z_tag = tag_for_keyword('ZOffsetInSlideCoordinateSystem') - tiled_full_dim_indices = {row_tag, col_tag, x_tag, y_tag, z_tag} - if len(set(dim_indices.keys()) - tiled_full_dim_indices) > 0: - raise RuntimeError( - 'Expected segmentation images with ' - '"DimensionOrganizationType" of "TILED_FULL" are expected ' - 'to have the following dimension index pointers: ' - 'SegmentNumber, RowPositionInTotalImagePixelMatrix, ' - 'ColumnPositionInTotalImagePixelMatrix.' 
- ) - self._single_source_frame_per_seg_frame = False - ( - segment_numbers, - _, - dim_values[col_tag], - dim_values[row_tag], - dim_values[x_tag], - dim_values[y_tag], - dim_values[z_tag], - ) = zip(*iter_tiled_full_frame_data(self)) - - # Create indices for each of the dimensions - for ptr, vals in dim_values.items(): - _, indices = np.unique(vals, return_inverse=True) - dim_indices[ptr] = (indices + 1).tolist() - - # There is no way to deduce whether the spatial locations are - # preserved in the tiled full case - self._locations_preserved = None - - referenced_instances = None - referenced_frames = None - else: - referenced_instances: Optional[List[str]] = [] - referenced_frames: Optional[List[int]] = [] - - # Create a list of source images and check for spatial locations - # preserved - locations_list_type = List[ - Optional[SpatialLocationsPreservedValues] - ] - locations_preserved: locations_list_type = [] - - for frame_item in self.PerFrameFunctionalGroupsSequence: - # Get segment number for this frame - seg_id_seg = frame_item.SegmentIdentificationSequence[0] - seg_num = seg_id_seg.ReferencedSegmentNumber - segment_numbers.append(int(seg_num)) - - # Get dimension indices for this frame - content_seq = frame_item.FrameContentSequence[0] - indices = content_seq.DimensionIndexValues - if not isinstance(indices, (MultiValue, list)): - # In case there is a single dimension index - indices = [indices] - if len(indices) != len(self._dim_ind_pointers) + 1: - # (+1 because referenced segment number is ignored) - raise RuntimeError( - 'Unexpected mismatch between dimension index values in ' - 'per-frames functional groups sequence and items in ' - 'the dimension index sequence.' - ) - for ptr in self._dim_ind_pointers: - dim_indices[ptr].append(indices[dim_ind_positions[ptr]]) - grp_ptr = func_grp_pointers[ptr] - if grp_ptr is not None: - dim_val = frame_item[grp_ptr][0][ptr].value - else: - dim_val = frame_item[ptr].value - dim_values[ptr].append(dim_val) - - frame_source_instances = [] - frame_source_frames = [] - for der_im in frame_item.DerivationImageSequence: - for src_im in der_im.SourceImageSequence: - frame_source_instances.append( - src_im.ReferencedSOPInstanceUID - ) - if hasattr(src_im, 'SpatialLocationsPreserved'): - locations_preserved.append( - SpatialLocationsPreservedValues( - src_im.SpatialLocationsPreserved - ) - ) - else: - locations_preserved.append( - None - ) - - if hasattr(src_im, 'ReferencedFrameNumber'): - if isinstance( - src_im.ReferencedFrameNumber, - MultiValue - ): - frame_source_frames.extend( - [ - int(f) - for f in src_im.ReferencedFrameNumber - ] - ) - else: - frame_source_frames.append( - int(src_im.ReferencedFrameNumber) - ) - else: - frame_source_frames.append(_NO_FRAME_REF_VALUE) - - if ( - len(set(frame_source_instances)) != 1 or - len(set(frame_source_frames)) != 1 - ): - self._single_source_frame_per_seg_frame = False - else: - ref_instance_uid = frame_source_instances[0] - if ref_instance_uid not in all_referenced_sops: - raise AttributeError( - f'SOP instance {ref_instance_uid} referenced in ' - 'the source image sequence is not included in the ' - 'Referenced Series Sequence or Studies Containing ' - 'Other Referenced Instances Sequence. This is an ' - 'error with the integrity of the Segmentation ' - 'object.' 
-                    )
-                    referenced_instances.append(ref_instance_uid)
-                    referenced_frames.append(frame_source_frames[0])
-
-            # Summarise
-            if any(
-                isinstance(v, SpatialLocationsPreservedValues) and
-                v == SpatialLocationsPreservedValues.NO
-                for v in locations_preserved
-            ):
-                Type = Optional[SpatialLocationsPreservedValues]
-                self._locations_preserved: Type = \
-                    SpatialLocationsPreservedValues.NO
-            elif all(
-                isinstance(v, SpatialLocationsPreservedValues) and
-                v == SpatialLocationsPreservedValues.YES
-                for v in locations_preserved
-            ):
-                self._locations_preserved = SpatialLocationsPreservedValues.YES
-            else:
-                self._locations_preserved = None
-
-            if not self._single_source_frame_per_seg_frame:
-                referenced_instances = None
-                referenced_frames = None
-
-        self._db_man = _SegDBManager(
-            referenced_uids=referenced_uids,
-            segment_numbers=segment_numbers,
-            dim_indices=dim_indices,
-            dim_values=dim_values,
-            referenced_instances=referenced_instances,
-            referenced_frames=referenced_frames,
-        )
+        self._db_man = _SegDBManager(self)
 
     @property
     def segmentation_type(self) -> SegmentationTypeValues:
@@ -3937,7 +3361,11 @@ def get_default_dimension_index_pointers(
         List of tags used as the default dimension index pointers.
 
         """
-        return self._dim_ind_pointers[:]
+        referenced_segment_number = tag_for_keyword('ReferencedSegmentNumber')
+        return [
+            t for t in self._db_man.dimension_index_pointers[:]
+            if t != referenced_segment_number
+        ]
 
     def are_dimension_indices_unique(
         self,
@@ -3973,8 +3401,9 @@ def are_dimension_indices_unique(
             raise ValueError(
                 'Argument "dimension_index_pointers" may not be empty.'
             )
+        dimension_index_pointers = list(dimension_index_pointers)
         for ptr in dimension_index_pointers:
-            if ptr not in self._dim_ind_pointers:
+            if ptr not in self._db_man.dimension_index_pointers:
                 kw = keyword_for_tag(ptr)
                 if kw == '':
                     kw = '<no keyword>'
                 raise KeyError(
                     f'Tag {ptr} ({kw}) is not used as a dimension index '
                     'in this image.'
                 )
+
+        dimension_index_pointers.append(
+            tag_for_keyword('ReferencedSegmentNumber')
+        )
         return self._db_man.are_dimension_indices_unique(
             dimension_index_pointers
         )
 
-    def _check_indexing_with_source_frames(
-        self,
-        ignore_spatial_locations: bool = False
-    ) -> None:
-        """Check if indexing by source frames is possible.
-
-        Raise exceptions with useful messages otherwise.
-
-        Possible problems include:
-        * Spatial locations are not preserved.
-        * The dataset does not specify that spatial locations are preserved
-          and the user has not asserted that they are.
-        * At least one frame in the segmentation lists multiple
-          source frames.
-
-        Parameters
-        ----------
-        ignore_spatial_locations: bool
-            Allows the user to ignore whether spatial locations are preserved
-            in the frames.
-
-        """
-        # Checks that it is possible to index using source frames in this
-        # dataset
-        is_tiled_full = (
-            hasattr(self, 'DimensionOrganizationType') and
-            self.DimensionOrganizationType == 'TILED_FULL'
-        )
-        if is_tiled_full:
-            raise RuntimeError(
-                'Indexing via source frames is not possible when a '
-                'segmentation is stored using the DimensionOrganizationType '
-                '"TILED_FULL".'
-            )
-        elif self._locations_preserved is None:
-            if not ignore_spatial_locations:
-                raise RuntimeError(
-                    'Indexing via source frames is not permissible since this '
-                    'image does not specify that spatial locations are '
-                    'preserved in the course of deriving the segmentation '
-                    'from the source image. If you are confident that spatial '
-                    'locations are preserved, or do not require that spatial '
-                    'locations are preserved, you may override this behavior '
-                    "with the 'ignore_spatial_locations' parameter."
-                )
-        elif self._locations_preserved == SpatialLocationsPreservedValues.NO:
-            if not ignore_spatial_locations:
-                raise RuntimeError(
-                    'Indexing via source frames is not permissible since this '
-                    'image specifies that spatial locations are not preserved '
-                    'in the course of deriving the segmentation from the '
-                    'source image. If you do not require that spatial '
-                    ' locations are preserved you may override this behavior '
-                    "with the 'ignore_spatial_locations' parameter."
-                )
-        if not self._single_source_frame_per_seg_frame:
-            raise RuntimeError(
-                'Indexing via source frames is not permissible since some '
-                'frames in the segmentation specify multiple source frames.'
-            )
-
     def get_pixels_by_source_instance(
         self,
         source_sop_instance_uids: Sequence[str],
@@ -4202,7 +3574,9 @@ def get_pixels_by_source_instance(
 
         """
         # Check that indexing in this way is possible
-        self._check_indexing_with_source_frames(ignore_spatial_locations)
+        self._db_man._check_indexing_with_source_frames(
+            ignore_spatial_locations
+        )
 
         # Checks on validity of the inputs
         if segment_numbers is None:
@@ -4231,7 +3605,9 @@ def get_pixels_by_source_instance(
 
         # Check that all frame numbers requested actually exist
         if not assert_missing_frames_are_empty:
-            unique_uids = self._db_man.get_unique_sop_instance_uids()
+            unique_uids = (
+                self._db_man.get_unique_referenced_sop_instance_uids()
+            )
             missing_uids = set(source_sop_instance_uids) - unique_uids
             if len(missing_uids) > 0:
                 msg = (
@@ -4454,7 +3830,9 @@ def get_pixels_by_source_frame(
 
         """
         # Check that indexing in this way is possible
-        self._check_indexing_with_source_frames(ignore_spatial_locations)
+        self._db_man._check_indexing_with_source_frames(
+            ignore_spatial_locations
+        )
 
         # Checks on validity of the inputs
         if segment_numbers is None:
@@ -4483,7 +3861,9 @@ def get_pixels_by_source_frame(
 
         # Check that all frame numbers requested actually exist
         if not assert_missing_frames_are_empty:
-            max_frame_number = self._db_man.get_max_frame_number()
+            max_frame_number = (
+                self._db_man.get_max_referenced_frame_number()
+            )
             for f in source_frame_numbers:
                 if f > max_frame_number:
                     msg = (
@@ -4691,15 +4071,26 @@ def get_pixels_by_dimension_index_values(
             'Segment numbers may not be empty.'
         )
 
+        referenced_segment_number_tag = tag_for_keyword(
+            'ReferencedSegmentNumber'
+        )
         if dimension_index_pointers is None:
-            dimension_index_pointers = self._dim_ind_pointers
+            dimension_index_pointers = [
+                t for t in self._db_man.dimension_index_pointers
+                if t != referenced_segment_number_tag
+            ]
         else:
            if len(dimension_index_pointers) == 0:
                 raise ValueError(
                     'Argument "dimension_index_pointers" must not be empty.'
                 )
             for ptr in dimension_index_pointers:
+                if ptr == referenced_segment_number_tag:
+                    raise ValueError(
+                        "Do not include the ReferencedSegmentNumber in the "
+                        "argument 'dimension_index_pointers'."
+ ) + if ptr not in self._db_man.dimension_index_pointers: kw = keyword_for_tag(ptr) if kw == '': kw = '<no keyword>' diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 54a64925..a827aede 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -1,6 +1,14 @@ -from typing import Sequence, Tuple +from typing import Optional, Sequence, Tuple import numpy as np +import pydicom + +from highdicom._module_utils import is_multiframe_image +from highdicom.enum import CoordinateSystemNames + + +DEFAULT_SPACING_TOLERANCE = 1e-4 +"""Default tolerance for determining whether slices are regularly spaced.""" def create_rotation_matrix( @@ -917,3 +925,192 @@ def map_coordinate_into_pixel_matrix( round(pixel_matrix_coordinates[1]), round(pixel_matrix_coordinates[2]), ) + + +def get_series_slice_spacing( + datasets: Sequence[pydicom.Dataset], + tol: float = DEFAULT_SPACING_TOLERANCE, +) -> Optional[float]: + """Get slice spacing, if any, for a series of single frame images. + + First determines whether the image series represents a 3D volume. + A 3D volume consists of regularly spaced slices with orthogonal axes, i.e. + the slices are spaced equally along the direction orthogonal to the + in-plane image coordinates. + + If the series does represent a volume, returns the absolute value of the + slice spacing. If the series does not represent a volume, returns None. + + Note that we stipulate that a single image is a 3D volume for the purposes + of this function. In this case the returned slice spacing will be 0.0. + + Parameters + ---------- + datasets: Sequence[pydicom.Dataset] + Set of datasets representing an imaging series. + tol: float + Tolerance for determining spacing regularity. If slice spacings vary by + less than this value, they are considered to be regular. + + Returns + ------- + float: + Absolute value of the regular slice spacing if the series of images + meets the definition of a 3D volume, above. None otherwise. + + """ + if len(datasets) == 0: + raise ValueError("List must not be empty.") + # We stipulate that a single image does represent a volume with spacing 0.0 + if len(datasets) == 1: + return 0.0 + for ds in datasets: + if is_multiframe_image(ds): + raise ValueError( + "Datasets should be single-frame images." + ) + + # Check image orientations are consistent + image_orientation = datasets[0].ImageOrientationPatient + for ds in datasets[1:]: + if ds.ImageOrientationPatient != image_orientation: + return None + + positions = np.array( + [ds.ImagePositionPatient for ds in datasets] + ) + + return get_regular_slice_spacing( + image_positions=positions, + image_orientation=np.array(image_orientation), + tol=tol, + ) + + +def get_regular_slice_spacing( + image_positions: np.ndarray, + image_orientation: np.ndarray, + tol: float = DEFAULT_SPACING_TOLERANCE, + sort: bool = True, +) -> Optional[float]: + """Get the regular spacing between set of image positions, if any. + + A 3D volume consists of regularly spaced slices with orthogonal axes, i.e. + the slices are spaced equally along the direction orthogonal to the + in-plane image coordinates. + + Note that we stipulate that a single image is a 3D volume for the purposes + of this function. In this case the returned slice spacing will be 0.0. + + Parameters + ---------- + image_positions: numpy.ndarray + Array of image positions for multiple frames. Should be a numpy array of + shape (N, 3) where N is the number of frames. 
+ image_orientation: numpy.ndarray + Image orientation as direction cosine values taken directly from the + ImageOrientationPatient attribute. 1D array of length 6. + tol: float + Tolerance for determining spacing regularity. If slice spacings vary by + less than this value, they are considered to be regular. + sort: bool + Sort the image positions before finding the spacing. If True, this + makes the function tolerant of unsorted inputs. Set to False to check + whether the positions represent a 3D volume in the specific order in + which they are passed. + + Returns + ------- + Union[float, None] + If the image positions are regularly spaced, the (absolute value of + the) slice spacing. If the image positions are not regularly spaced, + returns None. + + """ + image_positions = np.array(image_positions) + image_orientation = np.array(image_orientation) + + if image_positions.ndim != 2 or image_positions.shape[1] != 3: + raise ValueError( + "Argument 'image_positions' should be an (N, 3) array." + ) + if image_orientation.ndim != 1 or image_orientation.shape[0] != 6: + raise ValueError( + "Argument 'image_orientation' should be an array of " + "length 6." + ) + n = image_positions.shape[0] + if n == 0: + raise ValueError( + "Argument 'image_positions' should contain at least 1 position." + ) + elif n == 1: + # Special case, we stipulate that this has spacing 0.0 + return 0.0 + + # Find normal vector to the imaging plane + v1 = image_orientation[:3] + v2 = image_orientation[3:] + v3 = np.cross(v1, v2) + + # Calculate distance of each slice from coordinate system origin along the + # normal vector + origin_distances = v3[None] @ image_positions.T + origin_distances = origin_distances.squeeze(0) + + if sort: + sort_index = np.argsort(origin_distances) + origin_distances = origin_distances[sort_index] + else: + sort_index = np.arange(image_positions.shape[0]) + + spacings = np.diff(origin_distances) + avg_spacing = spacings.mean() + + is_regular = np.isclose( + avg_spacing, + spacings, + atol=tol + ).all() + + # Additionally check that the vector from the first to the last plane lies + # approximately along v3 + pos1 = image_positions[sort_index[0], :] + pos2 = image_positions[sort_index[-1], :] + span = (pos2 - pos1) + span /= np.linalg.norm(span) + + is_perpendicular = abs(v3.T @ span - 1.0) < tol + + if is_regular and is_perpendicular: + return abs(avg_spacing) + else: + return None + + +def get_coordinate_system( + dataset: pydicom.Dataset, +) -> Optional[CoordinateSystemNames]: + """Determine which coordinate system an image uses. + + Parameters + ---------- + dataset: pydicom.Dataset + Dataset for which the coordinate system is required. + + Returns + ------- + Union[highdicom.enum.CoordinateSystemNames, None]: + Coordinate system used by the input image's frame of reference. Returns + None if the image does not specify a frame of reference. 
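+
+    Examples
+    --------
+    An illustrative sketch added in editing (``'ct_image.dcm'`` is a
+    hypothetical path, not part of the original patch). An image with a
+    FrameOfReferenceUID and no slide-specific attributes maps to the
+    patient coordinate system:
+
+    >>> import pydicom
+    >>> ds = pydicom.dcmread('ct_image.dcm')  # doctest: +SKIP
+    >>> get_coordinate_system(ds)             # doctest: +SKIP
+    <CoordinateSystemNames.PATIENT: 'PATIENT'>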
+ + """ + if not hasattr(dataset, 'FrameOfReferenceUID'): + return None + if ( + hasattr(dataset, 'ImageOrientationSlide') or + hasattr(dataset, 'ImageCenterPointCoordinatesSequence') + ): + return CoordinateSystemNames.SLIDE + else: + return CoordinateSystemNames.PATIENT diff --git a/src/highdicom/sr/content.py b/src/highdicom/sr/content.py index d6c294ec..b0442f4e 100644 --- a/src/highdicom/sr/content.py +++ b/src/highdicom/sr/content.py @@ -32,6 +32,7 @@ Scoord3DContentItem, UIDRefContentItem, ) +from highdicom._module_utils import is_multiframe_image logger = logging.getLogger(__name__) @@ -90,7 +91,7 @@ def _check_frame_numbers_valid_for_dataset( referenced_frame_numbers: Optional[Sequence[int]] ) -> None: if referenced_frame_numbers is not None: - if not hasattr(dataset, 'NumberOfFrames'): + if not is_multiframe_image(dataset): raise TypeError( 'The dataset does not represent a multi-frame dataset, so no ' 'referenced frame numbers should be provided.' From 32703b020fb44461088850eed09566827bdcb018 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Thu, 1 Feb 2024 12:34:29 +0000 Subject: [PATCH 02/93] Add multiframe tests --- tests/test_multiframe.py | 27 +++++++++++++++++++++++++++ tests/test_spatial.py | 25 +++++++++++++++++++++++++ 2 files changed, 52 insertions(+) create mode 100644 tests/test_multiframe.py diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py new file mode 100644 index 00000000..e0927a79 --- /dev/null +++ b/tests/test_multiframe.py @@ -0,0 +1,27 @@ +"""Tests for the highdicom._multiframe module.""" +from pydicom import dcmread +from pydicom.data import get_testdata_file, get_testdata_files + +from highdicom._multiframe import MultiFrameDBManager + + +def test_slice_spacing(): + ct_multiframe = dcmread( + get_testdata_file('eCT_Supplemental.dcm') + ) + db = MultiFrameDBManager(ct_multiframe) + + assert db.get_slice_spacing() == 10.0 + +def test_slice_spacing_irregular(): + ct_multiframe = dcmread( + get_testdata_file('eCT_Supplemental.dcm') + ) + + # Mock some iregular spacings + ct_multiframe.PerFrameFunctionalGroupsSequence[0].\ + PlanePositionSequence[0].ImagePositionPatient = [1.0, 0.0, 0.0] + + db = MultiFrameDBManager(ct_multiframe) + + assert db.get_slice_spacing() is None diff --git a/tests/test_spatial.py b/tests/test_spatial.py index bce8d1f5..d1717c9b 100644 --- a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -1,4 +1,6 @@ import numpy as np +from pydicom import dcmread +from pydicom.data import get_testdata_file, get_testdata_files import pytest from highdicom.spatial import ( @@ -6,6 +8,7 @@ PixelToReferenceTransformer, ReferenceToImageTransformer, ReferenceToPixelTransformer, + get_series_slice_spacing, ) @@ -451,3 +454,25 @@ def test_map_reference_to_image_coordinate(params, inputs, expected_outputs): transform = ReferenceToImageTransformer(**params) outputs = transform(inputs) np.testing.assert_array_almost_equal(outputs, expected_outputs) + + +def test_get_series_slice_spacing_irregular(): + # A series of single frame CT images + ct_series = [ + dcmread(f) + for f in get_testdata_files('dicomdirtests/77654033/CT2/*') + ] + spacing = get_series_slice_spacing(ct_series) + assert spacing is None + + +def test_get_series_slice_spacing_regular(): + # Use a subset of this test series that does have regular spacing + ct_files = [ + get_testdata_file('dicomdirtests/77654033/CT2/17196'), + get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17166'), + ] + ct_series = [dcmread(f) for f in 
ct_files] + spacing = get_series_slice_spacing(ct_series) + assert spacing == 1.25 From 4ba09b5ddc652c5a85734f5219c120f982951fd4 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Thu, 1 Feb 2024 16:42:03 +0000 Subject: [PATCH 03/93] Automatically populate the 3D tag --- data/test_files/seg_image_sm_control.dcm | Bin 20720 -> 20704 bytes src/highdicom/_multiframe.py | 1 - src/highdicom/seg/sop.py | 37 +++++++++- src/highdicom/spatial.py | 11 +++ tests/test_multiframe.py | 1 + tests/test_seg.py | 87 +++++++++++++++++++++++ tests/test_spatial.py | 2 +- 7 files changed, 134 insertions(+), 5 deletions(-) diff --git a/data/test_files/seg_image_sm_control.dcm b/data/test_files/seg_image_sm_control.dcm index bf695236c632163863a707d74b5eb252f8f8c2d2..34c3d8d73d0ad613634755da731c5ad51227327f 100644 GIT binary patch delta 36 rcmeycknzDn#tmGYlQlGzHgj<@De%}aFfjc4&+xzuNN@JiT&4~H-RTP! delta 49 zcmaE`knzJp#tmGYn{_yu6eJ567#RNjXLyhSq!k#%CTci~pj(QJE diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 57766034..43ea15a0 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -401,7 +401,6 @@ def __init__( # Build LUT from columns all_defs = ", ".join(col_defs) cmd = f'CREATE TABLE FrameLUT({all_defs})' - print(cmd) placeholders = ', '.join(['?'] * len(col_data)) with self._db_con: self._db_con.execute(cmd) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 776a1576..fc9e399f 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -75,7 +75,7 @@ SegmentAlgorithmTypeValues, ) from highdicom.seg.utils import iter_segments -from highdicom.spatial import ImageToReferenceTransformer +from highdicom.spatial import ImageToReferenceTransformer, get_regular_slice_spacing, get_series_slice_spacing from highdicom.sr.coding import CodedConcept from highdicom.valuerep import ( check_person_name, @@ -1502,7 +1502,7 @@ def __init__( ) # Dimension Organization Type - dimension_organization_type = self._check_dimension_organization_type( + dimension_organization_type = self._check_tiled_dimension_organization_type( dimension_organization_type=dimension_organization_type, is_tiled=is_tiled, omit_empty_frames=omit_empty_frames, @@ -1510,6 +1510,37 @@ def __init__( rows=self.Rows, columns=self.Columns, ) + if self._coordinate_system == CoordinateSystemNames.PATIENT: + spacing = get_regular_slice_spacing( + image_positions=np.array(plane_position_values[:, 0, :]), + image_orientation=np.array( + plane_orientation[0].ImageOrientationPatient + ), + sort=False, + enforce_postive=True, + ) + + if spacing is not None and spacing > 1.0: + # The image is a regular volume, so we should record this + dimension_organization_type = ( + DimensionOrganizationTypeValues.THREE_DIMENSIONAL + ) + # Also add the slice spacing to the pixel measures + ( + self.SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) = spacing + else: + if ( + dimension_organization_type == + DimensionOrganizationTypeValues.THREE_DIMENSIONAL + ): + raise ValueError( + 'Dimension organization "3D" has been specified, ' + 'but the source image is not a regularly-spaced 3D ' + 'volume.' 
+ ) if dimension_organization_type is not None: self.DimensionOrganizationType = dimension_organization_type.value @@ -2010,7 +2041,7 @@ def _add_slide_coordinate_metadata( self.ImageCenterPointCoordinatesSequence = [center_item] @staticmethod - def _check_dimension_organization_type( + def _check_tiled_dimension_organization_type( dimension_organization_type: Union[ DimensionOrganizationTypeValues, str, diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index a827aede..5c1dcb40 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -992,6 +992,7 @@ def get_regular_slice_spacing( image_orientation: np.ndarray, tol: float = DEFAULT_SPACING_TOLERANCE, sort: bool = True, + enforce_postive: bool = False, ) -> Optional[float]: """Get the regular spacing between set of image positions, if any. @@ -1018,6 +1019,12 @@ makes the function tolerant of unsorted inputs. Set to False to check whether the positions represent a 3D volume in the specific order in which they are passed. + enforce_postive: bool + If True and sort is False, require that the images are not only + regularly spaced but also that they are ordered along the direction of + the increasing normal vector, as opposed to being ordered regularly + along the direction of the decreasing normal vector. If sort is True, + this has no effect. Returns ------- @@ -1057,6 +1064,7 @@ # normal vector origin_distances = v3[None] @ image_positions.T origin_distances = origin_distances.squeeze(0) + print(origin_distances) if sort: sort_index = np.argsort(origin_distances) @@ -1072,6 +1080,9 @@ spacings, atol=tol ).all() + if is_regular and enforce_postive: + if avg_spacing < 0.0: + return None # Additionally check that the vector from the first to the last plane lies # approximately along v3 diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py index e0927a79..7274023b 100644 --- a/tests/test_multiframe.py +++ b/tests/test_multiframe.py @@ -13,6 +13,7 @@ def test_slice_spacing(): assert db.get_slice_spacing() == 10.0 + def test_slice_spacing_irregular(): ct_multiframe = dcmread( get_testdata_file('eCT_Supplemental.dcm') diff --git a/tests/test_seg.py b/tests/test_seg.py index 4a572eaf..8c9b3765 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -940,6 +940,7 @@ def test_construction(self): SegmentsOverlapValues.NO with pytest.raises(AttributeError): frame_item.PlanePositionSlideSequence + assert not hasattr(instance, "DimensionOrganizationType") self.check_dimension_index_vals(instance) def test_construction_2(self): @@ -1011,6 +1012,7 @@ def test_construction_2(self): SegmentsOverlapValues.NO with pytest.raises(AttributeError): frame_item.PlanePositionSequence + assert instance.DimensionOrganizationType == "TILED_SPARSE" self.check_dimension_index_vals(instance) def test_construction_3(self): @@ -1096,6 +1098,7 @@ def test_construction_3(self): SegmentsOverlapValues.NO with pytest.raises(AttributeError): frame_item.PlanePositionSlideSequence + assert not hasattr(instance, 'DimensionOrganizationType') self.check_dimension_index_vals(instance) def test_construction_4(self): @@ -1175,6 +1178,9 @@ def test_construction_4(self): SegmentsOverlapValues.NO with pytest.raises(AttributeError): frame_item.PlanePositionSlideSequence + + # Frames are regularly spaced but ordered the wrong way in this case + assert not hasattr(instance, 'DimensionOrganizationType') self.check_dimension_index_vals(instance) def 
test_construction_5(self): @@ -1259,6 +1265,7 @@ def test_construction_5(self): SegmentsOverlapValues.NO with pytest.raises(AttributeError): frame_item.PlanePositionSlideSequence + assert not hasattr(instance, 'DimensionOrganizationType') self.check_dimension_index_vals(instance) def test_construction_6(self): @@ -1345,6 +1352,7 @@ def test_construction_6(self): assert len(derivation_image_item.SourceImageSequence) == 1 assert SegmentsOverlapValues[instance.SegmentsOverlap] == \ SegmentsOverlapValues.NO + assert not hasattr(instance, 'DimensionOrganizationType') def test_construction_7(self): # A chest X-ray with no frame of reference and multiple segments @@ -1435,6 +1443,85 @@ def test_construction_7(self): assert len(derivation_image_item.SourceImageSequence) == 1 assert SegmentsOverlapValues[instance.SegmentsOverlap] == \ SegmentsOverlapValues.NO + assert not hasattr(instance, 'DimensionOrganizationType') + + def test_construction_3d_multiframe(self): + # The CT multiframe image is already a volume, but the frames are + # ordered the wrong way + volume_multiframe = deepcopy(self._ct_multiframe) + positions = [ + fm.PlanePositionSequence[0].ImagePositionPatient + for fm in volume_multiframe.PerFrameFunctionalGroupsSequence + ] + positions = positions[::-1] + for pos, fm in zip( + positions, + volume_multiframe.PerFrameFunctionalGroupsSequence + ): + fm.PlanePositionSequence[0].ImagePositionPatient = pos + + # Segmentation instance from an enhanced (multi-frame) CT image + instance = Segmentation( + [volume_multiframe], + self._ct_multiframe_mask_array, + SegmentationTypeValues.FRACTIONAL.value, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number + ) + # This is a "volume" image, so the output instance should have + # the DimensionOrganizationType set correctly and should have deduced + # the spacing between slices + assert instance.DimensionOrganizationType == "3D" + spacing = ( + instance + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) + assert spacing == 10.0 + + def test_construction_3d_singleframe(self): + # The CT single frame series is a volume if you omit one of the images + ct_files = [ + get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17166'), + get_testdata_file('dicomdirtests/77654033/CT2/17196'), + ] + ct_series = [dcmread(f) for f in ct_files] + + # Segmentation instance from an enhanced (multi-frame) CT image + instance = Segmentation( + ct_series, + self._ct_series_mask_array[:3], + SegmentationTypeValues.FRACTIONAL.value, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number + ) + # This is a "volume" image, so the output instance should have + # the DimensionOrganizationType set correctly and should have deduced + # the spacing between slices + assert instance.DimensionOrganizationType == "3D" + spacing = ( + instance + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) + assert spacing == 1.25 def test_construction_workers(self): # Create a segmentation with multiple workers diff --git a/tests/test_spatial.py b/tests/test_spatial.py index d1717c9b..99c7ac2b 
100644 --- a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -469,8 +469,8 @@ def test_get_series_slice_spacing_irregular(): def test_get_series_slice_spacing_regular(): # Use a subset of this test series that does have regular spacing ct_files = [ - get_testdata_file('dicomdirtests/77654033/CT2/17196'), get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17196'), get_testdata_file('dicomdirtests/77654033/CT2/17166'), ] ct_series = [dcmread(f) for f in ct_files] From ec26339a33ce84bc802ae04e4407143abe06bd8f Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 2 Feb 2024 10:01:57 +0000 Subject: [PATCH 04/93] Add ability to determine whether a segmentation is a volume --- src/highdicom/_multiframe.py | 86 +++++++++++++++++++++++++++++++----- src/highdicom/seg/sop.py | 35 +++++++++++++++ src/highdicom/spatial.py | 1 - 3 files changed, 109 insertions(+), 13 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 43ea15a0..d88e1de6 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -1,6 +1,7 @@ """Tools for working with multiframe DICOM images.""" from collections import Counter from contextlib import contextmanager +import itertools import logging import sqlite3 from typing import ( @@ -750,8 +751,8 @@ def get_unique_dim_index_values( def get_slice_spacing( self, + split_dimensions: Optional[Sequence[str]] = None, tol: float = DEFAULT_SPACING_TOLERANCE, - split_dimensions: Optional[Sequence[BaseTag]] = None, ) -> Optional[float]: """Get slice spacing, if any, for the image. @@ -785,8 +786,8 @@ def get_slice_spacing( frames exist. For example, if time were included as a split dimension, this function will check whether a 3D volume exists at each timepoint (and that the volume is the same at each time point). Each dimension - index should be provided as a base tags representing the Dimension - Index Pointer. + index should be provided as the keyword representing the relevant + DICOM attribute. 
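+        As an illustration (an editorial example, not part of the
+        original patch): for a multi-segment segmentation image, passing
+        ``split_dimensions=['ReferencedSegmentNumber']`` checks whether
+        the frames belonging to each individual segment form a regular
+        volume over a common set of image positions.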
Returns ------- @@ -807,16 +808,17 @@ # Stipulate that this does represent a volume return 0.0 + cur = self._db_con.cursor() + if split_dimensions is None: - cur = self._db_con.cursor() - query = """ - SELECT - ImagePositionPatient_0, - ImagePositionPatient_1, - ImagePositionPatient_2 - FROM FrameLUT; - """ + query = ( + 'SELECT ' + 'ImagePositionPatient_0, ' + 'ImagePositionPatient_1, ' + 'ImagePositionPatient_2 ' + 'FROM FrameLUT;' + ) image_positions = np.array( [r for r in cur.execute(query)] @@ -825,10 +827,70 @@ image_positions=image_positions, image_orientation=np.array(self.shared_image_orientation), sort=True, + tol=tol, ) + else: + dim_values = [] - return spacing + # Get lists of all unique values for the specified dimensions + for kw in split_dimensions: + # Find unique values of this attribute + query = f""" + SELECT DISTINCT {kw} FROM FrameLUT; + """ + + dim_values.append( + [ + v[0] for v in cur.execute(query) + ] + ) + + # Check that each combination of the split dimension has the same + # list of image positions + all_image_positions = [] + for vals in itertools.product(*dim_values): + filter_str = ' AND '.join( + f'{kw} = {val}' for kw, val in zip(split_dimensions, vals) + ) + query = ( + 'SELECT ' + 'ImagePositionPatient_0, ' + 'ImagePositionPatient_1, ' + 'ImagePositionPatient_2 ' + 'FROM FrameLUT ' + 'WHERE ' + f'{filter_str} ' + 'ORDER BY ' + 'ImagePositionPatient_0, ' + 'ImagePositionPatient_1, ' + 'ImagePositionPatient_2 ' + ';' + ) + image_positions = np.array( + [r for r in cur.execute(query)] + ) + all_image_positions.append(image_positions) + + if len(all_image_positions) > 1: + for image_positions in all_image_positions: + if not np.array_equal( + image_positions, + all_image_positions[0] + ): + # The volumes described by each combination of the + # split dimensions have different sets of image + # positions + return None + + spacing = get_regular_slice_spacing( + image_positions=all_image_positions[0], + image_orientation=np.array(self.shared_image_orientation), + sort=True, + tol=tol, + ) + + return spacing @contextmanager diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index fc9e399f..ac19b7d7 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -3120,6 +3120,41 @@ def segmented_property_types(self) -> List[CodedConcept]: return types + def is_3d_volume( + self, + split_dimensions: Optional[Sequence[str]] = None, + ): + """Determine whether this segmentation is a 3D volume. + + For this purpose, a 3D volume is a set of regularly spaced slices in 3D + space, distributed at regular spacings along the vector normal to each + image plane. + + Parameters + ---------- + split_dimensions: Union[Sequence[str], None], optional + Keywords of attributes along which to split the frames into + groups before checking for regular spacing, as described in + MultiFrameDBManager.get_slice_spacing. + + """ + if split_dimensions is not None: + split_dimensions = list(split_dimensions) + if len(split_dimensions) == 0: + raise ValueError( + 'Argument "split_dimensions" must not be empty.' + ) + if 'ReferencedSegmentNumber' in split_dimensions: + raise ValueError( + 'The value "ReferencedSegmentNumber" should not be ' + 'included in the split dimensions.' 
+ ) + else: + split_dimensions = [] + + split_dimensions.append('ReferencedSegmentNumber') + + spacing = self._db_man.get_slice_spacing(split_dimensions) + + return spacing is not None + def _get_pixels_by_seg_frame( self, output_shape: Union[int, Tuple[int, int]], diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 5c1dcb40..5285bb88 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -1064,7 +1064,6 @@ # normal vector origin_distances = v3[None] @ image_positions.T origin_distances = origin_distances.squeeze(0) - print(origin_distances) if sort: sort_index = np.argsort(origin_distances) From f598555c3690ee6a67f40af38d5a4d5d7a5a446e Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 2 Feb 2024 22:03:31 +0000 Subject: [PATCH 05/93] Fix plane ordering bug; split spatial functions --- src/highdicom/seg/content.py | 58 ++++++++++---- src/highdicom/seg/sop.py | 96 ++++++++++++----------- src/highdicom/spatial.py | 146 +++++++++++++++++++++++++++++------ 3 files changed, 217 insertions(+), 83 deletions(-) diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py index cf6672a4..36873bd5 100644 --- a/src/highdicom/seg/content.py +++ b/src/highdicom/seg/content.py @@ -14,7 +14,11 @@ ) from highdicom.enum import CoordinateSystemNames from highdicom.seg.enum import SegmentAlgorithmTypeValues -from highdicom.spatial import map_pixel_into_coordinate_system +from highdicom.spatial import ( + _get_slice_distances, + get_normal_vector, + map_pixel_into_coordinate_system, +) from highdicom.sr.coding import CodedConcept from highdicom.uid import UID from highdicom.utils import compute_plane_position_slide_per_frame @@ -605,7 +609,8 @@ def get_index_position(self, pointer: str) -> int: def get_index_values( self, - plane_positions: Sequence[PlanePositionSequence] + plane_positions: Sequence[PlanePositionSequence], + image_orientation: Optional[Sequence[float]] = None, ) -> Tuple[np.ndarray, np.ndarray]: """Get values of indexed attributes that specify position of planes. @@ -626,6 +631,15 @@ plane_indices: numpy.ndarray 1D array of planes indices for sorting frames according to their spatial position specified by the dimension index + image_orientation: Union[Sequence[float], None], optional + An image orientation to use to order frames within a 3D coordinate + system. By default (if ``image_orientation`` is ``None``), the + plane positions are ordered using their raw numerical values and + not along any particular spatial vector. If ``image_orientation`` + is provided, planes are ordered along the positive direction of the + vector normal to the specified orientation. Should be a sequence of + 6 floats. This is only valid when plane position inputs contain only + the ImagePositionPatient. Note ---- @@ -659,21 +673,37 @@ for p in plane_positions ]) + if image_orientation is not None: + if not hasattr(plane_positions[0][0], 'ImagePositionPatient'): + raise ValueError( + 'Provided "image_orientation" is only valid when ' + 'plane_positions contain the ImagePositionPatient.' 
+ ) + normal_vector = get_normal_vector(image_orientation) + origin_distances = _get_slice_distances( + plane_position_values[:, 0, :], + normal_vector, + ) + _, plane_sort_indices = np.unique( + origin_distances, + return_index=True, + ) + else: + # Build an array that can be used to sort planes according to the + # Dimension Index Value based on the order of the items in the + # Dimension Index Sequence. + _, plane_sort_indices = np.unique( + plane_position_values, + axis=0, + return_index=True + ) if len(plane_sort_indices) != len(plane_positions): raise ValueError( - "Input image/frame positions are not unique according to the " - "Dimension Index Pointers. The generated segmentation would be " - "ambiguous. Ensure that source images/frames have distinct " - "locations." + 'Input image/frame positions are not unique according to the ' + 'Dimension Index Pointers. The generated segmentation would be ' + 'ambiguous. Ensure that source images/frames have distinct ' + 'locations.' ) return (plane_position_values, plane_sort_indices) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index ac19b7d7..e6786844 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -1460,9 +1460,15 @@ def __init__( # number) plane_sort_index is a list of indices into the input # planes giving the order in which they should be arranged to # correctly sort them for inclusion into the segmentation + sort_orientation = ( + plane_orientation[0].ImageOrientationPatient + if self._coordinate_system == CoordinateSystemNames.PATIENT + else None + ) plane_position_values, plane_sort_index = \ self.DimensionIndexSequence.get_index_values( - plane_positions + plane_positions, + image_orientation=sort_orientation, ) are_spatial_locations_preserved = ( @@ -1501,49 +1507,6 @@ def __init__( "the source image." ) - # Dimension Organization Type - dimension_organization_type = self._check_tiled_dimension_organization_type( - dimension_organization_type=dimension_organization_type, - is_tiled=is_tiled, - omit_empty_frames=omit_empty_frames, - plane_positions=plane_positions, - rows=self.Rows, - columns=self.Columns, - ) - if self._coordinate_system == CoordinateSystemNames.PATIENT: - spacing = get_regular_slice_spacing( - image_positions=np.array(plane_position_values[:, 0, :]), - image_orientation=np.array( - plane_orientation[0].ImageOrientationPatient - ), - sort=False, - enforce_postive=True, - ) - - if spacing is not None and spacing > 1.0: - # The image is a regular volume, so we should record this - dimension_organization_type = ( - DimensionOrganizationTypeValues.THREE_DIMENSIONAL - ) - # Also add the slice spacing to the pixel measures - ( - self.SharedFunctionalGroupsSequence[0] - .PixelMeasuresSequence[0] - .SpacingBetweenSlices - ) = spacing - else: - if ( - dimension_organization_type == - DimensionOrganizationTypeValues.THREE_DIMENSIONAL - ): - raise ValueError( - 'Dimension organization "3D" has been specified, ' - 'but the source image is not a regularly-spaced 3D ' - 'volume.' 
- ) - if dimension_organization_type is not None: - self.DimensionOrganizationType = dimension_organization_type.value - # Find indices such that empty planes are removed if omit_empty_frames: if tile_pixel_array: @@ -1589,6 +1552,51 @@ else: unique_dimension_values = [None] + # Dimension Organization Type + dimension_organization_type = self._check_tiled_dimension_organization_type( + dimension_organization_type=dimension_organization_type, + is_tiled=is_tiled, + omit_empty_frames=omit_empty_frames, + plane_positions=plane_positions, + rows=self.Rows, + columns=self.Columns, + ) + if self._coordinate_system == CoordinateSystemNames.PATIENT: + spacing = get_regular_slice_spacing( + image_positions=np.array( + plane_position_values[plane_sort_index, 0, :] + ), + image_orientation=np.array( + plane_orientation[0].ImageOrientationPatient + ), + sort=False, + enforce_positive=True, + ) + + if spacing is not None and spacing > 0.0: + # The image is a regular volume, so we should record this + dimension_organization_type = ( + DimensionOrganizationTypeValues.THREE_DIMENSIONAL + ) + # Also add the slice spacing to the pixel measures + ( + self.SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) = spacing + else: + if ( + dimension_organization_type == + DimensionOrganizationTypeValues.THREE_DIMENSIONAL + ): + raise ValueError( + 'Dimension organization "3D" has been specified, ' + 'but the source image is not a regularly-spaced 3D ' + 'volume.' + ) + if dimension_organization_type is not None: + self.DimensionOrganizationType = dimension_organization_type.value + if ( has_ref_frame_uid and self._coordinate_system == CoordinateSystemNames.SLIDE diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 5285bb88..3b6a0376 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -1,4 +1,4 @@ -from typing import Optional, Sequence, Tuple +from typing import List, Optional, Sequence, Tuple import numpy as np import pydicom @@ -988,11 +988,11 @@ def get_series_slice_spacing( def get_regular_slice_spacing( - image_positions: np.ndarray, - image_orientation: np.ndarray, + image_positions: Sequence[Sequence[float]], + image_orientation: Sequence[float], tol: float = DEFAULT_SPACING_TOLERANCE, sort: bool = True, - enforce_postive: bool = False, + enforce_positive: bool = False, ) -> Optional[float]: """Get the regular spacing between set of image positions, if any. A 3D volume consists of regularly spaced slices with orthogonal axes, i.e. the slices are spaced equally along the direction orthogonal to the in-plane image coordinates. Note that we stipulate that a single image is a 3D volume for the purposes of this function. In this case the returned slice spacing will be 0.0. Parameters ---------- - image_positions: numpy.ndarray - Array of image positions for multiple frames. Should be a numpy array of - shape (N, 3) where N is the number of frames. - image_orientation: numpy.ndarray + image_positions: Sequence[Sequence[float]] + Array of image positions for multiple frames. Should be a 2D array of + shape (N, 3) where N is the number of frames. Either a numpy array or + anything convertible to it may be passed. + image_orientation: Sequence[float] Image orientation as direction cosine values taken directly from the - ImageOrientationPatient attribute. 1D array of length 6. + ImageOrientationPatient attribute. 1D array of length 6. Either a numpy + array or anything convertible to it may be passed. tol: float Tolerance for determining spacing regularity. If slice spacings vary by less than this value, they are considered to be regular. sort: bool Sort the image positions before finding the spacing. If True, this makes the function tolerant of unsorted inputs. 
Set to False to check whether the positions represent a 3D volume in the specific order in which they are passed. - enforce_postive: bool + enforce_positive: bool If True and sort is False, require that the images are not only regularly spaced but also that they are ordered along the direction of the increasing normal vector, as opposed to being ordered regularly along the direction of the decreasing normal vector. If sort is True, this has no effect. Returns ------- Union[float, None] If the image positions are regularly spaced, the (absolute value of the) slice spacing. If the image positions are not regularly spaced, returns None. """ image_positions = np.array(image_positions) image_orientation = np.array(image_orientation) if image_positions.ndim != 2 or image_positions.shape[1] != 3: raise ValueError( "Argument 'image_positions' should be an (N, 3) array." ) if image_orientation.ndim != 1 or image_orientation.shape[0] != 6: raise ValueError( "Argument 'image_orientation' should be an array of " "length 6." ) n = image_positions.shape[0] if n == 0: raise ValueError( "Argument 'image_positions' should contain at least 1 position." ) elif n == 1: # Special case, we stipulate that this has spacing 0.0 return 0.0 - # Find normal vector to the imaging plane - v1 = image_orientation[:3] - v2 = image_orientation[3:] - v3 = np.cross(v1, v2) + normal_vector = get_normal_vector(image_orientation) # Calculate distance of each slice from coordinate system origin along the # normal vector - origin_distances = v3[None] @ image_positions.T - origin_distances = origin_distances.squeeze(0) + origin_distances = _get_slice_distances(image_positions, normal_vector) if sort: sort_index = np.argsort(origin_distances) @@ -1072,6 +1071,9 @@ spacings, atol=tol ).all() - if is_regular and enforce_postive: + if is_regular and enforce_positive: if avg_spacing < 0.0: return None # Additionally check that the vector from the first to the last plane lies - # approximately along v3 + # approximately along the normal vector pos1 = image_positions[sort_index[0], :] pos2 = image_positions[sort_index[-1], :] span = (pos2 - pos1) span /= np.linalg.norm(span) - is_perpendicular = abs(v3.T @ span - 1.0) < tol + is_perpendicular = abs(normal_vector.T @ span - 1.0) < tol if is_regular and is_perpendicular: return abs(avg_spacing) else: return None + def get_normal_vector( + image_orientation: Sequence[float], + ): + """Get a vector normal to an imaging plane. + + Parameters + ---------- + image_orientation: Sequence[float] + Image orientation in the standard DICOM format used for the + ImageOrientationPatient and ImageOrientationSlide attributes, + consisting of 6 numbers representing the direction cosines along the + rows (first three elements) and columns (second three elements). + + Returns + ------- + np.ndarray: + Unit normal vector as a NumPy array with shape (3, ). + + """ + image_orientation = np.array(image_orientation) + if image_orientation.ndim != 1 or image_orientation.shape[0] != 6: + raise ValueError( + "Argument 'image_orientation' should be an array of " + "length 6." + ) + + # Find normal vector to the imaging plane + v1 = image_orientation[:3] + v2 = image_orientation[3:] + v3 = np.cross(v1, v2) + + return v3 + + +def get_plane_sort_index( + image_positions: Sequence[Sequence[float]], + image_orientation: Sequence[float], +) -> List[int]: + """Get a sort index that orders planes along their normal vector. + + Parameters + ---------- + image_positions: Sequence[Sequence[float]] + Array of image positions for multiple frames. Should be a 2D array of + shape (N, 3) where N is the number of frames. Either a numpy array or + anything convertible to it may be passed. 
+ image_orientation: Sequence[float] + Image orientation as direction cosine values taken directly from the + ImageOrientationPatient attribute. 1D array of length 6. Either a numpy + array or anything convertible to it may be passed. + + Returns + ------- + List[int] + Sorting index for the input planes. Element i of this list gives the + index in the original list of the frames such that the output list + is sorted along the positive direction of the normal vector of the + imaging plane. + + """ + image_positions = np.array(image_positions) + image_orientation = np.array(image_orientation) + + normal_vector = get_normal_vector(image_orientation) + + # Calculate distance of each slice from coordinate system origin along the + # normal vector + origin_distances = _get_slice_distances(image_positions, normal_vector) + + sort_index = np.argsort(origin_distances) + + return sort_index.tolist() + + +def _get_slice_distances( + image_positions: np.ndarray, + normal_vector: np.ndarray, +) -> np.ndarray: + """Get distances of a set of planes from the origin. + + For each plane position, find (signed) distance from origin along the vector normal + to the imaging plane. + + Parameters + ---------- + image_positions: np.ndarray + Image positions array. 2D array of shape (N, 3) where N is the number of + planes and each row gives the (x, y, z) image position of a plane. + normal_vector: np.ndarray + Unit normal vector (perpendicular to the imaging plane). + + Returns + ------- + np.ndarray: + 1D array of shape (N, ) giving signed distance from the origin of each + plane position. + + """ + origin_distances = normal_vector[None] @ image_positions.T + origin_distances = origin_distances.squeeze(0) + + return origin_distances + + def get_coordinate_system( dataset: pydicom.Dataset, ) -> Optional[CoordinateSystemNames]: """Determine which coordinate system an image uses. Parameters ---------- From 13e546b50e3cb8946647beb63308a92b975abeb0 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Thu, 1 Feb 2024 12:18:01 +0000 Subject: [PATCH 06/93] Generalize multiframe database manager --- src/highdicom/_module_utils.py | 23 + src/highdicom/_multiframe.py | 882 +++++++++++++++++++++++++++++++++ src/highdicom/content.py | 8 +- src/highdicom/pm/content.py | 5 +- src/highdicom/pm/sop.py | 3 +- src/highdicom/pr/content.py | 13 +- src/highdicom/seg/content.py | 10 +- src/highdicom/seg/sop.py | 719 ++------------------------- src/highdicom/spatial.py | 199 +++++++- src/highdicom/sr/content.py | 3 +- 10 files changed, 1184 insertions(+), 681 deletions(-) create mode 100644 src/highdicom/_multiframe.py diff --git a/src/highdicom/_module_utils.py b/src/highdicom/_module_utils.py index f73f246a..376547e4 100644 --- a/src/highdicom/_module_utils.py +++ b/src/highdicom/_module_utils.py @@ -281,3 +281,26 @@ def does_iod_have_pixel_data(sop_class_uid: str) -> bool: return any( is_attribute_in_iod(attr, sop_class_uid) for attr in pixel_attrs ) + + +def is_multiframe_image(dataset: Dataset): + """Determine whether an image is a multiframe image. + + The definition used is whether the IOD allows for multiple frames, not + whether this particular instance has more than one frame. + + Parameters + ---------- + dataset: pydicom.Dataset + A dataset to check. + + Returns + ------- + bool: + Whether the image belongs to a multiframe IOD. 
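+
+    Examples
+    --------
+    An editorial sketch using pydicom's bundled test data (the same
+    Enhanced CT file exercised by the tests earlier in this series):
+
+    >>> from pydicom import dcmread
+    >>> from pydicom.data import get_testdata_file
+    >>> ds = dcmread(get_testdata_file('eCT_Supplemental.dcm'))
+    >>> is_multiframe_image(ds)  # Enhanced CT IOD allows multiple frames
+    True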
+ + """ + return is_attribute_in_iod( + 'PerFrameFunctionalGroupsSequence', + dataset.SOPClassUID, + ) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py new file mode 100644 index 00000000..57766034 --- /dev/null +++ b/src/highdicom/_multiframe.py @@ -0,0 +1,882 @@ +"""Tools for working with multiframe DICOM images.""" +from collections import Counter +from contextlib import contextmanager +import logging +import sqlite3 +from typing import ( + Any, + Iterable, + Dict, + Generator, + List, + Optional, + Set, + Sequence, + Tuple, + Union, +) +import numpy as np +from pydicom import Dataset +from pydicom.tag import BaseTag +from pydicom.datadict import get_entry, tag_for_keyword +from pydicom.multival import MultiValue + +from highdicom.enum import CoordinateSystemNames +from highdicom.seg.enum import SpatialLocationsPreservedValues +from highdicom.spatial import ( + DEFAULT_SPACING_TOLERANCE, + get_coordinate_system, + get_regular_slice_spacing, +) +from highdicom.uid import UID as hd_UID +from highdicom.utils import ( + iter_tiled_full_frame_data, +) + + +_NO_FRAME_REF_VALUE = -1 + + +logger = logging.getLogger(__name__) + + +class MultiFrameDBManager: + + """Database manager for frame information in a multiframe image.""" + + # Dictionary mapping DCM VRs to appropriate SQLite types + _DCM_SQL_TYPE_MAP = { + 'CS': 'VARCHAR', + 'DS': 'REAL', + 'FD': 'REAL', + 'FL': 'REAL', + 'IS': 'INTEGER', + 'LO': 'TEXT', + 'LT': 'TEXT', + 'PN': 'TEXT', + 'SH': 'TEXT', + 'SL': 'INTEGER', + 'SS': 'INTEGER', + 'ST': 'TEXT', + 'UI': 'TEXT', + 'UL': 'INTEGER', + 'UR': 'TEXT', + 'US or SS': 'INTEGER', + 'US': 'INTEGER', + 'UT': 'TEXT', + } + + def __init__( + self, + dataset: Dataset, + ): + """ + + Parameters + ---------- + dataset: pydicom.Dataset + Dataset of a multi-frame image. 
+ + """ + self._coordinate_system = get_coordinate_system(dataset) + referenced_uids = self._get_ref_instance_uids(dataset) + all_referenced_sops = {uids[2] for uids in referenced_uids} + + self._is_tiled_full = ( + hasattr(dataset, 'DimensionOrganizationType') and + dataset.DimensionOrganizationType == 'TILED_FULL' + ) + + self._dim_ind_pointers = [ + dim_ind.DimensionIndexPointer + for dim_ind in dataset.DimensionIndexSequence + ] + func_grp_pointers = {} + for dim_ind in dataset.DimensionIndexSequence: + ptr = dim_ind.DimensionIndexPointer + if ptr in self._dim_ind_pointers: + grp_ptr = getattr(dim_ind, "FunctionalGroupPointer", None) + func_grp_pointers[ptr] = grp_ptr + + # We mav want to gather additional information that is not one of the + # indices + extra_collection_pointers = [] + extra_collection_func_pointers = {} + if self._coordinate_system == CoordinateSystemNames.PATIENT: + image_position_tag = tag_for_keyword('ImagePositionPatient') + plane_pos_seq_tag = tag_for_keyword('PlanePositionSequence') + # Include the image position if it is not an index + if image_position_tag not in self._dim_ind_pointers: + extra_collection_pointers.append(image_position_tag) + extra_collection_func_pointers[ + image_position_tag + ] = plane_pos_seq_tag + + dim_ind_positions = { + dim_ind.DimensionIndexPointer: i + for i, dim_ind in enumerate(dataset.DimensionIndexSequence) + } + dim_indices: Dict[int, List[int]] = { + ptr: [] for ptr in self._dim_ind_pointers + } + dim_values: Dict[int, List[Any]] = { + ptr: [] for ptr in self._dim_ind_pointers + } + + extra_collection_values: Dict[int, List[Any]] = { + ptr: [] for ptr in extra_collection_pointers + } + + self.shared_image_orientation = self._get_shared_image_orientation( + dataset + ) + + self._single_source_frame_per_frame = True + + if self._is_tiled_full: + # With TILED_FULL, there is no PerFrameFunctionalGroupsSequence, + # so we have to deduce the per-frame information + row_tag = tag_for_keyword('RowPositionInTotalImagePixelMatrix') + col_tag = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix') + x_tag = tag_for_keyword('XOffsetInSlideCoordinateSystem') + y_tag = tag_for_keyword('YOffsetInSlideCoordinateSystem') + z_tag = tag_for_keyword('ZOffsetInSlideCoordinateSystem') + tiled_full_dim_indices = {row_tag, col_tag} + if len(tiled_full_dim_indices - set(dim_indices.keys())) > 0: + raise RuntimeError( + 'Expected images with ' + '"DimensionOrganizationType" of "TILED_FULL" ' + 'to have the following dimension index pointers: ' + 'RowPositionInTotalImagePixelMatrix, ' + 'ColumnPositionInTotalImagePixelMatrix.' 
+ ) + self._single_source_frame_per_frame = False + ( + channel_numbers, + _, + dim_values[col_tag], + dim_values[row_tag], + dim_values[x_tag], + dim_values[y_tag], + dim_values[z_tag], + ) = zip(*iter_tiled_full_frame_data(dataset)) + + if hasattr(dataset, 'SegmentSequence'): + segment_tag = tag_for_keyword('ReferencedSegmentNumber') + dim_values[segment_tag] = channel_numbers + elif hasattr(dataset, 'OpticalPathSequence'): + op_tag = tag_for_keyword('OpticalPathIdentifier') + dim_values[op_tag] = channel_numbers + + # Create indices for each of the dimensions + for ptr, vals in dim_values.items(): + _, indices = np.unique(vals, return_inverse=True) + dim_indices[ptr] = (indices + 1).tolist() + + # There is no way to deduce whether the spatial locations are + # preserved in the tiled full case + self._locations_preserved = None + + referenced_instances = None + referenced_frames = None + else: + referenced_instances: Optional[List[str]] = [] + referenced_frames: Optional[List[int]] = [] + + # Create a list of source images and check for spatial locations + # preserved + locations_list_type = List[ + Optional[SpatialLocationsPreservedValues] + ] + locations_preserved: locations_list_type = [] + + for frame_item in dataset.PerFrameFunctionalGroupsSequence: + # Get dimension indices for this frame + content_seq = frame_item.FrameContentSequence[0] + indices = content_seq.DimensionIndexValues + if not isinstance(indices, (MultiValue, list)): + # In case there is a single dimension index + indices = [indices] + if len(indices) != len(self._dim_ind_pointers): + raise RuntimeError( + 'Unexpected mismatch between dimension index values in ' + 'per-frames functional groups sequence and items in ' + 'the dimension index sequence.' + ) + for ptr in self._dim_ind_pointers: + dim_indices[ptr].append(indices[dim_ind_positions[ptr]]) + grp_ptr = func_grp_pointers[ptr] + if grp_ptr is not None: + dim_val = frame_item[grp_ptr][0][ptr].value + else: + dim_val = frame_item[ptr].value + dim_values[ptr].append(dim_val) + for ptr in extra_collection_pointers: + grp_ptr = extra_collection_func_pointers[ptr] + if grp_ptr is not None: + dim_val = frame_item[grp_ptr][0][ptr].value + else: + dim_val = frame_item[ptr].value + extra_collection_values[ptr].append(dim_val) + + frame_source_instances = [] + frame_source_frames = [] + for der_im in getattr( + frame_item, + 'DerivationImageSequence', + [] + ): + for src_im in getattr( + der_im, + 'SourceImageSequence', + [] + ): + frame_source_instances.append( + src_im.ReferencedSOPInstanceUID + ) + if hasattr(src_im, 'SpatialLocationsPreserved'): + locations_preserved.append( + SpatialLocationsPreservedValues( + src_im.SpatialLocationsPreserved + ) + ) + else: + locations_preserved.append( + None + ) + + if hasattr(src_im, 'ReferencedFrameNumber'): + if isinstance( + src_im.ReferencedFrameNumber, + MultiValue + ): + frame_source_frames.extend( + [ + int(f) + for f in src_im.ReferencedFrameNumber + ] + ) + else: + frame_source_frames.append( + int(src_im.ReferencedFrameNumber) + ) + else: + frame_source_frames.append(_NO_FRAME_REF_VALUE) + + if ( + len(set(frame_source_instances)) != 1 or + len(set(frame_source_frames)) != 1 + ): + self._single_source_frame_per_frame = False + else: + ref_instance_uid = frame_source_instances[0] + if ref_instance_uid not in all_referenced_sops: + raise AttributeError( + f'SOP instance {ref_instance_uid} referenced in ' + 'the source image sequence is not included in the ' + 'Referenced Series Sequence or Studies Containing ' + 'Other 
Referenced Instances Sequence. This is an ' + 'error with the integrity of the Segmentation ' + 'object.' + ) + referenced_instances.append(ref_instance_uid) + referenced_frames.append(frame_source_frames[0]) + + # Summarise + if any( + isinstance(v, SpatialLocationsPreservedValues) and + v == SpatialLocationsPreservedValues.NO + for v in locations_preserved + ): + + self._locations_preserved: Optional[ + SpatialLocationsPreservedValues + ] = SpatialLocationsPreservedValues.NO + elif all( + isinstance(v, SpatialLocationsPreservedValues) and + v == SpatialLocationsPreservedValues.YES + for v in locations_preserved + ): + self._locations_preserved = SpatialLocationsPreservedValues.YES + else: + self._locations_preserved = None + + if not self._single_source_frame_per_frame: + referenced_instances = None + referenced_frames = None + + self._db_con: sqlite3.Connection = sqlite3.connect(":memory:") + + self._create_ref_instance_table(referenced_uids) + + self._number_of_frames = dataset.NumberOfFrames + + # Construct the columns and values to put into a frame look-up table + # table within sqlite. There will be one row per frame in the + # segmentation instance + col_defs = [] # SQL column definitions + col_data = [] # lists of column data + + # Frame number column + col_defs.append('FrameNumber INTEGER PRIMARY KEY') + col_data.append(list(range(1, self._number_of_frames + 1))) + + self._dim_ind_col_names = {} + for i, t in enumerate(dim_indices.keys()): + vr, vm_str, _, _, kw = get_entry(t) + if kw == '': + kw = f'UnknownDimensionIndex{i}' + ind_col_name = kw + '_DimensionIndexValues' + self._dim_ind_col_names[t] = ind_col_name + + # Add column for dimension index + col_defs.append(f'{ind_col_name} INTEGER NOT NULL') + col_data.append(dim_indices[t]) + + # Add column for dimension value + # For this to be possible, must have a fixed VM + # and a VR that we can map to a sqlite type + # Otherwise, we just omit the data from the db + if kw == 'ReferencedSegmentNumber': + # Special case since this tag technically has VM 1-n + vm = 1 + else: + try: + vm = int(vm_str) + except ValueError: + continue + try: + sql_type = self._DCM_SQL_TYPE_MAP[vr] + except KeyError: + continue + + if vm > 1: + for d in range(vm): + data = [el[d] for el in dim_values[t]] + col_defs.append(f'{kw}_{d} {sql_type} NOT NULL') + col_data.append(data) + else: + # Single column + col_defs.append(f'{kw} {sql_type} NOT NULL') + col_data.append(dim_values[t]) + + for i, t in enumerate(extra_collection_pointers): + vr, vm_str, _, _, kw = get_entry(t) + + # Add column for dimension value + # For this to be possible, must have a fixed VM + # and a VR that we can map to a sqlite type + # Otherwise, we just omit the data from the db + vm = int(vm_str) + sql_type = self._DCM_SQL_TYPE_MAP[vr] + + if vm > 1: + for d in range(vm): + data = [el[d] for el in extra_collection_values[t]] + col_defs.append(f'{kw}_{d} {sql_type} NOT NULL') + col_data.append(data) + else: + # Single column + col_defs.append(f'{kw} {sql_type} NOT NULL') + col_data.append(dim_values[t]) + + # Columns related to source frames, if they are usable for indexing + if (referenced_frames is None) != (referenced_instances is None): + raise TypeError( + "'referenced_frames' and 'referenced_instances' should be " + "provided together or not at all." 
+ ) + if referenced_instances is not None: + col_defs.append('ReferencedFrameNumber INTEGER') + col_defs.append('ReferencedSOPInstanceUID VARCHAR NOT NULL') + col_defs.append( + 'FOREIGN KEY(ReferencedSOPInstanceUID) ' + 'REFERENCES InstanceUIDs(SOPInstanceUID)' + ) + col_data += [ + referenced_frames, + referenced_instances, + ] + + # Build LUT from columns + all_defs = ", ".join(col_defs) + cmd = f'CREATE TABLE FrameLUT({all_defs})' + print(cmd) + placeholders = ', '.join(['?'] * len(col_data)) + with self._db_con: + self._db_con.execute(cmd) + self._db_con.executemany( + f'INSERT INTO FrameLUT VALUES({placeholders})', + zip(*col_data), + ) + + def _get_ref_instance_uids( + self, + dataset: Dataset, + ) -> List[Tuple[str, str, str]]: + """List all instances referenced in the image. + + Parameters + ---------- + dataset + + Returns + ------- + List[Tuple[str, str, str]] + List of all instances referenced in the image in the format + (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID). + + """ + instance_data = [] + if hasattr(dataset, 'ReferencedSeriesSequence'): + for ref_series in dataset.ReferencedSeriesSequence: + for ref_ins in ref_series.ReferencedInstanceSequence: + instance_data.append( + ( + dataset.StudyInstanceUID, + ref_series.SeriesInstanceUID, + ref_ins.ReferencedSOPInstanceUID + ) + ) + other_studies_kw = 'StudiesContainingOtherReferencedInstancesSequence' + if hasattr(dataset, other_studies_kw): + for ref_study in getattr(dataset, other_studies_kw): + for ref_series in ref_study.ReferencedSeriesSequence: + for ref_ins in ref_series.ReferencedInstanceSequence: + instance_data.append( + ( + ref_study.StudyInstanceUID, + ref_series.SeriesInstanceUID, + ref_ins.ReferencedSOPInstanceUID, + ) + ) + + # There shouldn't be duplicates here, but there's no explicit rule + # preventing it. + # Since dictionary ordering is preserved, this trick deduplicates + # the list without changing the order + unique_instance_data = list(dict.fromkeys(instance_data)) + if len(unique_instance_data) != len(instance_data): + counts = Counter(instance_data) + duplicate_sop_uids = [ + f"'{key[2]}'" for key, value in counts.items() if value > 1 + ] + display_str = ', '.join(duplicate_sop_uids) + logger.warning( + 'Duplicate entries found in the ReferencedSeriesSequence. ' + f"SOP Instance UID: '{dataset.SOPInstanceUID}', " + f'duplicated referenced SOP Instance UID items: {display_str}.' + ) + + return unique_instance_data + + def _check_indexing_with_source_frames( + self, + ignore_spatial_locations: bool = False + ) -> None: + """Check if indexing by source frames is possible. + + Raise exceptions with useful messages otherwise. + + Possible problems include: + * Spatial locations are not preserved. + * The dataset does not specify that spatial locations are preserved + and the user has not asserted that they are. + * At least one frame in the segmentation lists multiple + source frames. + + Parameters + ---------- + ignore_spatial_locations: bool + Allows the user to ignore whether spatial locations are preserved + in the frames. + + """ + # Checks that it is possible to index using source frames in this + # dataset + if self._is_tiled_full: + raise RuntimeError( + 'Indexing via source frames is not possible when a ' + 'segmentation is stored using the DimensionOrganizationType ' + '"TILED_FULL".' 
+ )
+ elif self._locations_preserved is None:
+ if not ignore_spatial_locations:
+ raise RuntimeError(
+ 'Indexing via source frames is not permissible since this '
+ 'image does not specify that spatial locations are '
+ 'preserved in the course of deriving the segmentation '
+ 'from the source image. If you are confident that spatial '
+ 'locations are preserved, or do not require that spatial '
+ 'locations are preserved, you may override this behavior '
+ "with the 'ignore_spatial_locations' parameter."
+ )
+ elif self._locations_preserved == SpatialLocationsPreservedValues.NO:
+ if not ignore_spatial_locations:
+ raise RuntimeError(
+ 'Indexing via source frames is not permissible since this '
+ 'image specifies that spatial locations are not preserved '
+ 'in the course of deriving the segmentation from the '
+ 'source image. If you do not require that spatial '
+ 'locations are preserved, you may override this behavior '
+ "with the 'ignore_spatial_locations' parameter."
+ )
+ if not self._single_source_frame_per_frame:
+ raise RuntimeError(
+ 'Indexing via source frames is not permissible since some '
+ 'frames in the segmentation specify multiple source frames.'
+ )
+
+ @property
+ def dimension_index_pointers(self) -> List[BaseTag]:
+ """List[pydicom.tag.BaseTag]:
+ List of tags used as dimension indices.
+ """
+ return [BaseTag(t) for t in self._dim_ind_pointers]
+
+ def _create_ref_instance_table(
+ self,
+ referenced_uids: List[Tuple[str, str, str]],
+ ) -> None:
+ """Create a table of referenced instances.
+
+ The resulting table (called InstanceUIDs) contains Study, Series and
+ SOP instance UIDs for each instance referenced by the segmentation
+ image.
+
+ Parameters
+ ----------
+ referenced_uids: List[Tuple[str, str, str]]
+ List of UIDs for each instance referenced in the segmentation.
+ Each tuple should be in the format
+ (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID).
+
+ """
+ with self._db_con:
+ self._db_con.execute(
+ """
+ CREATE TABLE InstanceUIDs(
+ StudyInstanceUID VARCHAR NOT NULL,
+ SeriesInstanceUID VARCHAR NOT NULL,
+ SOPInstanceUID VARCHAR PRIMARY KEY
+ )
+ """
+ )
+ self._db_con.executemany(
+ "INSERT INTO InstanceUIDs "
+ "(StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID) "
+ "VALUES(?, ?, ?)",
+ referenced_uids,
+ )
+
+ def _get_shared_image_orientation(
+ self,
+ dataset: Dataset
+ ) -> Optional[List[float]]:
+ """Get image orientation if it is shared between frames.
+
+ Parameters
+ ----------
+ dataset: pydicom.Dataset
+ Dataset for which to get the image orientation.
+
+ Returns
+ -------
+ Union[List[float], None]:
+ Image orientation attribute (list of 6 floats containing direction
+ cosines) if this is shared between frames in the image. Otherwise
+ returns None.
+
+ """
+ if hasattr(dataset, 'ImageOrientationSlide'):
+ return dataset.ImageOrientationSlide
+
+ if hasattr(dataset, 'SharedFunctionalGroupsSequence'):
+ sfgs = dataset.SharedFunctionalGroupsSequence[0]
+ if hasattr(sfgs, 'PlaneOrientationSequence'):
+ return sfgs.PlaneOrientationSequence[0].ImageOrientationPatient
+
+ if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'):
+ pfg1 = dataset.PerFrameFunctionalGroupsSequence[0]
+ if hasattr(pfg1, 'PlaneOrientationSequence'):
+ iop = pfg1.PlaneOrientationSequence[0].ImageOrientationPatient
+
+ if len(dataset.PerFrameFunctionalGroupsSequence) == 1:
+ return iop
+ else:
+ for pfg in dataset.PerFrameFunctionalGroupsSequence[1:]:
+ frame_iop = (
+ pfg.PlaneOrientationSequence[0].
+ ImageOrientationPatient
+ )
+ if frame_iop != iop:
+ break
+ else:
+ return iop
+
+ return None
+
+ def are_dimension_indices_unique(
+ self,
+ dimension_index_pointers: Sequence[Union[int, BaseTag]],
+ ) -> bool:
+ """Check if a list of index pointers uniquely identifies frames.
+
+ For a given list of dimension index pointers, check whether every
+ combination of index values for these pointers identifies a unique
+ frame. This is a pre-requisite for indexing using this list of
+ dimension index pointers.
+
+ Parameters
+ ----------
+ dimension_index_pointers: Sequence[Union[int, pydicom.tag.BaseTag]]
+ Sequence of tags serving as dimension index pointers.
+
+ Returns
+ -------
+ bool
+ True if dimension indices are unique.
+
+ """
+ column_names = []
+ for ptr in dimension_index_pointers:
+ column_names.append(self._dim_ind_col_names[ptr])
+ col_str = ", ".join(column_names)
+ cur = self._db_con.cursor()
+ n_unique_combos = cur.execute(
+ f"SELECT COUNT(*) FROM (SELECT 1 FROM FrameLUT GROUP BY {col_str})"
+ ).fetchone()[0]
+ return n_unique_combos == self._number_of_frames
+
+ def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]:
+ """Get UIDs of source image instances referenced in the image.
+
+ Returns
+ -------
+ List[Tuple[highdicom.UID, highdicom.UID, highdicom.UID]]
+ (Study Instance UID, Series Instance UID, SOP Instance UID) triplet
+ for every image instance referenced in the segmentation.
+
+ """
+ cur = self._db_con.cursor()
+ res = cur.execute(
+ 'SELECT StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID '
+ 'FROM InstanceUIDs'
+ )
+
+ return [
+ (hd_UID(a), hd_UID(b), hd_UID(c)) for a, b, c in res.fetchall()
+ ]
+
+ def get_unique_referenced_sop_instance_uids(self) -> Set[str]:
+ """Get set of unique Referenced SOP Instance UIDs.
+
+ Returns
+ -------
+ Set[str]
+ Set of unique Referenced SOP Instance UIDs.
+
+ """
+ cur = self._db_con.cursor()
+ return {
+ r[0] for r in
+ cur.execute(
+ 'SELECT DISTINCT(SOPInstanceUID) from InstanceUIDs'
+ )
+ }
+
+ def get_max_referenced_frame_number(self) -> int:
+ """Get highest frame number of any referenced frame.
+
+ Absent access to the referenced dataset itself, being less than or
+ equal to this value is a sufficient condition for the existence of
+ a frame number in the source image.
+
+ Returns
+ -------
+ int
+ Highest frame number referenced in the segmentation image.
+
+ """
+ cur = self._db_con.cursor()
+ return cur.execute(
+ 'SELECT MAX(ReferencedFrameNumber) FROM FrameLUT'
+ ).fetchone()[0]
+
+ def is_indexable_as_total_pixel_matrix(self) -> bool:
+ """Whether the image can be indexed as a total pixel matrix.
+
+ Returns
+ -------
+ bool:
+ True if the segmentation may be indexed using row and column
+ positions in the total pixel matrix. False otherwise.
+
+ """
+ row_pos_kw = tag_for_keyword('RowPositionInTotalImagePixelMatrix')
+ col_pos_kw = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix')
+ return (
+ row_pos_kw in self._dim_ind_col_names and
+ col_pos_kw in self._dim_ind_col_names
+ )
+
+ def get_unique_dim_index_values(
+ self,
+ dimension_index_pointers: Sequence[int],
+ ) -> Set[Tuple[int, ...]]:
+ """Get set of unique dimension index value combinations.
+
+ Parameters
+ ----------
+ dimension_index_pointers: Sequence[int]
+ List of dimension index pointers for which to find unique
+ combinations of values.
+
+ Returns
+ -------
+ Set[Tuple[int, ...]]
+ Set of unique dimension index value combinations for the given
+ input dimension index pointers.
+
+ """
+ cols = [self._dim_ind_col_names[p] for p in dimension_index_pointers]
+ cols_str = ', '.join(cols)
+ cur = self._db_con.cursor()
+ return {
+ r for r in
+ cur.execute(
+ f'SELECT DISTINCT {cols_str} FROM FrameLUT'
+ )
+ }
+
+ def get_slice_spacing(
+ self,
+ tol: float = DEFAULT_SPACING_TOLERANCE,
+ split_dimensions: Optional[Sequence[BaseTag]] = None,
+ ) -> Optional[float]:
+ """Get slice spacing, if any, for the image.
+
+ First determines whether the multiframe image represents a 3D volume.
+ A 3D volume consists of regularly spaced slices with orthogonal axes, i.e.
+ the slices are spaced equally along the direction orthogonal to the
+ in-plane image orientation cosines.
+
+ If the image does represent a volume, returns the absolute value of the
+ slice spacing. If the image does not represent a volume, returns None.
+
+ Note that we stipulate that an image with a single frame in the patient
+ coordinate system is a 3D volume for the purposes of this function. In this
+ case the returned slice spacing will be 0.0 if it cannot be deduced from
+ the metadata.
+
+ Note also that this function checks the image position and image
+ orientation metadata found in the file and ignores any SpacingBetweenSlices
+ or DimensionOrganizationType found in the dataset. Therefore it does not
+ rely upon the creator having populated these attributes, or that their
+ values are correct.
+
+ Parameters
+ ----------
+ tol: float, optional
+ Tolerance for determining spacing regularity. If slice spacings vary by
+ less than this tolerance, they are considered to be regular.
+ split_dimensions: Union[Sequence[pydicom.tag.BaseTag], None], optional
+ Split the frames along these dimension indices and determine whether a
+ 3D volume exists for each value of the split dimensions. For example,
+ if time were included as a split dimension, this function will check
+ whether a 3D volume exists at each timepoint (and that the volume is
+ the same at each timepoint). Each dimension index should be provided
+ as a base tag representing the Dimension Index Pointer.
+
+ Returns
+ -------
+ float:
+ Absolute value of the regular slice spacing if the image meets the
+ definition of a 3D volume, above. None otherwise.
+
+ """
+ if self._coordinate_system is None:
+ return None
+ if self._coordinate_system != CoordinateSystemNames.PATIENT:
+ return None
+
+ if self.shared_image_orientation is None:
+ return None
+
+ if self._number_of_frames == 1:
+ # Stipulate that this does represent a volume
+ return 0.0
+
+ if split_dimensions is None:
+ cur = self._db_con.cursor()
+
+ query = """
+ SELECT
+ ImagePositionPatient_0,
+ ImagePositionPatient_1,
+ ImagePositionPatient_2
+ FROM FrameLUT;
+ """
+
+ image_positions = np.array(
+ [r for r in cur.execute(query)]
+ )
+ spacing = get_regular_slice_spacing(
+ image_positions=image_positions,
+ image_orientation=np.array(self.shared_image_orientation),
+ sort=True,
+ )
+
+ return spacing
+
+ @contextmanager
+ def _generate_temp_table(
+ self,
+ table_name: str,
+ column_defs: Sequence[str],
+ column_data: Iterable[Sequence[Any]],
+ ) -> Generator[None, None, None]:
+ """Context manager that handles a temporary table.
+
+ The temporary table is created with the specified information. Control
+ flow then returns to code within the "with" block. After the "with"
+ block has completed, the cleanup of the table is automatically handled.
+
+ Parameters
+ ----------
+ table_name: str
+ Name of the temporary table.
+ column_defs: Sequence[str] + SQL syntax strings defining each column in the temporary table, one + string per column. + column_data: Iterable[Sequence[Any]] + Column data to place into the table. + + Yields + ------ + None: + Yields control to the "with" block, with the temporary table + created. + + """ + defs_str = ', '.join(column_defs) + create_cmd = (f'CREATE TABLE {table_name}({defs_str})') + placeholders = ', '.join(['?'] * len(column_defs)) + + with self._db_con: + self._db_con.execute(create_cmd) + self._db_con.executemany( + f'INSERT INTO {table_name} VALUES({placeholders})', + column_data + ) + + # Return control flow to "with" block + yield + + # Clean up the table + cmd = (f'DROP TABLE {table_name}') + with self._db_con: + self._db_con.execute(cmd) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 4bbc53da..37b449b5 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -36,7 +36,8 @@ ) from highdicom._module_utils import ( check_required_attributes, - does_iod_have_pixel_data + does_iod_have_pixel_data, + is_multiframe_image, ) @@ -1754,7 +1755,10 @@ def __init__( 'Specifying "referenced_frame_number" is not supported ' 'with multiple referenced images.' ) - if not hasattr(referenced_images[0], 'NumberOfFrames'): + # note cannot use the highdicom.utils function here due to + # circular import issues + is_multiframe = is_multiframe_image(referenced_images[0]) + if not is_multiframe: raise TypeError( 'Specifying "referenced_frame_number" is not valid ' 'when the referenced image is not a multi-frame image.' diff --git a/src/highdicom/pm/content.py b/src/highdicom/pm/content.py index 2d50eee1..aea90057 100644 --- a/src/highdicom/pm/content.py +++ b/src/highdicom/pm/content.py @@ -5,6 +5,7 @@ from pydicom.dataset import Dataset from pydicom.sequence import Sequence as DataElementSequence from pydicom.sr.coding import Code +from highdicom._module_utils import is_multiframe_image from highdicom.content import PlanePositionSequence from highdicom.enum import CoordinateSystemNames @@ -281,7 +282,7 @@ def get_plane_positions_of_image( Plane position of each frame in the image """ - is_multiframe = hasattr(image, 'NumberOfFrames') + is_multiframe = is_multiframe_image(image) if not is_multiframe: raise ValueError('Argument "image" must be a multi-frame image.') @@ -322,7 +323,7 @@ def get_plane_positions_of_series( Plane position of each frame in the image """ - is_multiframe = any([hasattr(img, 'NumberOfFrames') for img in images]) + is_multiframe = any([is_multiframe_image(img) for img in images]) if is_multiframe: raise ValueError( 'Argument "images" must be a series of single-frame images.' 
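
For context, the hunks above and below replace ad-hoc hasattr(ds, 'NumberOfFrames') checks with the is_multiframe_image helper introduced in this patch. The following is a minimal, self-contained sketch of the difference between the two checks; it is illustrative only and not part of the patch. The SOP Class UID is the standard Segmentation Storage UID, while the hand-built dataset and printed values are assumptions for the example.

    from pydicom.dataset import Dataset

    from highdicom._module_utils import is_multiframe_image

    ds = Dataset()
    ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.66.4'  # Segmentation Storage

    # Old check: keys on whether the NumberOfFrames attribute happens to
    # be present, so this multiframe-IOD dataset would be misclassified
    print(hasattr(ds, 'NumberOfFrames'))  # False

    # New check: classifies by what the IOD allows, regardless of
    # whether the attribute is present in this particular instance
    print(is_multiframe_image(ds))  # True
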
diff --git a/src/highdicom/pm/sop.py b/src/highdicom/pm/sop.py index 2bc8a645..091c7278 100644 --- a/src/highdicom/pm/sop.py +++ b/src/highdicom/pm/sop.py @@ -17,6 +17,7 @@ from highdicom.pm.content import DimensionIndexSequence, RealWorldValueMapping from highdicom.pm.enum import DerivedPixelContrastValues, ImageFlavorValues from highdicom.valuerep import check_person_name, _check_code_string +from highdicom._module_utils import is_multiframe_image from pydicom import Dataset from pydicom.uid import ( UID, @@ -271,7 +272,7 @@ def __init__( ) src_img = self._source_images[0] - is_multiframe = hasattr(src_img, 'NumberOfFrames') + is_multiframe = is_multiframe_image(src_img) # TODO: Revisit, may be overly restrictive # Check Source Image Sequence attribute in General Reference module if is_multiframe: diff --git a/src/highdicom/pr/content.py b/src/highdicom/pr/content.py index 701e6cba..41af8d27 100644 --- a/src/highdicom/pr/content.py +++ b/src/highdicom/pr/content.py @@ -41,6 +41,8 @@ _check_long_string, _check_short_text ) +from highdicom._module_utils import is_multiframe_image + logger = logging.getLogger(__name__) @@ -553,7 +555,7 @@ def __init__( ) self.GraphicLayer = graphic_layer.GraphicLayer - is_multiframe = hasattr(referenced_images[0], 'NumberOfFrames') + is_multiframe = is_multiframe_image(referenced_images[0]) if is_multiframe and len(referenced_images) > 1: raise ValueError( 'If referenced images are multi-frame, only a single image ' @@ -1087,7 +1089,7 @@ def _get_modality_lut_transformation( """ # Multframe images - if any(hasattr(im, 'NumberOfFrames') for im in referenced_images): + if any(is_multiframe_image(im) for im in referenced_images): im = referenced_images[0] if len(referenced_images) > 1 and not is_tiled_image(im): raise ValueError( @@ -1277,10 +1279,7 @@ def _add_softcopy_voi_lut_attributes( 'included in "referenced_images".' 
) ref_im = ref_images_lut[uids] - is_multiframe = hasattr( - ref_im, - 'NumberOfFrames', - ) + is_multiframe = is_multiframe_image(ref_im) if uids in prev_ref_frames and not is_multiframe: raise ValueError( f'Instance with SOP Instance UID {uids[1]} ' @@ -1358,7 +1357,7 @@ def _get_softcopy_voi_lut_transformations( """ transformations = [] - if any(hasattr(im, 'NumberOfFrames') for im in referenced_images): + if any(is_multiframe_image(im) for im in referenced_images): if len(referenced_images) > 1: raise ValueError( "If multiple images are passed and any of them are multiframe, " diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py index 495f89d6..cf6672a4 100644 --- a/src/highdicom/seg/content.py +++ b/src/highdicom/seg/content.py @@ -18,7 +18,11 @@ from highdicom.sr.coding import CodedConcept from highdicom.uid import UID from highdicom.utils import compute_plane_position_slide_per_frame -from highdicom._module_utils import check_required_attributes +from highdicom._module_utils import ( + check_required_attributes, + is_multiframe_image, +) + class SegmentDescription(Dataset): @@ -470,7 +474,7 @@ def get_plane_positions_of_image( Plane position of each frame in the image """ - is_multiframe = hasattr(image, 'NumberOfFrames') + is_multiframe = is_multiframe_image(image) if not is_multiframe: raise ValueError('Argument "image" must be a multi-frame image.') @@ -515,7 +519,7 @@ def get_plane_positions_of_series( Plane position of each frame in the image """ - is_multiframe = any([hasattr(img, 'NumberOfFrames') for img in images]) + is_multiframe = any([is_multiframe_image(img) for img in images]) if is_multiframe: raise ValueError( 'Argument "images" must be a series of single-frame images.' diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index be933a3d..776a1576 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -5,18 +5,15 @@ from contextlib import contextmanager from copy import deepcopy from os import PathLike -import sqlite3 from typing import ( Any, BinaryIO, Dict, Generator, - Iterable, Iterator, List, Optional, Sequence, - Set, Tuple, Union, cast, @@ -25,9 +22,8 @@ import numpy as np from pydicom.dataset import Dataset -from pydicom.datadict import get_entry, keyword_for_tag, tag_for_keyword +from pydicom.datadict import keyword_for_tag, tag_for_keyword from pydicom.encaps import encapsulate -from pydicom.multival import MultiValue from pydicom.pixel_data_handlers.numpy_handler import pack_bits from pydicom.tag import BaseTag, Tag from pydicom.uid import ( @@ -43,7 +39,12 @@ from pydicom.sr.coding import Code from pydicom.filereader import dcmread -from highdicom._module_utils import ModuleUsageValues, get_module_usage +from highdicom._module_utils import ( + ModuleUsageValues, + get_module_usage, + is_multiframe_image, +) +from highdicom._multiframe import MultiFrameDBManager from highdicom.base import SOPClass, _check_little_endian from highdicom.content import ( ContentCreatorIdentificationCodeSequence, @@ -61,7 +62,6 @@ compute_plane_position_tiled_full, is_tiled_image, get_tile_array, - iter_tiled_full_frame_data, tile_pixel_matrix, ) from highdicom.seg.content import ( @@ -72,7 +72,6 @@ SegmentationFractionalTypeValues, SegmentationTypeValues, SegmentsOverlapValues, - SpatialLocationsPreservedValues, SegmentAlgorithmTypeValues, ) from highdicom.seg.utils import iter_segments @@ -89,8 +88,6 @@ logger = logging.getLogger(__name__) -_NO_FRAME_REF_VALUE = -1 - def _get_unsigned_dtype(max_val: Union[int, np.integer]) -> 
type: """Get the smallest unsigned NumPy datatype to accommodate a value. @@ -150,242 +147,10 @@ def _check_numpy_value_representation( ) -class _SegDBManager: +class _SegDBManager(MultiFrameDBManager): """Database manager for data associated with a segmentation image.""" - # Dictionary mapping DCM VRs to appropriate SQLite types - _DCM_SQL_TYPE_MAP = { - 'CS': 'VARCHAR', - 'DS': 'REAL', - 'FD': 'REAL', - 'FL': 'REAL', - 'IS': 'INTEGER', - 'LO': 'TEXT', - 'LT': 'TEXT', - 'PN': 'TEXT', - 'SH': 'TEXT', - 'SL': 'INTEGER', - 'SS': 'INTEGER', - 'ST': 'TEXT', - 'UI': 'TEXT', - 'UL': 'INTEGER', - 'UR': 'TEXT', - 'US or SS': 'INTEGER', - 'US': 'INTEGER', - 'UT': 'TEXT', - } - - def __init__( - self, - referenced_uids: List[Tuple[str, str, str]], - segment_numbers: List[int], - dim_indices: Dict[int, List[int]], - dim_values: Dict[int, List[Any]], - referenced_instances: Optional[List[str]], - referenced_frames: Optional[List[int]], - ): - """ - - Parameters - ---------- - referenced_uids: List[Tuple[str, str, str]] - Triplet of UIDs for each image instance (Study Instance UID, - Series Instance UID, SOP Instance UID) that is referenced - in the segmentation image. - segment_numbers: List[int] - Segment numbers for each frame in the segmentation image. - dim_indices: Dict[int, List[int]] - Dictionary mapping the integer tag value of each dimension index - pointer (excluding SegmentNumber) to a list of dimension indices - for each frame in the segmentation image. - dim_values: Dict[int, List[Values]] - Dictionary mapping the integer tag value of each dimension index - pointer (excluding SegmentNumber) to a list of dimension values - for each frame in the segmentation image. - referenced_instances: Optional[List[str]] - SOP Instance UID of each referenced image instance for each frame - in the segmentation image. Should be omitted if there is not a - single referenced image instance per segmentation image frame. - referenced_frames: Optional[List[int]] - Number of the corresponding frame in the referenced image - instance for each frame in the segmentation image. Should be - omitted if there is not a single referenced image instance per - segmentation image frame. - - """ - self._db_con: sqlite3.Connection = sqlite3.connect(":memory:") - - self._create_ref_instance_table(referenced_uids) - - self._number_of_frames = len(segment_numbers) - - # Construct the columns and values to put into a frame look-up table - # table within sqlite. 
There will be one row per frame in the - # segmentation instance - col_defs = [] # SQL column definitions - col_data = [] # lists of column data - - # Frame number column - col_defs.append('FrameNumber INTEGER PRIMARY KEY') - col_data.append(list(range(1, self._number_of_frames + 1))) - - # Segment number column - col_defs.append('SegmentNumber INTEGER NOT NULL') - col_data.append(segment_numbers) - - self._dim_ind_col_names = {} - for i, t in enumerate(dim_indices.keys()): - vr, vm_str, _, _, kw = get_entry(t) - if kw == '': - kw = f'UnknownDimensionIndex{i}' - ind_col_name = kw + '_DimensionIndexValues' - self._dim_ind_col_names[t] = ind_col_name - - # Add column for dimension index - col_defs.append(f'{ind_col_name} INTEGER NOT NULL') - col_data.append(dim_indices[t]) - - # Add column for dimension value - # For this to be possible, must have a fixed VM - # and a VR that we can map to a sqlite type - # Otherwise, we just omit the data from the db - try: - vm = int(vm_str) - except ValueError: - continue - try: - sql_type = self._DCM_SQL_TYPE_MAP[vr] - except KeyError: - continue - - if vm > 1: - for d in range(vm): - data = [el[d] for el in dim_values[t]] - col_defs.append(f'{kw}_{d} {sql_type} NOT NULL') - col_data.append(data) - else: - # Single column - col_defs.append(f'{kw} {sql_type} NOT NULL') - col_data.append(dim_values[t]) - - # Columns related to source frames, if they are usable for indexing - if (referenced_frames is None) != (referenced_instances is None): - raise TypeError( - "'referenced_frames' and 'referenced_instances' should be " - "provided together or not at all." - ) - if referenced_instances is not None: - col_defs.append('ReferencedFrameNumber INTEGER') - col_defs.append('ReferencedSOPInstanceUID VARCHAR NOT NULL') - col_defs.append( - 'FOREIGN KEY(ReferencedSOPInstanceUID) ' - 'REFERENCES InstanceUIDs(SOPInstanceUID)' - ) - col_data += [ - referenced_frames, - referenced_instances, - ] - - # Build LUT from columns - all_defs = ", ".join(col_defs) - cmd = f'CREATE TABLE FrameLUT({all_defs})' - placeholders = ', '.join(['?'] * len(col_data)) - with self._db_con: - self._db_con.execute(cmd) - self._db_con.executemany( - f'INSERT INTO FrameLUT VALUES({placeholders})', - zip(*col_data), - ) - - def _create_ref_instance_table( - self, - referenced_uids: List[Tuple[str, str, str]], - ) -> None: - """Create a table of referenced instances. - - The resulting table (called InstanceUIDs) contains Study, Series and - SOP instance UIDs for each instance referenced by the segmentation - image. - - Parameters - ---------- - referenced_uids: List[Tuple[str, str, str]] - List of UIDs for each instance referenced in the segmentation. - Each tuple should be in the format - (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID). - - """ - with self._db_con: - self._db_con.execute( - """ - CREATE TABLE InstanceUIDs( - StudyInstanceUID VARCHAR NOT NULL, - SeriesInstanceUID VARCHAR NOT NULL, - SOPInstanceUID VARCHAR PRIMARY KEY - ) - """ - ) - self._db_con.executemany( - "INSERT INTO InstanceUIDs " - "(StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID) " - "VALUES(?, ?, ?)", - referenced_uids, - ) - - def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]: - """Get UIDs of source image instances referenced in the segmentation. - - Returns - ------- - List[Tuple[highdicom.UID, highdicom.UID, highdicom.UID]] - (Study Instance UID, Series Instance UID, SOP Instance UID) triplet - for every image instance referenced in the segmentation. 
- - """ - cur = self._db_con.cursor() - res = cur.execute( - 'SELECT StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID ' - 'FROM InstanceUIDs' - ) - - return [ - (hd_UID(a), hd_UID(b), hd_UID(c)) for a, b, c in res.fetchall() - ] - - def are_dimension_indices_unique( - self, - dimension_index_pointers: Sequence[Union[int, BaseTag]], - ) -> bool: - """Check if a list of index pointers uniquely identifies frames. - - For a given list of dimension index pointers, check whether every - combination of index values for these pointers identifies a unique - frame per segment in the segmentation image. This is a pre-requisite - for indexing using this list of dimension index pointers in the - :meth:`Segmentation.get_pixels_by_dimension_index_values()` method. - - Parameters - ---------- - dimension_index_pointers: Sequence[Union[int, pydicom.tag.BaseTag]] - Sequence of tags serving as dimension index pointers. - - Returns - ------- - bool - True if dimension indices are unique. - - """ - column_names = ['SegmentNumber'] - for ptr in dimension_index_pointers: - column_names.append(self._dim_ind_col_names[ptr]) - col_str = ", ".join(column_names) - cur = self._db_con.cursor() - n_unique_combos = cur.execute( - f"SELECT COUNT(*) FROM (SELECT 1 FROM FrameLUT GROUP BY {col_str})" - ).fetchone()[0] - return n_unique_combos == self._number_of_frames - def are_referenced_sop_instances_unique(self) -> bool: """Check if Referenced SOP Instance UIDs uniquely identify frames. @@ -406,7 +171,7 @@ def are_referenced_sop_instances_unique(self) -> bool: n_unique_combos = cur.execute( 'SELECT COUNT(*) FROM ' '(SELECT 1 FROM FrameLUT GROUP BY ReferencedSOPInstanceUID, ' - 'SegmentNumber)' + 'ReferencedSegmentNumber)' ).fetchone()[0] return n_unique_combos == self._number_of_frames @@ -425,140 +190,10 @@ def are_referenced_frames_unique(self) -> bool: n_unique_combos = cur.execute( 'SELECT COUNT(*) FROM ' '(SELECT 1 FROM FrameLUT GROUP BY ReferencedFrameNumber, ' - 'SegmentNumber)' + 'ReferencedSegmentNumber)' ).fetchone()[0] return n_unique_combos == self._number_of_frames - def get_unique_sop_instance_uids(self) -> Set[str]: - """Get set of unique Referenced SOP Instance UIDs. - - Returns - ------- - Set[str] - Set of unique Referenced SOP Instance UIDs. - - """ - cur = self._db_con.cursor() - return { - r[0] for r in - cur.execute( - 'SELECT DISTINCT(SOPInstanceUID) from InstanceUIDs' - ) - } - - def get_max_frame_number(self) -> int: - """Get highest frame number of any referenced frame. - - Absent access to the referenced dataset itself, being less than this - value is a sufficient condition for the existence of a frame number - in the source image. - - Returns - ------- - int - Highest frame number referenced in the segmentation image. - - """ - cur = self._db_con.cursor() - return cur.execute( - 'SELECT MAX(ReferencedFrameNumber) FROM FrameLUT' - ).fetchone()[0] - - def get_unique_dim_index_values( - self, - dimension_index_pointers: Sequence[int], - ) -> Set[Tuple[int, ...]]: - """Get set of unique dimension index value combinations. - - Parameters - ---------- - dimension_index_pointers: Sequence[int] - List of dimension index pointers for which to find unique - combinations of values. - - Returns - ------- - Set[Tuple[int, ...]] - Set of unique dimension index value combinations for the given - input dimension index pointers. 
- - """ - cols = [self._dim_ind_col_names[p] for p in dimension_index_pointers] - cols_str = ', '.join(cols) - cur = self._db_con.cursor() - return { - r for r in - cur.execute( - f'SELECT DISTINCT {cols_str} FROM FrameLUT' - ) - } - - def is_indexable_as_total_pixel_matrix(self) -> bool: - """Whether the segmentation can be indexed as a total pixel matrix. - - Returns - ------- - bool: - True if the segmentation may be indexed using row and column - positions in the total pixel matrix. False otherwise. - - """ - row_pos_kw = tag_for_keyword('RowPositionInTotalImagePixelMatrix') - col_pos_kw = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix') - return ( - row_pos_kw in self._dim_ind_col_names and - col_pos_kw in self._dim_ind_col_names - ) - - @contextmanager - def _generate_temp_table( - self, - table_name: str, - column_defs: Sequence[str], - column_data: Iterable[Sequence[Any]], - ) -> Generator[None, None, None]: - """Context manager that handles a temporary table. - - The temporary table is created with the specified information. Control - flow then returns to code within the "with" block. After the "with" - block has completed, the cleanup of the table is automatically handled. - - Parameters - ---------- - table_name: str - Name of the temporary table. - column_defs: Sequence[str] - SQL syntax strings defining each column in the temporary table, one - string per column. - column_data: Iterable[Sequence[Any]] - Column data to place into the table. - - Yields - ------ - None: - Yields control to the "with" block, with the temporary table - created. - - """ - defs_str = ', '.join(column_defs) - create_cmd = (f'CREATE TABLE {table_name}({defs_str})') - placeholders = ', '.join(['?'] * len(column_defs)) - - with self._db_con: - self._db_con.execute(create_cmd) - self._db_con.executemany( - f'INSERT INTO {table_name} VALUES({placeholders})', - column_data - ) - - # Return control flow to "with" block - yield - - # Clean up the table - cmd = (f'DROP TABLE {table_name}') - with self._db_con: - self._db_con.execute(cmd) - @contextmanager def _generate_temp_segment_table( self, @@ -724,7 +359,7 @@ def iterate_indices_by_source_instance( 'INNER JOIN FrameLUT L' ' ON T.SourceSOPInstanceUID = L.ReferencedSOPInstanceUID ' 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.SegmentNumber = S.SegmentNumber ' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' 'ORDER BY T.OutputFrameIndex' ) @@ -844,7 +479,7 @@ def iterate_indices_by_source_frame( 'INNER JOIN FrameLUT L' ' ON F.SourceFrameNumber = L.ReferencedFrameNumber ' 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.SegmentNumber = S.SegmentNumber ' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' 'ORDER BY F.OutputFrameIndex' ) @@ -967,7 +602,7 @@ def iterate_indices_by_dimension_index_values( 'INNER JOIN FrameLUT L' f' ON {join_str} ' 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.SegmentNumber = S.SegmentNumber ' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' 'ORDER BY D.OutputFrameIndex' ) @@ -1088,7 +723,7 @@ def iterate_indices_for_tiled_region( ' S.OutputSegmentNumber ' 'FROM FrameLUT L ' 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.SegmentNumber = S.SegmentNumber ' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' 'WHERE (' ' L.RowPositionInTotalImagePixelMatrix >= ' f' {row_offset_start}' @@ -1435,7 +1070,7 @@ def __init__( ) src_img = source_images[0] - is_multiframe = hasattr(src_img, 'NumberOfFrames') + is_multiframe = is_multiframe_image(src_img) if is_multiframe and len(source_images) > 1: raise ValueError( 
'Only one source image should be provided in case images ' @@ -1870,9 +1505,7 @@ def __init__( dimension_organization_type = self._check_dimension_organization_type( dimension_organization_type=dimension_organization_type, is_tiled=is_tiled, - are_spatial_locations_preserved=are_spatial_locations_preserved, omit_empty_frames=omit_empty_frames, - source_image=src_img, plane_positions=plane_positions, rows=self.Rows, columns=self.Columns, @@ -2384,9 +2017,7 @@ def _check_dimension_organization_type( None, ], is_tiled: bool, - are_spatial_locations_preserved: bool, omit_empty_frames: bool, - source_image: Dataset, plane_positions: Sequence[PlanePositionSequence], rows: int, columns: int, @@ -2399,13 +2030,8 @@ def _check_dimension_organization_type( The specified DimensionOrganizationType for the output Segmentation. is_tiled: bool Whether the source image is a tiled image. - are_spatial_locations_preserved: bool - Whether spatial locations are preserved between the source image - and the segmentation pixel array. omit_empty_frames: bool Whether it was specified to omit empty frames. - source_image: pydicom.Dataset - Representative dataset of the source images. plane_positions: Sequence[highdicom.PlanePositionSequence] Plane positions of all frames. rows: int @@ -2454,8 +2080,8 @@ def _check_dimension_organization_type( ): raise ValueError( 'A value of "TILED_FULL" for parameter ' - '"dimension_organization_type" is not permitted unless ' - 'the "plane_positions" of the segmentation do not ' + '"dimension_organization_type" is not permitted because ' + 'the "plane_positions" of the segmentation ' 'do not follow the relevant requirements. See ' 'https://dicom.nema.org/medical/dicom/current/output/' 'chtml/part03/sect_C.7.6.17.3.html#sect_C.7.6.17.3.' @@ -2922,7 +2548,7 @@ def _get_pffg_item( ] derivation_src_img_item = Dataset() - if hasattr(source_images[0], 'NumberOfFrames'): + if is_multiframe_image(source_images[0]): # A single multi-frame source image src_img_item = source_images[0] # Frame numbers are one-based @@ -3164,209 +2790,7 @@ def _build_luts(self) -> None: index values. 
""" - referenced_uids = self._get_ref_instance_uids() - all_referenced_sops = {uids[2] for uids in referenced_uids} - - is_tiled_full = ( - hasattr(self, 'DimensionOrganizationType') and - self.DimensionOrganizationType == 'TILED_FULL' - ) - - segment_numbers = [] - - # Get list of all dimension index pointers, excluding the segment - # number, since this is treated differently - seg_num_tag = tag_for_keyword('ReferencedSegmentNumber') - self._dim_ind_pointers = [ - dim_ind.DimensionIndexPointer - for dim_ind in self.DimensionIndexSequence - if dim_ind.DimensionIndexPointer != seg_num_tag - ] - - func_grp_pointers = {} - for dim_ind in self.DimensionIndexSequence: - ptr = dim_ind.DimensionIndexPointer - if ptr in self._dim_ind_pointers: - grp_ptr = getattr(dim_ind, "FunctionalGroupPointer", None) - func_grp_pointers[ptr] = grp_ptr - - dim_ind_positions = { - dim_ind.DimensionIndexPointer: i - for i, dim_ind in enumerate(self.DimensionIndexSequence) - if dim_ind.DimensionIndexPointer != seg_num_tag - } - dim_indices: Dict[int, List[int]] = { - ptr: [] for ptr in self._dim_ind_pointers - } - dim_values: Dict[int, List[Any]] = { - ptr: [] for ptr in self._dim_ind_pointers - } - - self._single_source_frame_per_seg_frame = True - - if is_tiled_full: - # With TILED_FULL, there is no PerFrameFunctionalGroupsSequence, - # so we have to deduce the per-frame information - row_tag = tag_for_keyword('RowPositionInTotalImagePixelMatrix') - col_tag = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix') - x_tag = tag_for_keyword('XOffsetInSlideCoordinateSystem') - y_tag = tag_for_keyword('YOffsetInSlideCoordinateSystem') - z_tag = tag_for_keyword('ZOffsetInSlideCoordinateSystem') - tiled_full_dim_indices = {row_tag, col_tag, x_tag, y_tag, z_tag} - if len(set(dim_indices.keys()) - tiled_full_dim_indices) > 0: - raise RuntimeError( - 'Expected segmentation images with ' - '"DimensionOrganizationType" of "TILED_FULL" are expected ' - 'to have the following dimension index pointers: ' - 'SegmentNumber, RowPositionInTotalImagePixelMatrix, ' - 'ColumnPositionInTotalImagePixelMatrix.' 
- ) - self._single_source_frame_per_seg_frame = False - ( - segment_numbers, - _, - dim_values[col_tag], - dim_values[row_tag], - dim_values[x_tag], - dim_values[y_tag], - dim_values[z_tag], - ) = zip(*iter_tiled_full_frame_data(self)) - - # Create indices for each of the dimensions - for ptr, vals in dim_values.items(): - _, indices = np.unique(vals, return_inverse=True) - dim_indices[ptr] = (indices + 1).tolist() - - # There is no way to deduce whether the spatial locations are - # preserved in the tiled full case - self._locations_preserved = None - - referenced_instances = None - referenced_frames = None - else: - referenced_instances: Optional[List[str]] = [] - referenced_frames: Optional[List[int]] = [] - - # Create a list of source images and check for spatial locations - # preserved - locations_list_type = List[ - Optional[SpatialLocationsPreservedValues] - ] - locations_preserved: locations_list_type = [] - - for frame_item in self.PerFrameFunctionalGroupsSequence: - # Get segment number for this frame - seg_id_seg = frame_item.SegmentIdentificationSequence[0] - seg_num = seg_id_seg.ReferencedSegmentNumber - segment_numbers.append(int(seg_num)) - - # Get dimension indices for this frame - content_seq = frame_item.FrameContentSequence[0] - indices = content_seq.DimensionIndexValues - if not isinstance(indices, (MultiValue, list)): - # In case there is a single dimension index - indices = [indices] - if len(indices) != len(self._dim_ind_pointers) + 1: - # (+1 because referenced segment number is ignored) - raise RuntimeError( - 'Unexpected mismatch between dimension index values in ' - 'per-frames functional groups sequence and items in ' - 'the dimension index sequence.' - ) - for ptr in self._dim_ind_pointers: - dim_indices[ptr].append(indices[dim_ind_positions[ptr]]) - grp_ptr = func_grp_pointers[ptr] - if grp_ptr is not None: - dim_val = frame_item[grp_ptr][0][ptr].value - else: - dim_val = frame_item[ptr].value - dim_values[ptr].append(dim_val) - - frame_source_instances = [] - frame_source_frames = [] - for der_im in frame_item.DerivationImageSequence: - for src_im in der_im.SourceImageSequence: - frame_source_instances.append( - src_im.ReferencedSOPInstanceUID - ) - if hasattr(src_im, 'SpatialLocationsPreserved'): - locations_preserved.append( - SpatialLocationsPreservedValues( - src_im.SpatialLocationsPreserved - ) - ) - else: - locations_preserved.append( - None - ) - - if hasattr(src_im, 'ReferencedFrameNumber'): - if isinstance( - src_im.ReferencedFrameNumber, - MultiValue - ): - frame_source_frames.extend( - [ - int(f) - for f in src_im.ReferencedFrameNumber - ] - ) - else: - frame_source_frames.append( - int(src_im.ReferencedFrameNumber) - ) - else: - frame_source_frames.append(_NO_FRAME_REF_VALUE) - - if ( - len(set(frame_source_instances)) != 1 or - len(set(frame_source_frames)) != 1 - ): - self._single_source_frame_per_seg_frame = False - else: - ref_instance_uid = frame_source_instances[0] - if ref_instance_uid not in all_referenced_sops: - raise AttributeError( - f'SOP instance {ref_instance_uid} referenced in ' - 'the source image sequence is not included in the ' - 'Referenced Series Sequence or Studies Containing ' - 'Other Referenced Instances Sequence. This is an ' - 'error with the integrity of the Segmentation ' - 'object.' 
- )
- referenced_instances.append(ref_instance_uid)
- referenced_frames.append(frame_source_frames[0])
-
- # Summarise
- if any(
- isinstance(v, SpatialLocationsPreservedValues) and
- v == SpatialLocationsPreservedValues.NO
- for v in locations_preserved
- ):
- Type = Optional[SpatialLocationsPreservedValues]
- self._locations_preserved: Type = \
- SpatialLocationsPreservedValues.NO
- elif all(
- isinstance(v, SpatialLocationsPreservedValues) and
- v == SpatialLocationsPreservedValues.YES
- for v in locations_preserved
- ):
- self._locations_preserved = SpatialLocationsPreservedValues.YES
- else:
- self._locations_preserved = None
-
- if not self._single_source_frame_per_seg_frame:
- referenced_instances = None
- referenced_frames = None
-
- self._db_man = _SegDBManager(
- referenced_uids=referenced_uids,
- segment_numbers=segment_numbers,
- dim_indices=dim_indices,
- dim_values=dim_values,
- referenced_instances=referenced_instances,
- referenced_frames=referenced_frames,
- )
+ self._db_man = _SegDBManager(self)

 @property
 def segmentation_type(self) -> SegmentationTypeValues:
@@ -3937,7 +3361,11 @@ def get_default_dimension_index_pointers(
 List of tags used as the default dimension index pointers.

 """
- return self._dim_ind_pointers[:]
+ referenced_segment_number = tag_for_keyword('ReferencedSegmentNumber')
+ return [
+ t for t in self._db_man.dimension_index_pointers[:]
+ if t != referenced_segment_number
+ ]

 def are_dimension_indices_unique(
 self,
@@ -3973,8 +3401,9 @@ def are_dimension_indices_unique(
 raise ValueError(
 'Argument "dimension_index_pointers" may not be empty.'
 )
+ dimension_index_pointers = list(dimension_index_pointers)
 for ptr in dimension_index_pointers:
- if ptr not in self._dim_ind_pointers:
+ if ptr not in self._db_man.dimension_index_pointers:
 kw = keyword_for_tag(ptr)
 if kw == '':
 kw = '<no keyword>'
 raise ValueError(
 f'Tag {ptr} ({kw}) is not used as a dimension index '
 'in this image.'
 )
+
+ dimension_index_pointers.append(
+ tag_for_keyword('ReferencedSegmentNumber')
+ )
 return self._db_man.are_dimension_indices_unique(
 dimension_index_pointers
 )

- def _check_indexing_with_source_frames(
- self,
- ignore_spatial_locations: bool = False
- ) -> None:
- """Check if indexing by source frames is possible.
-
- Raise exceptions with useful messages otherwise.
-
- Possible problems include:
- * Spatial locations are not preserved.
- * The dataset does not specify that spatial locations are preserved
- and the user has not asserted that they are.
- * At least one frame in the segmentation lists multiple
- source frames.
-
- Parameters
- ----------
- ignore_spatial_locations: bool
- Allows the user to ignore whether spatial locations are preserved
- in the frames.
-
- """
- # Checks that it is possible to index using source frames in this
- # dataset
- is_tiled_full = (
- hasattr(self, 'DimensionOrganizationType') and
- self.DimensionOrganizationType == 'TILED_FULL'
- )
- if is_tiled_full:
- raise RuntimeError(
- 'Indexing via source frames is not possible when a '
- 'segmentation is stored using the DimensionOrganizationType '
- '"TILED_FULL".'
- )
- elif self._locations_preserved is None:
- if not ignore_spatial_locations:
- raise RuntimeError(
- 'Indexing via source frames is not permissible since this '
- 'image does not specify that spatial locations are '
- 'preserved in the course of deriving the segmentation '
- 'from the source image. 
If you are confident that spatial ' - 'locations are preserved, or do not require that spatial ' - 'locations are preserved, you may override this behavior ' - "with the 'ignore_spatial_locations' parameter." - ) - elif self._locations_preserved == SpatialLocationsPreservedValues.NO: - if not ignore_spatial_locations: - raise RuntimeError( - 'Indexing via source frames is not permissible since this ' - 'image specifies that spatial locations are not preserved ' - 'in the course of deriving the segmentation from the ' - 'source image. If you do not require that spatial ' - ' locations are preserved you may override this behavior ' - "with the 'ignore_spatial_locations' parameter." - ) - if not self._single_source_frame_per_seg_frame: - raise RuntimeError( - 'Indexing via source frames is not permissible since some ' - 'frames in the segmentation specify multiple source frames.' - ) - def get_pixels_by_source_instance( self, source_sop_instance_uids: Sequence[str], @@ -4202,7 +3574,9 @@ def get_pixels_by_source_instance( """ # Check that indexing in this way is possible - self._check_indexing_with_source_frames(ignore_spatial_locations) + self._db_man._check_indexing_with_source_frames( + ignore_spatial_locations + ) # Checks on validity of the inputs if segment_numbers is None: @@ -4231,7 +3605,9 @@ def get_pixels_by_source_instance( # Check that all frame numbers requested actually exist if not assert_missing_frames_are_empty: - unique_uids = self._db_man.get_unique_sop_instance_uids() + unique_uids = ( + self._db_man.get_unique_referenced_sop_instance_uids() + ) missing_uids = set(source_sop_instance_uids) - unique_uids if len(missing_uids) > 0: msg = ( @@ -4454,7 +3830,9 @@ def get_pixels_by_source_frame( """ # Check that indexing in this way is possible - self._check_indexing_with_source_frames(ignore_spatial_locations) + self._db_man._check_indexing_with_source_frames( + ignore_spatial_locations + ) # Checks on validity of the inputs if segment_numbers is None: @@ -4483,7 +3861,9 @@ def get_pixels_by_source_frame( # Check that all frame numbers requested actually exist if not assert_missing_frames_are_empty: - max_frame_number = self._db_man.get_max_frame_number() + max_frame_number = ( + self._db_man.get_max_referenced_frame_number() + ) for f in source_frame_numbers: if f > max_frame_number: msg = ( @@ -4691,15 +4071,26 @@ def get_pixels_by_dimension_index_values( 'Segment numbers may not be empty.' ) + referenced_segment_number_tag = tag_for_keyword( + 'ReferencedSegmentNumber' + ) if dimension_index_pointers is None: - dimension_index_pointers = self._dim_ind_pointers + dimension_index_pointers = [ + t for t in self._db_man.dimension_index_pointers + if t != referenced_segment_number_tag + ] else: if len(dimension_index_pointers) == 0: raise ValueError( 'Argument "dimension_index_pointers" must not be empty.' ) for ptr in dimension_index_pointers: - if ptr not in self._dim_ind_pointers: + if ptr == referenced_segment_number_tag: + raise ValueError( + "Do not include the ReferencedSegmentNumber in the " + "argument 'dimension_index_pointers'." 
+ )
+ if ptr not in self._db_man.dimension_index_pointers:
 kw = keyword_for_tag(ptr)
 if kw == '':
 kw = '<no keyword>'
diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index d5fab854..fa1a6c72 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -1,6 +1,14 @@
-from typing import Sequence, Tuple
+from typing import Optional, Sequence, Tuple

 import numpy as np
+import pydicom
+
+from highdicom._module_utils import is_multiframe_image
+from highdicom.enum import CoordinateSystemNames
+
+
+DEFAULT_SPACING_TOLERANCE = 1e-4
+"""Default tolerance for determining whether slices are regularly spaced."""


 def create_rotation_matrix(
@@ -959,3 +967,192 @@ def are_points_coplanar(
 deviations = normal.T @ points_centered.T
 max_dev = np.abs(deviations).max()
 return max_dev <= tol
+
+
+def get_series_slice_spacing(
+ datasets: Sequence[pydicom.Dataset],
+ tol: float = DEFAULT_SPACING_TOLERANCE,
+) -> Optional[float]:
+ """Get slice spacing, if any, for a series of single frame images.
+
+ First determines whether the image series represents a 3D volume.
+ A 3D volume consists of regularly spaced slices with orthogonal axes, i.e.
+ the slices are spaced equally along the direction orthogonal to the
+ in-plane image coordinates.
+
+ If the series does represent a volume, returns the absolute value of the
+ slice spacing. If the series does not represent a volume, returns None.
+
+ Note that we stipulate that a single image is a 3D volume for the purposes
+ of this function. In this case the returned slice spacing will be 0.0.
+
+ Parameters
+ ----------
+ datasets: Sequence[pydicom.Dataset]
+ Set of datasets representing an imaging series.
+ tol: float
+ Tolerance for determining spacing regularity. If slice spacings vary by
+ less than this tolerance, they are considered to be regular.
+
+ Returns
+ -------
+ float:
+ Absolute value of the regular slice spacing if the series of images
+ meets the definition of a 3D volume, above. None otherwise.
+
+ """
+ if len(datasets) == 0:
+ raise ValueError("List must not be empty.")
+ # We stipulate that a single image does represent a volume with spacing 0.0
+ if len(datasets) == 1:
+ return 0.0
+ for ds in datasets:
+ if is_multiframe_image(ds):
+ raise ValueError(
+ "Datasets should be single-frame images."
+ )
+
+ # Check image orientations are consistent
+ image_orientation = datasets[0].ImageOrientationPatient
+ for ds in datasets[1:]:
+ if ds.ImageOrientationPatient != image_orientation:
+ return None
+
+ positions = np.array(
+ [ds.ImagePositionPatient for ds in datasets]
+ )
+
+ return get_regular_slice_spacing(
+ image_positions=positions,
+ image_orientation=np.array(image_orientation),
+ tol=tol,
+ )
+
+
+def get_regular_slice_spacing(
+ image_positions: np.ndarray,
+ image_orientation: np.ndarray,
+ tol: float = DEFAULT_SPACING_TOLERANCE,
+ sort: bool = True,
+) -> Optional[float]:
+ """Get the regular spacing between a set of image positions, if any.
+
+ A 3D volume consists of regularly spaced slices with orthogonal axes, i.e.
+ the slices are spaced equally along the direction orthogonal to the
+ in-plane image coordinates.
+
+ Note that we stipulate that a single image is a 3D volume for the purposes
+ of this function. In this case the returned slice spacing will be 0.0.
+
+ Parameters
+ ----------
+ image_positions: numpy.ndarray
+ Array of image positions for multiple frames. Should be a numpy array of
+ shape (N, 3) where N is the number of frames.
+ image_orientation: numpy.ndarray
+ Image orientation as direction cosine values taken directly from the
+ ImageOrientationPatient attribute. 1D array of length 6.
+ tol: float
+ Tolerance for determining spacing regularity. If slice spacings vary by
+ less than this tolerance, they are considered to be regular.
+ sort: bool
+ Sort the image positions before finding the spacing. If True, this
+ makes the function tolerant of unsorted inputs. Set to False to check
+ whether the positions represent a 3D volume in the specific order in
+ which they are passed.
+
+ Returns
+ -------
+ Union[float, None]
+ If the image positions are regularly spaced, the absolute value of the
+ slice spacing. If the image positions are not regularly spaced, returns
+ None.
+
+ """
+ image_positions = np.array(image_positions)
+ image_orientation = np.array(image_orientation)
+
+ if image_positions.ndim != 2 or image_positions.shape[1] != 3:
+ raise ValueError(
+ "Argument 'image_positions' should be an (N, 3) array."
+ )
+ if image_orientation.ndim != 1 or image_orientation.shape[0] != 6:
+ raise ValueError(
+ "Argument 'image_orientation' should be an array of "
+ "length 6."
+ )
+ n = image_positions.shape[0]
+ if n == 0:
+ raise ValueError(
+ "Argument 'image_positions' should contain at least 1 position."
+ )
+ elif n == 1:
+ # Special case, we stipulate that this has spacing 0.0
+ return 0.0
+
+ # Find normal vector to the imaging plane
+ v1 = image_orientation[:3]
+ v2 = image_orientation[3:]
+ v3 = np.cross(v1, v2)
+
+ # Calculate distance of each slice from coordinate system origin along the
+ # normal vector
+ origin_distances = v3[None] @ image_positions.T
+ origin_distances = origin_distances.squeeze(0)
+
+ if sort:
+ sort_index = np.argsort(origin_distances)
+ origin_distances = origin_distances[sort_index]
+ else:
+ sort_index = np.arange(image_positions.shape[0])
+
+ spacings = np.diff(origin_distances)
+ avg_spacing = spacings.mean()
+
+ is_regular = np.isclose(
+ avg_spacing,
+ spacings,
+ atol=tol
+ ).all()
+
+ # Additionally check that the vector from the first to the last plane lies
+ # approximately along v3
+ pos1 = image_positions[sort_index[0], :]
+ pos2 = image_positions[sort_index[-1], :]
+ span = (pos2 - pos1)
+ span /= np.linalg.norm(span)
+
+ is_parallel = abs(v3.T @ span - 1.0) < tol
+
+ if is_regular and is_parallel:
+ return abs(avg_spacing)
+ else:
+ return None
+
+
+def get_coordinate_system(
+ dataset: pydicom.Dataset,
+) -> Optional[CoordinateSystemNames]:
+ """Determine which coordinate system an image uses.
+
+ Parameters
+ ----------
+ dataset: pydicom.Dataset
+ Dataset for which the coordinate system is required.
+
+ Returns
+ -------
+ Union[highdicom.enum.CoordinateSystemNames, None]:
+ Coordinate system used by the input image's frame of reference. Returns
+ None if the image does not specify a frame of reference.
+
+ """
+ if not hasattr(dataset, 'FrameOfReferenceUID'):
+ return None
+ if (
+ hasattr(dataset, 'ImageOrientationSlide') or
+ hasattr(dataset, 'ImageCenterPointCoordinatesSequence')
+ ):
+ return CoordinateSystemNames.SLIDE
+ else:
+ return CoordinateSystemNames.PATIENT
diff --git a/src/highdicom/sr/content.py b/src/highdicom/sr/content.py
index d6c294ec..b0442f4e 100644
--- a/src/highdicom/sr/content.py
+++ b/src/highdicom/sr/content.py
@@ -32,6 +32,7 @@
 Scoord3DContentItem,
 UIDRefContentItem,
 )
+from highdicom._module_utils import is_multiframe_image

 logger = logging.getLogger(__name__)

@@ -90,7 +91,7 @@ def _check_frame_numbers_valid_for_dataset(
 referenced_frame_numbers: Optional[Sequence[int]]
 ) -> None:
 if referenced_frame_numbers is not None:
- if not hasattr(dataset, 'NumberOfFrames'):
+ if not is_multiframe_image(dataset):
 raise TypeError(
 'The dataset does not represent a multi-frame dataset, so no '
 'referenced frame numbers should be provided.'

From aea557870ecaaf936afe0288af85b9335eed7dbf Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Thu, 1 Feb 2024 12:34:29 +0000
Subject: [PATCH 07/93] Add multiframe tests

---
 tests/test_multiframe.py | 27 +++++++++++++++++++++++++++
 tests/test_spatial.py | 25 +++++++++++++++++++++++++
 2 files changed, 52 insertions(+)
 create mode 100644 tests/test_multiframe.py

diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py
new file mode 100644
index 00000000..e0927a79
--- /dev/null
+++ b/tests/test_multiframe.py
@@ -0,0 +1,27 @@
+"""Tests for the highdicom._multiframe module."""
+from pydicom import dcmread
+from pydicom.data import get_testdata_file, get_testdata_files
+
+from highdicom._multiframe import MultiFrameDBManager
+
+
+def test_slice_spacing():
+ ct_multiframe = dcmread(
+ get_testdata_file('eCT_Supplemental.dcm')
+ )
+ db = MultiFrameDBManager(ct_multiframe)
+
+ assert db.get_slice_spacing() == 10.0
+
+def test_slice_spacing_irregular():
+ ct_multiframe = dcmread(
+ get_testdata_file('eCT_Supplemental.dcm')
+ )
+
+ # Mock some irregular spacings
+ ct_multiframe.PerFrameFunctionalGroupsSequence[0].\
+ PlanePositionSequence[0].ImagePositionPatient = [1.0, 0.0, 0.0]
+
+ db = MultiFrameDBManager(ct_multiframe)
+
+ assert db.get_slice_spacing() is None
diff --git a/tests/test_spatial.py b/tests/test_spatial.py
index bce8d1f5..d1717c9b 100644
--- a/tests/test_spatial.py
+++ b/tests/test_spatial.py
@@ -1,4 +1,6 @@
 import numpy as np
+from pydicom import dcmread
+from pydicom.data import get_testdata_file, get_testdata_files
 import pytest

 from highdicom.spatial import (
@@ -6,6 +8,7 @@
 PixelToReferenceTransformer,
 ReferenceToImageTransformer,
 ReferenceToPixelTransformer,
+ get_series_slice_spacing,
 )


@@ -451,3 +454,25 @@ def test_map_reference_to_image_coordinate(params, inputs, expected_outputs):
 transform = ReferenceToImageTransformer(**params)
 outputs = transform(inputs)
 np.testing.assert_array_almost_equal(outputs, expected_outputs)
+
+
+def test_get_series_slice_spacing_irregular():
+ # A series of single frame CT images
+ ct_series = [
+ dcmread(f)
+ for f in get_testdata_files('dicomdirtests/77654033/CT2/*')
+ ]
+ spacing = get_series_slice_spacing(ct_series)
+ assert spacing is None
+
+
+def test_get_series_slice_spacing_regular():
+ # Use a subset of this test series that does have regular spacing
+ ct_files = [
+ get_testdata_file('dicomdirtests/77654033/CT2/17196'),
+ get_testdata_file('dicomdirtests/77654033/CT2/17136'),
+ get_testdata_file('dicomdirtests/77654033/CT2/17166'),
+ ]
+ ct_series = [dcmread(f) for f in 
ct_files]
+ spacing = get_series_slice_spacing(ct_series)
+ assert spacing == 1.25

From 9df31ebd92687deb9eb4ba272177643a9f2acc41 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Thu, 1 Feb 2024 16:42:03 +0000
Subject: [PATCH 08/93] Automatically populate the 3D tag

---
 data/test_files/seg_image_sm_control.dcm | Bin 20720 -> 20704 bytes
 src/highdicom/_multiframe.py | 1 -
 src/highdicom/seg/sop.py | 37 +++++++++-
 src/highdicom/spatial.py | 11 +++
 tests/test_multiframe.py | 1 +
 tests/test_seg.py | 87 +++++++++++++++++++++++
 tests/test_spatial.py | 2 +-
 7 files changed, 134 insertions(+), 5 deletions(-)

diff --git a/data/test_files/seg_image_sm_control.dcm b/data/test_files/seg_image_sm_control.dcm
index bf695236c632163863a707d74b5eb252f8f8c2d2..34c3d8d73d0ad613634755da731c5ad51227327f 100644
GIT binary patch
delta 36
rcmeycknzDn#tmGYlQlGzHgj<@De%}aFfjc4&+xzuNN@JiT&4~H-RTP!

delta 49
zcmaE`knzJp#tmGYn{_yu6eJ567#RNjXLyhSq!k#%CTci~pj(QJE

diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py
index 57766034..43ea15a0 100644
--- a/src/highdicom/_multiframe.py
+++ b/src/highdicom/_multiframe.py
@@ -401,7 +401,6 @@ def __init__(
 # Build LUT from columns
 all_defs = ", ".join(col_defs)
 cmd = f'CREATE TABLE FrameLUT({all_defs})'
- print(cmd)
 placeholders = ', '.join(['?'] * len(col_data))
 with self._db_con:
 self._db_con.execute(cmd)
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index 776a1576..fc9e399f 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -75,7 +75,7 @@
 SegmentAlgorithmTypeValues,
 )
 from highdicom.seg.utils import iter_segments
-from highdicom.spatial import ImageToReferenceTransformer
+from highdicom.spatial import ImageToReferenceTransformer, get_regular_slice_spacing, get_series_slice_spacing
 from highdicom.sr.coding import CodedConcept
 from highdicom.valuerep import (
 check_person_name,
@@ -1502,7 +1502,7 @@ def __init__(
 )

 # Dimension Organization Type
- dimension_organization_type = self._check_dimension_organization_type(
+ dimension_organization_type = self._check_tiled_dimension_organization_type(
 dimension_organization_type=dimension_organization_type,
 is_tiled=is_tiled,
 omit_empty_frames=omit_empty_frames,
@@ -1510,6 +1510,37 @@ def __init__(
 rows=self.Rows,
 columns=self.Columns,
 )
+ if self._coordinate_system == CoordinateSystemNames.PATIENT:
+ spacing = get_regular_slice_spacing(
+ image_positions=np.array(plane_position_values[:, 0, :]),
+ image_orientation=np.array(
+ plane_orientation[0].ImageOrientationPatient
+ ),
+ sort=False,
+ enforce_positive=True,
+ )
+
+ if spacing is not None and spacing > 0.0:
+ # The image is a regular volume, so we should record this
+ dimension_organization_type = (
+ DimensionOrganizationTypeValues.THREE_DIMENSIONAL
+ )
+ # Also add the slice spacing to the pixel measures
+ (
+ self.SharedFunctionalGroupsSequence[0]
+ .PixelMeasuresSequence[0]
+ .SpacingBetweenSlices
+ ) = spacing
+ else:
+ if (
+ dimension_organization_type ==
+ DimensionOrganizationTypeValues.THREE_DIMENSIONAL
+ ):
+ raise ValueError(
+ 'Dimension organization "3D" has been specified, '
+ 'but the source image is not a regularly-spaced 3D '
+ 'volume.'
+                )

         if dimension_organization_type is not None:
             self.DimensionOrganizationType = dimension_organization_type.value

@@ -2010,7 +2041,7 @@ def _add_slide_coordinate_metadata(
         self.ImageCenterPointCoordinatesSequence = [center_item]

     @staticmethod
-    def _check_dimension_organization_type(
+    def _check_tiled_dimension_organization_type(
         dimension_organization_type: Union[
             DimensionOrganizationTypeValues,
             str,
diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index fa1a6c72..063ffea7 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -1034,6 +1034,7 @@ def get_regular_slice_spacing(
     image_orientation: np.ndarray,
     tol: float = DEFAULT_SPACING_TOLERANCE,
     sort: bool = True,
+    enforce_postive: bool = False,
 ) -> Optional[float]:
     """Get the regular spacing between set of image positions, if any.

@@ -1060,6 +1061,12 @@
         makes the function tolerant of unsorted inputs. Set to False to check
         whether the positions represent a 3D volume in the specific order in
         which they are passed.
+    enforce_postive: bool
+        If True and sort is False, require that the images are not only
+        regularly spaced but also that they are ordered along the direction of
+        the increasing normal vector, as opposed to being ordered regularly
+        along the direction of the decreasing normal vector. If sort is True,
+        this has no effect.

     Returns
     -------
@@ -1099,6 +1106,7 @@
     # normal vector
     origin_distances = v3[None] @ image_positions.T
     origin_distances = origin_distances.squeeze(0)
+    print(origin_distances)

     if sort:
         sort_index = np.argsort(origin_distances)
@@ -1114,6 +1122,9 @@
         spacings,
         atol=tol
     ).all()
+    if is_regular and enforce_postive:
+        if avg_spacing < 0.0:
+            return None

     # Additionally check that the vector from the first to the last plane lies
     # approximately along v3
diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py
index e0927a79..7274023b 100644
--- a/tests/test_multiframe.py
+++ b/tests/test_multiframe.py
@@ -13,6 +13,7 @@ def test_slice_spacing():

     assert db.get_slice_spacing() == 10.0

+
 def test_slice_spacing_irregular():
     ct_multiframe = dcmread(
         get_testdata_file('eCT_Supplemental.dcm')
diff --git a/tests/test_seg.py b/tests/test_seg.py
index 4a572eaf..8c9b3765 100644
--- a/tests/test_seg.py
+++ b/tests/test_seg.py
@@ -940,6 +940,7 @@ def test_construction(self):
             SegmentsOverlapValues.NO
         with pytest.raises(AttributeError):
             frame_item.PlanePositionSlideSequence
+        assert not hasattr(instance, "DimensionOrganizationType")
         self.check_dimension_index_vals(instance)

     def test_construction_2(self):
@@ -1011,6 +1012,7 @@ def test_construction_2(self):
             SegmentsOverlapValues.NO
         with pytest.raises(AttributeError):
             frame_item.PlanePositionSequence
+        assert instance.DimensionOrganizationType == "TILED_SPARSE"
         self.check_dimension_index_vals(instance)

     def test_construction_3(self):
@@ -1096,6 +1098,7 @@ def test_construction_3(self):
             SegmentsOverlapValues.NO
         with pytest.raises(AttributeError):
             frame_item.PlanePositionSlideSequence
+        assert not hasattr(instance, 'DimensionOrganizationType')
         self.check_dimension_index_vals(instance)

     def test_construction_4(self):
@@ -1175,6 +1178,9 @@ def test_construction_4(self):
             SegmentsOverlapValues.NO
         with pytest.raises(AttributeError):
             frame_item.PlanePositionSlideSequence
+
+        # Frames are regularly spaced but ordered the wrong way in this case
+        assert not hasattr(instance, 'DimensionOrganizationType')
         self.check_dimension_index_vals(instance)

     def
test_construction_5(self): @@ -1259,6 +1265,7 @@ def test_construction_5(self): SegmentsOverlapValues.NO with pytest.raises(AttributeError): frame_item.PlanePositionSlideSequence + assert not hasattr(instance, 'DimensionOrganizationType') self.check_dimension_index_vals(instance) def test_construction_6(self): @@ -1345,6 +1352,7 @@ def test_construction_6(self): assert len(derivation_image_item.SourceImageSequence) == 1 assert SegmentsOverlapValues[instance.SegmentsOverlap] == \ SegmentsOverlapValues.NO + assert not hasattr(instance, 'DimensionOrganizationType') def test_construction_7(self): # A chest X-ray with no frame of reference and multiple segments @@ -1435,6 +1443,85 @@ def test_construction_7(self): assert len(derivation_image_item.SourceImageSequence) == 1 assert SegmentsOverlapValues[instance.SegmentsOverlap] == \ SegmentsOverlapValues.NO + assert not hasattr(instance, 'DimensionOrganizationType') + + def test_construction_3d_multiframe(self): + # The CT multiframe image is already a volume, but the frames are + # ordered the wrong way + volume_multiframe = deepcopy(self._ct_multiframe) + positions = [ + fm.PlanePositionSequence[0].ImagePositionPatient + for fm in volume_multiframe.PerFrameFunctionalGroupsSequence + ] + positions = positions[::-1] + for pos, fm in zip( + positions, + volume_multiframe.PerFrameFunctionalGroupsSequence + ): + fm.PlanePositionSequence[0].ImagePositionPatient = pos + + # Segmentation instance from an enhanced (multi-frame) CT image + instance = Segmentation( + [volume_multiframe], + self._ct_multiframe_mask_array, + SegmentationTypeValues.FRACTIONAL.value, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number + ) + # This is a "volume" image, so the output instance should have + # the DimensionOrganizationType set correctly and should have deduced + # the spacing between slices + assert instance.DimensionOrganizationType == "3D" + spacing = ( + instance + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) + assert spacing == 10.0 + + def test_construction_3d_singleframe(self): + # The CT single frame series is a volume if you omit one of the images + ct_files = [ + get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17166'), + get_testdata_file('dicomdirtests/77654033/CT2/17196'), + ] + ct_series = [dcmread(f) for f in ct_files] + + # Segmentation instance from an enhanced (multi-frame) CT image + instance = Segmentation( + ct_series, + self._ct_series_mask_array[:3], + SegmentationTypeValues.FRACTIONAL.value, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number + ) + # This is a "volume" image, so the output instance should have + # the DimensionOrganizationType set correctly and should have deduced + # the spacing between slices + assert instance.DimensionOrganizationType == "3D" + spacing = ( + instance + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) + assert spacing == 1.25 def test_construction_workers(self): # Create a segmentation with multiple workers diff --git a/tests/test_spatial.py b/tests/test_spatial.py index d1717c9b..99c7ac2b 
100644 --- a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -469,8 +469,8 @@ def test_get_series_slice_spacing_irregular(): def test_get_series_slice_spacing_regular(): # Use a subset of this test series that does have regular spacing ct_files = [ - get_testdata_file('dicomdirtests/77654033/CT2/17196'), get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17196'), get_testdata_file('dicomdirtests/77654033/CT2/17166'), ] ct_series = [dcmread(f) for f in ct_files] From 74b412da4390c126faa78d106fccc9f2313c6bb9 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 2 Feb 2024 10:01:57 +0000 Subject: [PATCH 09/93] Add ability to determine whether a segmentation is a volume --- src/highdicom/_multiframe.py | 86 +++++++++++++++++++++++++++++++----- src/highdicom/seg/sop.py | 35 +++++++++++++++ src/highdicom/spatial.py | 1 - 3 files changed, 109 insertions(+), 13 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 43ea15a0..d88e1de6 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -1,6 +1,7 @@ """Tools for working with multiframe DICOM images.""" from collections import Counter from contextlib import contextmanager +import itertools import logging import sqlite3 from typing import ( @@ -750,8 +751,8 @@ def get_unique_dim_index_values( def get_slice_spacing( self, + split_dimensions: Optional[Sequence[str]] = None, tol: float = DEFAULT_SPACING_TOLERANCE, - split_dimensions: Optional[Sequence[BaseTag]] = None, ) -> Optional[float]: """Get slice spacing, if any, for the image. @@ -785,8 +786,8 @@ def get_slice_spacing( frames exist. For example, if time were included as a split dimension, this function will check whether a 3D volume exists at each timepoint (and that the volume is the same at each time point). Each dimension - index should be provided as a base tags representing the Dimension - Index Pointer. + index should be provided as the keyword representing the relevant + DICOM attribute. 
        Returns
        -------
@@ -807,16 +808,17 @@
             # Stipulate that this does represent a volume
             return 0.0

+        cur = self._db_con.cursor()
+
         if split_dimensions is None:
-            cur = self._db_con.cursor()
-
-            query = """
-                SELECT
-                    ImagePositionPatient_0,
-                    ImagePositionPatient_1,
-                    ImagePositionPatient_2
-                FROM FrameLUT;
-            """
+            query = (
+                'SELECT '
+                'ImagePositionPatient_0, '
+                'ImagePositionPatient_1, '
+                'ImagePositionPatient_2 '
+                'FROM FrameLUT;'
+            )

             image_positions = np.array(
                 [r for r in cur.execute(query)]
@@ -825,10 +827,70 @@
             image_positions=image_positions,
             image_orientation=np.array(self.shared_image_orientation),
             sort=True,
+            tol=tol,
         )
+        else:
+            dim_values = []

-        return spacing
+            # Get lists of all unique values for the specified dimensions
+            for kw in split_dimensions:
+                # Find unique values of this attribute
+                query = f"""
+                    SELECT DISTINCT {kw} FROM FrameLUT;
+                """
+
+                dim_values.append(
+                    [
+                        v[0] for v in cur.execute(query)
+                    ]
+                )
+
+            # Check that each combination of the split dimensions has the
+            # same list of image positions
+            all_image_positions = []
+            for vals in itertools.product(*dim_values):
+                filter_str = ' AND '.join(
+                    f'{kw} = {val}' for kw, val in zip(split_dimensions, vals)
+                )
+                query = (
+                    'SELECT '
+                    'ImagePositionPatient_0, '
+                    'ImagePositionPatient_1, '
+                    'ImagePositionPatient_2 '
+                    'FROM FrameLUT '
+                    'WHERE '
+                    f'{filter_str} '
+                    'ORDER BY '
+                    'ImagePositionPatient_0, '
+                    'ImagePositionPatient_1, '
+                    'ImagePositionPatient_2 '
+                    ';'
+                )
+                image_positions = np.array(
+                    [r for r in cur.execute(query)]
+                )
+                all_image_positions.append(image_positions)
+
+            if len(all_image_positions) > 1:
+                for image_positions in all_image_positions:
+                    if not np.array_equal(
+                        image_positions,
+                        all_image_positions[0]
+                    ):
+                        # The volumes described by each combination of the
+                        # split dimensions have different sets of image
+                        # positions
+                        return None
+
+            spacing = get_regular_slice_spacing(
+                image_positions=all_image_positions[0],
+                image_orientation=np.array(self.shared_image_orientation),
+                sort=True,
+                tol=tol,
+            )
+
+        return spacing


 @contextmanager
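A hedged sketch of how the split-dimension logic above is meant to be exercised, reusing the eCT_Supplemental.dcm test file that appears elsewhere in this series; the commented call at the end is illustrative only, since it assumes an image whose frame table actually contains a ReferencedSegmentNumber column:

    from pydicom import dcmread
    from pydicom.data import get_testdata_file

    from highdicom._multiframe import MultiFrameDBManager

    ct_multiframe = dcmread(get_testdata_file('eCT_Supplemental.dcm'))
    db = MultiFrameDBManager(ct_multiframe)

    # With no splitting, all frames together must form one regular volume
    assert db.get_slice_spacing() == 10.0

    # For an image whose frames are further indexed by another dimension
    # (e.g. a segmentation with multiple segments), a regular volume must
    # instead exist, with identical plane positions, for each value of the
    # split dimension (hypothetical for this CT file, which has no such
    # column):
    #
    #     spacing = db.get_slice_spacing(
    #         split_dimensions=['ReferencedSegmentNumber'],
    #     )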
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index fc9e399f..ac19b7d7 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -3120,6 +3120,41 @@ def segmented_property_types(self) -> List[CodedConcept]:

         return types

+    def is_3d_volume(
+        self,
+        split_dimensions: Optional[Sequence[str]] = None,
+    ) -> bool:
+        """Determine whether this segmentation is a 3D volume.
+
+        For this purpose, a 3D volume is a set of regularly spaced slices in
+        3D space, distributed at regular intervals along the direction of the
+        vector normal to each image plane.
+
+        Parameters
+        ----------
+        split_dimensions: Union[Sequence[str], None], optional
+            Additional dimensions over which to split the frames before
+            checking for a regular volume within each split. Each dimension
+            should be provided as the keyword of the relevant DICOM
+            attribute.
+
+        Returns
+        -------
+        bool:
+            True if the segmentation is a 3D volume. False otherwise.
+
+        """
+        if split_dimensions is not None:
+            split_dimensions = list(split_dimensions)
+            if len(split_dimensions) == 0:
+                raise ValueError(
+                    'Argument "split_dimensions" must not be empty.'
+                )
+            if 'ReferencedSegmentNumber' in split_dimensions:
+                raise ValueError(
+                    'The value "ReferencedSegmentNumber" should not be '
+                    'included in the split dimensions.'
+                )
+        else:
+            split_dimensions = []
+
+        split_dimensions.append('ReferencedSegmentNumber')
+
+        spacing = self._db_man.get_slice_spacing(split_dimensions)
+
+        return spacing is not None
+
     def _get_pixels_by_seg_frame(
         self,
         output_shape: Union[int, Tuple[int, int]],
diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index 063ffea7..231c7dbb 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -1106,7 +1106,6 @@
     # normal vector
     origin_distances = v3[None] @ image_positions.T
     origin_distances = origin_distances.squeeze(0)
-    print(origin_distances)

     if sort:
         sort_index = np.argsort(origin_distances)

From 91e11ceb450bec78204d21025c315eeea59cadda Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Fri, 2 Feb 2024 22:03:31 +0000
Subject: [PATCH 10/93] Fix plane ordering bug; split spatial functions

---
 src/highdicom/seg/content.py |  58 ++++++++++----
 src/highdicom/seg/sop.py     |  96 ++++++++++++-----------
 src/highdicom/spatial.py     | 146 +++++++++++++++++++++++++++------
 3 files changed, 217 insertions(+), 83 deletions(-)

diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py
index cf6672a4..36873bd5 100644
--- a/src/highdicom/seg/content.py
+++ b/src/highdicom/seg/content.py
@@ -14,7 +14,11 @@
 )
 from highdicom.enum import CoordinateSystemNames
 from highdicom.seg.enum import SegmentAlgorithmTypeValues
-from highdicom.spatial import map_pixel_into_coordinate_system
+from highdicom.spatial import (
+    _get_slice_distances,
+    get_normal_vector,
+    map_pixel_into_coordinate_system,
+)
 from highdicom.sr.coding import CodedConcept
 from highdicom.uid import UID
 from highdicom.utils import compute_plane_position_slide_per_frame
@@ -605,7 +609,8 @@ def get_index_position(self, pointer: str) -> int:

     def get_index_values(
         self,
-        plane_positions: Sequence[PlanePositionSequence]
+        plane_positions: Sequence[PlanePositionSequence],
+        image_orientation: Optional[Sequence[float]] = None,
     ) -> Tuple[np.ndarray, np.ndarray]:
         """Get values of indexed attributes that specify position of planes.

@@ -626,6 +631,15 @@
         plane_indices: numpy.ndarray
             1D array of planes indices for sorting frames according to their
             spatial position specified by the dimension index
+        image_orientation: Union[Sequence[float], None], optional
+            An image orientation to use to order frames within a 3D coordinate
+            system. By default (if ``image_orientation`` is ``None``), the
+            plane positions are ordered using their raw numerical values and
+            not along any particular spatial vector. If ``image_orientation``
+            is provided, planes are ordered along the positive direction of
+            the vector normal to the specified orientation. Should be a
+            sequence of 6 floats. This is only valid when the plane position
+            inputs contain only the ImagePositionPatient.

         Note
         ----
@@ -659,21 +673,37 @@
             for p in plane_positions
         ])

-        # Build an array that can be used to sort planes according to the
-        # Dimension Index Value based on the order of the items in the
-        # Dimension Index Sequence.
-        _, plane_sort_indices = np.unique(
-            plane_position_values,
-            axis=0,
-            return_index=True
-        )
+        if image_orientation is not None:
+            if not hasattr(plane_positions[0][0], 'ImagePositionPatient'):
+                raise ValueError(
+                    'Provided "image_orientation" is only valid when '
+                    'plane_positions contain the ImagePositionPatient.'
+ ) + normal_vector = get_normal_vector(image_orientation) + origin_distances = _get_slice_distances( + plane_position_values[:, 0, :], + normal_vector, + ) + _, plane_sort_indices = np.unique( + origin_distances, + return_index=True, + ) + else: + # Build an array that can be used to sort planes according to the + # Dimension Index Value based on the order of the items in the + # Dimension Index Sequence. + _, plane_sort_indices = np.unique( + plane_position_values, + axis=0, + return_index=True + ) if len(plane_sort_indices) != len(plane_positions): raise ValueError( - "Input image/frame positions are not unique according to the " - "Dimension Index Pointers. The generated segmentation would be " - "ambiguous. Ensure that source images/frames have distinct " - "locations." + 'Input image/frame positions are not unique according to the ' + 'Dimension Index Pointers. The generated segmentation would be ' + 'ambiguous. Ensure that source images/frames have distinct ' + 'locations.' ) return (plane_position_values, plane_sort_indices) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index ac19b7d7..e6786844 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -1460,9 +1460,15 @@ def __init__( # number) plane_sort_index is a list of indices into the input # planes giving the order in which they should be arranged to # correctly sort them for inclusion into the segmentation + sort_orientation = ( + plane_orientation[0].ImageOrientationPatient + if self._coordinate_system == CoordinateSystemNames.PATIENT + else None + ) plane_position_values, plane_sort_index = \ self.DimensionIndexSequence.get_index_values( - plane_positions + plane_positions, + image_orientation=sort_orientation, ) are_spatial_locations_preserved = ( @@ -1501,49 +1507,6 @@ def __init__( "the source image." ) - # Dimension Organization Type - dimension_organization_type = self._check_tiled_dimension_organization_type( - dimension_organization_type=dimension_organization_type, - is_tiled=is_tiled, - omit_empty_frames=omit_empty_frames, - plane_positions=plane_positions, - rows=self.Rows, - columns=self.Columns, - ) - if self._coordinate_system == CoordinateSystemNames.PATIENT: - spacing = get_regular_slice_spacing( - image_positions=np.array(plane_position_values[:, 0, :]), - image_orientation=np.array( - plane_orientation[0].ImageOrientationPatient - ), - sort=False, - enforce_postive=True, - ) - - if spacing is not None and spacing > 1.0: - # The image is a regular volume, so we should record this - dimension_organization_type = ( - DimensionOrganizationTypeValues.THREE_DIMENSIONAL - ) - # Also add the slice spacing to the pixel measures - ( - self.SharedFunctionalGroupsSequence[0] - .PixelMeasuresSequence[0] - .SpacingBetweenSlices - ) = spacing - else: - if ( - dimension_organization_type == - DimensionOrganizationTypeValues.THREE_DIMENSIONAL - ): - raise ValueError( - 'Dimension organization "3D" has been specified, ' - 'but the source image is not a regularly-spaced 3D ' - 'volume.' 
- ) - if dimension_organization_type is not None: - self.DimensionOrganizationType = dimension_organization_type.value - # Find indices such that empty planes are removed if omit_empty_frames: if tile_pixel_array: @@ -1589,6 +1552,51 @@ def __init__( else: unique_dimension_values = [None] + # Dimension Organization Type + dimension_organization_type = self._check_tiled_dimension_organization_type( + dimension_organization_type=dimension_organization_type, + is_tiled=is_tiled, + omit_empty_frames=omit_empty_frames, + plane_positions=plane_positions, + rows=self.Rows, + columns=self.Columns, + ) + if self._coordinate_system == CoordinateSystemNames.PATIENT: + spacing = get_regular_slice_spacing( + image_positions=np.array( + plane_position_values[plane_sort_index, 0, :] + ), + image_orientation=np.array( + plane_orientation[0].ImageOrientationPatient + ), + sort=False, + enforce_positive=True, + ) + + if spacing is not None and spacing > 0.0: + # The image is a regular volume, so we should record this + dimension_organization_type = ( + DimensionOrganizationTypeValues.THREE_DIMENSIONAL + ) + # Also add the slice spacing to the pixel measures + ( + self.SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) = spacing + else: + if ( + dimension_organization_type == + DimensionOrganizationTypeValues.THREE_DIMENSIONAL + ): + raise ValueError( + 'Dimension organization "3D" has been specified, ' + 'but the source image is not a regularly-spaced 3D ' + 'volume.' + ) + if dimension_organization_type is not None: + self.DimensionOrganizationType = dimension_organization_type.value + if ( has_ref_frame_uid and self._coordinate_system == CoordinateSystemNames.SLIDE diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 231c7dbb..9705278a 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -1,4 +1,4 @@ -from typing import Optional, Sequence, Tuple +from typing import List, Optional, Sequence, Tuple import numpy as np import pydicom @@ -1030,11 +1030,11 @@ def get_series_slice_spacing( def get_regular_slice_spacing( - image_positions: np.ndarray, - image_orientation: np.ndarray, + image_positions: Sequence[Sequence[float]], + image_orientation: Sequence[float], tol: float = DEFAULT_SPACING_TOLERANCE, sort: bool = True, - enforce_postive: bool = False, + enforce_positive: bool = False, ) -> Optional[float]: """Get the regular spacing between set of image positions, if any. @@ -1047,12 +1047,14 @@ def get_regular_slice_spacing( Parameters ---------- - image_positions: numpy.ndarray - Array of image positions for multiple frames. Should be a numpy array of - shape (N, 3) where N is the number of frames. - image_orientation: numpy.ndarray + image_positions: Sequence[Sequence[float]] + Array of image positions for multiple frames. Should be a 2D array of + shape (N, 3) where N is the number of frames. Either a numpy array or + anything convertible to it may be passed. + image_orientation: Sequence[float] Image orientation as direction cosine values taken directly from the - ImageOrientationPatient attribute. 1D array of length 6. + ImageOrientationPatient attribute. 1D array of length 6. Either a numpy + array or anything convertible to it may be passed. tol: float Tolerance for determining spacing regularity. If slice spacings vary by less that this spacing, they are considered to be regular. @@ -1061,7 +1063,7 @@ def get_regular_slice_spacing( makes the function tolerant of unsorted inputs. 
Set to False to check
         whether the positions represent a 3D volume in the specific order in
         which they are passed.
-    enforce_postive: bool
+    enforce_positive: bool
         If True and sort is False, require that the images are not only
         regularly spaced but also that they are ordered along the direction of
@@ -1077,17 +1079,11 @@
     """
     image_positions = np.array(image_positions)
-    image_orientation = np.array(image_orientation)

     if image_positions.ndim != 2 or image_positions.shape[1] != 3:
         raise ValueError(
             "Argument 'image_positions' should be an (N, 3) array."
         )
-    if image_orientation.ndim != 1 or image_orientation.shape[0] != 6:
-        raise ValueError(
-            "Argument 'image_orientation' should be an array of "
-            "length 6."
-        )
     n = image_positions.shape[0]
     if n == 0:
         raise ValueError(
@@ -1097,15 +1093,11 @@
         # Special case, we stipulate that this has spacing 0.0
         return 0.0

-    # Find normal vector to the imaging plane
-    v1 = image_orientation[:3]
-    v2 = image_orientation[3:]
-    v3 = np.cross(v1, v2)
+    normal_vector = get_normal_vector(image_orientation)

     # Calculate distance of each slice from coordinate system origin along the
     # normal vector
-    origin_distances = v3[None] @ image_positions.T
-    origin_distances = origin_distances.squeeze(0)
+    origin_distances = _get_slice_distances(image_positions, normal_vector)

     if sort:
         sort_index = np.argsort(origin_distances)
@@ -1121,18 +1113,18 @@
         spacings,
         atol=tol
     ).all()
-    if is_regular and enforce_postive:
+    if is_regular and enforce_positive:
         if avg_spacing < 0.0:
             return None

     # Additionally check that the vector from the first to the last plane lies
-    # approximately along v3
+    # approximately along the normal vector
     pos1 = image_positions[sort_index[0], :]
     pos2 = image_positions[sort_index[-1], :]
     span = (pos2 - pos1)
     span /= np.linalg.norm(span)

-    is_perpendicular = abs(v3.T @ span - 1.0) < tol
+    is_perpendicular = abs(normal_vector.T @ span - 1.0) < tol

     if is_regular and is_perpendicular:
         return abs(avg_spacing)
@@ -1140,6 +1132,110 @@
     return None


+def get_normal_vector(
+    image_orientation: Sequence[float],
+) -> np.ndarray:
+    """Get a vector normal to an imaging plane.
+
+    Parameters
+    ----------
+    image_orientation: Sequence[float]
+        Image orientation in the standard DICOM format used for the
+        ImageOrientationPatient and ImageOrientationSlide attributes,
+        consisting of 6 numbers representing the direction cosines along the
+        rows (first three elements) and columns (second three elements).
+
+    Returns
+    -------
+    np.ndarray:
+        Unit normal vector as a NumPy array with shape (3, ).
+
+    """
+    image_orientation = np.array(image_orientation)
+    if image_orientation.ndim != 1 or image_orientation.shape[0] != 6:
+        raise ValueError(
+            "Argument 'image_orientation' should be an array of "
+            "length 6."
+        )
+
+    # Find normal vector to the imaging plane
+    v1 = image_orientation[:3]
+    v2 = image_orientation[3:]
+    v3 = np.cross(v1, v2)
+
+    return v3
+
+
+def get_plane_sort_index(
+    image_positions: Sequence[Sequence[float]],
+    image_orientation: Sequence[float],
+) -> List[int]:
+    """Get sort indices that order planes along their normal vector.
+
+    Parameters
+    ----------
+    image_positions: Sequence[Sequence[float]]
+        Array of image positions for multiple frames. Should be a 2D array of
+        shape (N, 3) where N is the number of frames. Either a numpy array or
+        anything convertible to it may be passed.
+ image_orientation: Sequence[float] + Image orientation as direction cosine values taken directly from the + ImageOrientationPatient attribute. 1D array of length 6. Either a numpy + array or anything convertible to it may be passed. + + Returns + ------- + List[int] + Sorting index for the input planes. Element i of this list gives the + index in the original list of the frames such that the output list + is sorted along the positive direction of the normal vector of the + imaging plane. + + """ + image_positions = np.array(image_positions) + image_orientation = np.array(image_orientation) + + normal_vector = get_normal_vector(image_orientation) + + # Calculate distance of each slice from coordinate system origin along the + # normal vector + origin_distances = _get_slice_distances(image_positions, normal_vector) + + sort_index = np.argsort(origin_distances) + + return sort_index.tolist() + + +def _get_slice_distances( + image_positions: np.ndarray, + normal_vector: np.ndarray, +) -> np.ndarray: + """Get distances of a set of planes from the origin. + + For each plane position, find (signed) distance from origin along the vector normal + to the imaging plane. + + Parameters + ---------- + image_positions: np.ndarray + Image positions array. 2D array of shape (N, 3) where N is the number of + planes and each row gives the (x, y, z) image position of a plane. + normal_vector: np.ndarray + Unit normal vector (perpendicular to the imaging plane). + + Returns + ------- + np.ndarray: + 1D array of shape (N, ) giving signed distance from the origin of each + plane position. + + """ + origin_distances = normal_vector[None] @ image_positions.T + origin_distances = origin_distances.squeeze(0) + + return origin_distances + + def get_coordinate_system( dataset: pydicom.Dataset, ) -> Optional[CoordinateSystemNames]: From a671b27501e76842c1617f01723db4b5f9a72ed5 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 19 May 2024 12:59:44 -0400 Subject: [PATCH 11/93] Spelling --- src/highdicom/spatial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 248806e4..79d47340 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -2390,7 +2390,7 @@ def get_regular_slice_spacing( Returns ------- Union[float, None] - If the image positions are regularly spaced, the (abolute value of) the + If the image positions are regularly spaced, the (absolute value of) the slice spacing. If the image positions are not regularly spaced, returns None. 
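A minimal sketch of the plane-sorting helpers added in PATCH 10 above (get_normal_vector and get_plane_sort_index), assuming a build that includes that patch; the orientation and positions are illustrative values only:

    import numpy as np

    from highdicom.spatial import get_normal_vector, get_plane_sort_index

    # Axial orientation: rows run along +x, columns along +y, so the
    # normal vector is +z
    image_orientation = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    assert np.array_equal(
        get_normal_vector(image_orientation),
        [0.0, 0.0, 1.0],
    )

    # Three plane positions, deliberately out of order along z
    positions = [
        [0.0, 0.0, 5.0],
        [0.0, 0.0, -5.0],
        [0.0, 0.0, 0.0],
    ]

    # Indices that sort the planes along the positive normal direction
    assert get_plane_sort_index(positions, image_orientation) == [1, 2, 0]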
From ba83039324e17509cc5396fca9987aada6b34356 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 27 May 2024 19:46:09 -0400 Subject: [PATCH 12/93] Fix test for multiframe --- src/highdicom/seg/sop.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 94709f8e..60b545ab 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -2789,7 +2789,7 @@ def _get_pffg_item( ) derivation_src_img_item = Dataset() - if 0x00280008 in source_images[0]: # NumberOfFrames + if is_multiframe_image(source_images[0]): # A single multi-frame source image src_img_item = source_images[0] # Frame numbers are one-based From 0e9aa467bb132bdd7608da6a8955cbbe2ccd539d Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 27 May 2024 19:48:51 -0400 Subject: [PATCH 13/93] Fix test for multiframe --- src/highdicom/seg/sop.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 60b545ab..5d9c2d89 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -1900,6 +1900,7 @@ def __init__( are_spatial_locations_preserved=are_spatial_locations_preserved, # noqa: E501 has_ref_frame_uid=has_ref_frame_uid, coordinate_system=self._coordinate_system, + is_multiframe=is_multiframe, ) pffg_sequence.append(pffg_item) @@ -2707,6 +2708,7 @@ def _get_pffg_item( are_spatial_locations_preserved: bool, has_ref_frame_uid: bool, coordinate_system: Optional[CoordinateSystemNames], + is_multiframe: bool, ) -> Dataset: """Get a single item of the Per Frame Functional Groups Sequence. @@ -2731,6 +2733,8 @@ def _get_pffg_item( Whether the sources images have a frame of reference UID. coordinate_system: Optional[highdicom.CoordinateSystemNames] Coordinate system used, if any. + is_multiframe: bool + Whether source images are multiframe. Returns ------- @@ -2789,7 +2793,7 @@ def _get_pffg_item( ) derivation_src_img_item = Dataset() - if is_multiframe_image(source_images[0]): + if is_multiframe: # A single multi-frame source image src_img_item = source_images[0] # Frame numbers are one-based From e53938eb86a689bf73bfe8895f4a477e9515967f Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 10 Jun 2024 00:23:02 -0400 Subject: [PATCH 14/93] WIP implementation of VolumeGeometry --- src/highdicom/_multiframe.py | 4 +- src/highdicom/enum.py | 109 ++++ src/highdicom/seg/sop.py | 2 +- src/highdicom/spatial.py | 986 +++++++++++++++++++++++++++++++++-- tests/test_spatial.py | 213 ++++++++ 5 files changed, 1274 insertions(+), 40 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 816555f7..ebcaae00 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -25,7 +25,7 @@ from highdicom.enum import CoordinateSystemNames from highdicom.seg.enum import SpatialLocationsPreservedValues from highdicom.spatial import ( - DEFAULT_SPACING_TOLERANCE, + _DEFAULT_SPACING_TOLERANCE, get_image_coordinate_system, get_regular_slice_spacing, ) @@ -754,7 +754,7 @@ def get_unique_dim_index_values( def get_slice_spacing( self, split_dimensions: Optional[Sequence[str]] = None, - tol: float = DEFAULT_SPACING_TOLERANCE, + tol: float = _DEFAULT_SPACING_TOLERANCE, ) -> Optional[float]: """Get slice spacing, if any, for the image. 
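A small sketch of the distinction that PATCHes 12 and 13 above rely on, assuming the same pydicom test data used by the tests in this series: is_multiframe_image keys off the SOP Class UID (i.e. the IOD), not the frame count, so an instance of a multi-frame IOD is treated as multiframe even if it happens to contain a single frame:

    from pydicom import dcmread
    from pydicom.data import get_testdata_file

    from highdicom._module_utils import is_multiframe_image

    # An enhanced CT instance, which belongs to a multi-frame IOD
    ct_multiframe = dcmread(get_testdata_file('eCT_Supplemental.dcm'))
    assert is_multiframe_image(ct_multiframe)

    # The old check inspected the NumberOfFrames tag (0028,0008) instead,
    # which describes this particular instance rather than its IOD
    assert 0x00280008 in ct_multiframe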
diff --git a/src/highdicom/enum.py b/src/highdicom/enum.py
index 32f2bf15..9f5eef12 100644
--- a/src/highdicom/enum.py
+++ b/src/highdicom/enum.py
@@ -10,6 +10,115 @@ class CoordinateSystemNames(Enum):
     SLIDE = 'SLIDE'


+class PixelIndexDirections(Enum):
+
+    """
+
+    Enumerated values used to describe indexing conventions of pixel arrays.
+
+    """
+
+    L = 'L'
+    """
+
+    Left: Pixel index that increases moving across the image from right to left.
+
+    """
+
+    R = 'R'
+    """
+
+    Right: Pixel index that increases moving across the image from left to right.
+
+    """
+
+    U = 'U'
+    """
+
+    Up: Pixel index that increases moving up the image from bottom to top.
+
+    """
+
+    D = 'D'
+    """
+
+    Down: Pixel index that increases moving down the image from top to bottom.
+
+    """
+
+    I = 'I'
+    """
+
+    In: Pixel index that increases moving through the slices away from the
+    viewer.
+
+    """
+
+    O = 'O'
+    """
+
+    Out: Pixel index that increases moving through the slices towards the
+    viewer.
+
+    """
+
+
+class PatientFrameOfReferenceDirections(Enum):
+
+    """
+
+    Enumerated values used to describe directions in the patient frame of
+    reference coordinate space.
+
+    """
+
+    L = 'L'
+    """
+
+    Left: Direction that increases moving from the patient's right to left.
+
+    """
+
+    R = 'R'
+    """
+
+    Right: Direction that increases moving from the patient's left to right.
+
+    """
+
+    P = 'P'
+    """
+
+    Posterior: Direction that increases moving from the patient's anterior to
+    posterior.
+
+    """
+
+    A = 'A'
+    """
+
+    Anterior: Direction that increases moving from the patient's posterior to
+    anterior.
+
+    """
+
+    I = 'I'
+    """
+
+    Inferior: Direction that increases moving from the patient's superior to
+    inferior.
+
+    """
+
+    S = 'S'
+    """
+
+    Superior: Direction that increases moving from the patient's inferior to
+    superior.
+
+    """
+
+
 class ContentQualificationValues(Enum):

     """Enumerated values for Content Qualification attribute."""
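A short sketch of how these direction codes combine into an indexing convention, mirroring the validation performed by _normalize_pixel_index_convention below; 'RDI' (column index rightwards, row index downwards, slice index into the screen) corresponds to the convention the affine helpers in this patch assume:

    from highdicom.enum import PixelIndexDirections

    # A valid convention picks exactly one direction from each opposing
    # pair: L/R, U/D, and I/O
    convention = tuple(PixelIndexDirections(d) for d in 'RDI')

    letters = {d.value for d in convention}
    assert ('L' in letters) != ('R' in letters)
    assert ('U' in letters) != ('D' in letters)
    assert ('I' in letters) != ('O' in letters)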
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index 5d9c2d89..0bc47f4d 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -1684,7 +1684,7 @@ def __init__(
                     plane_orientation[0].ImageOrientationPatient
                 ),
                 sort=False,
-                enforce_positive=True,
+                enforce_right_handed=True,
             )

             if spacing is not None and spacing > 0.0:
diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index 28204f09..27510820 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -1,15 +1,27 @@
 import itertools
-from typing import Generator, Iterator, List, Optional, Sequence, Tuple
+from typing import (
+    Generator,
+    Iterator,
+    List,
+    Optional,
+    Sequence,
+    Tuple,
+    Union,
+)

 from pydicom import Dataset
 import numpy as np
 import pydicom

 from highdicom._module_utils import is_multiframe_image
-from highdicom.enum import CoordinateSystemNames
+from highdicom.enum import (
+    CoordinateSystemNames,
+    PixelIndexDirections,
+    PatientFrameOfReferenceDirections,
+)


-DEFAULT_SPACING_TOLERANCE = 1e-4
+_DEFAULT_SPACING_TOLERANCE = 1e-4
 """Default tolerance for determining whether slices are regularly spaced."""


@@ -649,6 +661,121 @@ def _are_images_coplanar(
     return abs(dis_a - dis_b) < tol


+def _normalize_pixel_index_convention(
+    c: Union[str, Sequence[Union[str, PixelIndexDirections]]],
+) -> Tuple[PixelIndexDirections, PixelIndexDirections, PixelIndexDirections]:
+    """Normalize and check a pixel index convention.
+
+    Parameters
+    ----------
+    c: Union[str, Sequence[Union[str, highdicom.enum.PixelIndexDirections]]]
+        Pixel index convention description consisting of three directions,
+        either L or R, either U or D, and either I or O, in any order.
+
+    Returns
+    -------
+    Tuple[highdicom.enum.PixelIndexDirections, highdicom.enum.PixelIndexDirections, highdicom.enum.PixelIndexDirections]:
+        Convention description in a canonical form as a tuple of three enum
+        instances. Furthermore this is guaranteed to be a valid description.
+
+    """  # noqa: E501
+    if len(c) != 3:
+        raise ValueError('Length of pixel index convention must be 3.')
+
+    c = tuple(PixelIndexDirections(d) for d in c)
+
+    c_set = {d.value for d in c}
+
+    criteria = [
+        ('L' in c_set) != ('R' in c_set),
+        ('U' in c_set) != ('D' in c_set),
+        ('I' in c_set) != ('O' in c_set),
+    ]
+    if not all(criteria):
+        c_str = [d.value for d in c]
+        raise ValueError(f'Invalid combination of pixel directions: {c_str}.')
+
+    return c
+
+
+def _normalize_reference_direction_convention(
+    c: Union[str, Sequence[Union[str, PatientFrameOfReferenceDirections]]],
+) -> Tuple[
+    PatientFrameOfReferenceDirections,
+    PatientFrameOfReferenceDirections,
+    PatientFrameOfReferenceDirections,
+]:
+    """Normalize and check a frame of reference direction convention.
+
+    Parameters
+    ----------
+    c: Union[str, Sequence[Union[str, highdicom.enum.PatientFrameOfReferenceDirections]]]
+        Frame of reference convention description consisting of three
+        directions, either L or R, either A or P, and either I or S, in any
+        order.
+
+    Returns
+    -------
+    Tuple[highdicom.enum.PatientFrameOfReferenceDirections, highdicom.enum.PatientFrameOfReferenceDirections, highdicom.enum.PatientFrameOfReferenceDirections]:
+        Convention description in a canonical form as a tuple of three enum
+        instances. Furthermore this is guaranteed to be a valid description.
+
+    """  # noqa: E501
+    if len(c) != 3:
+        raise ValueError(
+            'Length of frame of reference direction convention must be 3.'
+        )
+
+    c = tuple(PatientFrameOfReferenceDirections(d) for d in c)
+
+    c_set = {d.value for d in c}
+
+    criteria = [
+        ('L' in c_set) != ('R' in c_set),
+        ('A' in c_set) != ('P' in c_set),
+        ('I' in c_set) != ('S' in c_set),
+    ]
+    if not all(criteria):
+        c_str = [d.value for d in c]
+        raise ValueError(
+            'Invalid combination of frame of reference directions: '
+            f'{c_str}.'
+        )
+
+    return c
+
+
+def _is_matrix_orthogonal(
+    m: np.ndarray,
+    tol: float = _DEFAULT_EQUALITY_TOLERANCE,
+) -> bool:
+    """Check whether a matrix is orthogonal.
+
+    Note this does not require that the columns have unit norm.
+
+    Parameters
+    ----------
+    m: numpy.ndarray
+        A matrix.
+    tol: float
+        Tolerance. ``m`` will be deemed orthogonal if the product ``m.T @ m``
+        is equal to a diagonal matrix of squared column norms within this
+        tolerance.
+
+    Returns
+    -------
+    bool:
+        True if the matrix ``m`` is a square orthogonal matrix. False
+        otherwise.
+
+    """
+    if m.ndim != 2:
+        raise ValueError(
+            'Argument "m" should be an array with 2 dimensions.'
+        )
+    if m.shape[0] != m.shape[1]:
+        return False
+    norm_squared = (m ** 2).sum(axis=0)
+    return np.allclose(m.T @ m, np.diag(norm_squared), atol=tol)
+
+
 def create_rotation_matrix(
     image_orientation: Sequence[float],
 ) -> np.ndarray:
@@ -661,12 +788,14 @@
         increasing column index) and the column direction (second triplet:
         vertical, top to bottom, increasing row index) direction expressed in
         the three-dimensional patient or slide coordinate system defined by the
-        frame of reference
+        frame of reference.
Returns ------- numpy.ndarray - 3 x 3 rotation matrix + 3 x 3 rotation matrix. Pre-multiplying a pixel index in format (column + index, row index, slice index) by this matrix gives the x, y, z + position in the frame-of-reference coordinate system. """ if len(image_orientation) != 6: @@ -674,10 +803,11 @@ def create_rotation_matrix( row_cosines = np.array(image_orientation[:3], dtype=float) column_cosines = np.array(image_orientation[3:], dtype=float) n = np.cross(row_cosines.T, column_cosines.T) + return np.column_stack([ row_cosines, column_cosines, - n + n, ]) @@ -685,6 +815,8 @@ def _create_affine_transformation_matrix( image_position: Sequence[float], image_orientation: Sequence[float], pixel_spacing: Sequence[float], + spacing_between_slices: float = 1.0, + index_convention: Optional[Sequence[PixelIndexDirections]] = None, ) -> np.ndarray: """Create affine matrix for transformation. @@ -713,11 +845,18 @@ def _create_affine_transformation_matrix( bottom, increasing row index) and the rows direction (second value: spacing between columns: horizontal, left to right, increasing column index) + spacing_between_slices: float + Spacing between consecutive slices. + index_convention: Union[Sequence[highdicom.enum.PixelIndexDirections], None] + Desired convention for the pixel index directions. Must consist of only + D, I, and R. Returns ------- numpy.ndarray - 4 x 4 affine transformation matrix + 4 x 4 affine transformation matrix. Pre-multiplying a pixel index in + format (column index, row index, slice index, 1) by this matrix gives + the (x, y, z, 1) position in the frame-of-reference coordinate system. """ if not isinstance(image_position, Sequence): @@ -738,16 +877,20 @@ def _create_affine_transformation_matrix( z_offset = float(image_position[2]) translation = np.array([x_offset, y_offset, z_offset], dtype=float) - rotation = create_rotation_matrix(image_orientation) + rotation = create_rotation_matrix( + image_orientation, + ) # Column direction (spacing between rows) - column_spacing = float(pixel_spacing[0]) + spacing_between_rows = float(pixel_spacing[0]) # Row direction (spacing between columns) - row_spacing = float(pixel_spacing[1]) - rotation[:, 0] *= row_spacing - rotation[:, 1] *= column_spacing + spacing_between_columns = float(pixel_spacing[1]) + + rotation[:, 0] *= spacing_between_columns + rotation[:, 1] *= spacing_between_rows + rotation[:, 2] *= spacing_between_slices # 4x4 transformation matrix - return np.row_stack( + affine = np.row_stack( [ np.column_stack([ rotation, @@ -757,12 +900,31 @@ def _create_affine_transformation_matrix( ] ) + if index_convention is not None: + current_convention = ( + PixelIndexDirections.R, + PixelIndexDirections.D, + PixelIndexDirections.I, + ) + if set(index_convention) != set(current_convention): + raise ValueError( + 'Index convention must consist of D, I, and R.' + ) + affine = _transform_affine_to_convention( + affine=affine, + shape=(1, 1, 1), # dummy (not used) + from_index_convention=current_convention, + to_index_convention=index_convention, + ) + + return affine + def _create_inv_affine_transformation_matrix( image_position: Sequence[float], image_orientation: Sequence[float], pixel_spacing: Sequence[float], - spacing_between_slices: float = 1.0 + spacing_between_slices: float = 1.0, ) -> np.ndarray: """Create affine matrix for inverse transformation. @@ -794,6 +956,14 @@ def _create_inv_affine_transformation_matrix( Distance (in the coordinate defined by the frame of reference) between neighboring slices. 
        Default: 1

+    Returns
+    -------
+    numpy.ndarray
+        4 x 4 affine transformation matrix. Pre-multiplying a
+        frame-of-reference coordinate in the format (x, y, z, 1) by this
+        matrix gives the pixel indices in the form (column index, row index,
+        slice index, 1).
+
     Raises
     ------
     TypeError
@@ -823,12 +993,13 @@
     translation = np.array([x_offset, y_offset, z_offset])

     rotation = create_rotation_matrix(image_orientation)
+
     # Column direction (spacing between rows)
-    column_spacing = float(pixel_spacing[0])
+    spacing_between_rows = float(pixel_spacing[0])
     # Row direction (spacing between columns)
-    row_spacing = float(pixel_spacing[1])
-    rotation[:, 0] *= row_spacing
-    rotation[:, 1] *= column_spacing
+    spacing_between_columns = float(pixel_spacing[1])
+    rotation[:, 0] *= spacing_between_columns
+    rotation[:, 1] *= spacing_between_rows
     rotation[:, 2] *= spacing_between_slices

     inv_rotation = np.linalg.inv(rotation)
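A worked sketch of the affine construction these helpers implement, using illustrative values: the rotation columns are scaled by the spacing between columns, rows, and slices respectively, and the translation column is the image position:

    import numpy as np

    image_position = [56.0, 34.2, 1.0]
    image_orientation = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    pixel_spacing = [0.5, 0.5]  # (between rows, between columns)
    spacing_between_slices = 2.0

    row_cosines = np.array(image_orientation[:3])
    column_cosines = np.array(image_orientation[3:])
    normal = np.cross(row_cosines, column_cosines)

    rotation = np.column_stack([row_cosines, column_cosines, normal])
    rotation[:, 0] *= pixel_spacing[1]  # spacing between columns
    rotation[:, 1] *= pixel_spacing[0]  # spacing between rows
    rotation[:, 2] *= spacing_between_slices

    affine = np.row_stack([
        np.column_stack([rotation, image_position]),
        [0.0, 0.0, 0.0, 1.0],
    ])

    # (column index, row index, slice index, 1) -> (x, y, z, 1)
    assert np.allclose(
        affine @ [10.0, 0.0, 0.0, 1.0],
        [61.0, 34.2, 1.0, 1.0],
    )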
@@ -844,6 +1015,224 @@
     )


+def _transform_affine_matrix(
+    affine: np.ndarray,
+    shape: Sequence[int],
+    flip_indices: Optional[Sequence[bool]] = None,
+    flip_reference: Optional[Sequence[bool]] = None,
+    permute_indices: Optional[Sequence[int]] = None,
+    permute_reference: Optional[Sequence[int]] = None,
+) -> np.ndarray:
+    """Transform an affine matrix between conventions.
+
+    Parameters
+    ----------
+    affine: np.ndarray
+        4 x 4 affine matrix to transform.
+    shape: Sequence[int]
+        Shape of the array.
+    flip_indices: Optional[Sequence[bool]], optional
+        Whether to flip each of the pixel index axes to index from the other
+        side of the array. Must consist of three boolean values, one for each
+        of the index axes (before any permutation is applied).
+    flip_reference: Optional[Sequence[bool]], optional
+        Whether to flip each of the frame of reference axes about the origin.
+        Must consist of three boolean values, one for each of the frame of
+        reference axes (before any permutation is applied).
+    permute_indices: Optional[Sequence[int]], optional
+        Permutation (if any) to apply to the pixel index axes. Must consist of
+        the values [0, 1, 2] in some order.
+    permute_reference: Optional[Sequence[int]], optional
+        Permutation (if any) to apply to the frame of reference axes. Must
+        consist of the values [0, 1, 2] in some order.
+
+    Returns
+    -------
+    np.ndarray:
+        Affine matrix after operations are applied.
+
+    """
+    if affine.shape != (4, 4):
+        raise ValueError("Affine matrix must have shape (4, 4).")
+    if len(shape) != 3:
+        raise ValueError("Argument 'shape' must have three elements.")
+
+    transformed = affine.copy()
+
+    if flip_indices is not None and any(flip_indices):
+        # Move the origin to the opposite side of the array along each
+        # flipped index axis (index axes correspond to columns of the affine)
+        enable = np.array(flip_indices, np.uint8)
+        offset = transformed[:3, :3] * (np.array(shape).reshape(1, 3) - 1)
+        transformed[:3, 3] += offset @ enable
+
+        # Inverting the columns
+        transformed *= np.array(
+            [*[-1 if x else 1 for x in flip_indices], 1]
+        )
+
+    if flip_reference is not None and any(flip_reference):
+        # Flipping the reference means inverting the rows (including the
+        # translation)
+        row_inv = np.diag(
+            [*[-1 if x else 1 for x in flip_reference], 1]
+        )
+        transformed = row_inv @ transformed
+
+    # Permuting indices is a permutation of the columns
+    if permute_indices is not None:
+        if len(permute_indices) != 3:
+            raise ValueError(
+                'Argument "permute_indices" should have 3 elements.'
+            )
+        if set(permute_indices) != set((0, 1, 2)):
+            raise ValueError(
+                'Argument "permute_indices" should contain elements 0, 1, '
+                'and 2 in some order.'
+            )
+        transformed = transformed[:, [*permute_indices, 3]]
+
+    # Permuting the reference is a permutation of the rows
+    if permute_reference is not None:
+        if len(permute_reference) != 3:
+            raise ValueError(
+                'Argument "permute_reference" should have 3 elements.'
+            )
+        if set(permute_reference) != set((0, 1, 2)):
+            raise ValueError(
+                'Argument "permute_reference" should contain elements 0, 1, '
+                'and 2 in some order.'
+            )
+        transformed = transformed[[*permute_reference, 3], :]
+
+    return transformed
+
+
+def _transform_affine_to_convention(
+    affine: np.ndarray,
+    shape: Sequence[int],
+    from_index_convention: Union[
+        str, Sequence[Union[str, PixelIndexDirections]], None
+    ] = None,
+    to_index_convention: Union[
+        str, Sequence[Union[str, PixelIndexDirections]], None
+    ] = None,
+    from_reference_convention: Union[
+        str, Sequence[Union[str, PatientFrameOfReferenceDirections]], None
+    ] = None,
+    to_reference_convention: Union[
+        str, Sequence[Union[str, PatientFrameOfReferenceDirections]], None
+    ] = None,
+) -> np.ndarray:
+    """Transform an affine matrix between different conventions.
+
+    Parameters
+    ----------
+    affine: np.ndarray
+        Affine matrix to transform.
+    shape: Sequence[int]
+        Shape of the array.
+    from_index_convention: Union[str, Sequence[Union[str, PixelIndexDirections]], None], optional
+        Index convention used in the input affine.
+    to_index_convention: Union[str, Sequence[Union[str, PixelIndexDirections]], None], optional
+        Desired index convention for the output affine.
+    from_reference_convention: Union[str, Sequence[Union[str, PatientFrameOfReferenceDirections]], None], optional
+        Reference convention used in the input affine.
+    to_reference_convention: Union[str, Sequence[Union[str, PatientFrameOfReferenceDirections]], None], optional
+        Desired reference convention for the output affine.
+
+    Returns
+    -------
+    np.ndarray:
+        Affine matrix after operations are applied.
+
+    """  # noqa: E501
+    indices_opposites = {
+        PixelIndexDirections.U: PixelIndexDirections.D,
+        PixelIndexDirections.D: PixelIndexDirections.U,
+        PixelIndexDirections.L: PixelIndexDirections.R,
+        PixelIndexDirections.R: PixelIndexDirections.L,
+        PixelIndexDirections.I: PixelIndexDirections.O,
+        PixelIndexDirections.O: PixelIndexDirections.I,
+    }
+    pfrd = PatientFrameOfReferenceDirections  # shorthand
+    reference_opposites = {
+        pfrd.L: pfrd.R,
+        pfrd.R: pfrd.L,
+        pfrd.A: pfrd.P,
+        pfrd.P: pfrd.A,
+        pfrd.I: pfrd.S,
+        pfrd.S: pfrd.I,
+    }
+
+    if (from_index_convention is None) != (to_index_convention is None):
+        raise TypeError(
+            'Arguments "from_index_convention" and "to_index_convention" '
+            'should either both be None, or neither should be None.'
+ ) + if from_index_convention is not None and to_index_convention is not None: + from_index_normed = _normalize_pixel_index_convention( + from_index_convention + ) + to_index_normed = _normalize_pixel_index_convention( + to_index_convention + ) + flip_indices = [ + d not in to_index_normed for d in from_index_normed + ] + + permute_indices = [] + for d, flipped in zip(to_index_normed, flip_indices): + if flipped: + d_ = indices_opposites[d] + permute_indices.append(from_index_normed.index(d_)) + else: + permute_indices.append(from_index_normed.index(d)) + else: + flip_indices = None + permute_indices = None + + if ( + (from_reference_convention is None) != (to_reference_convention is None) + ): + raise TypeError( + 'Arguments "from_reference_convention" and "to_reference_convention" ' + 'should either both be None, or neither should be None.' + ) + if ( + from_reference_convention is not None + and to_reference_convention is not None + ): + from_reference_normed = _normalize_reference_direction_convention( + from_reference_convention + ) + to_reference_normed = _normalize_reference_direction_convention( + to_reference_convention + ) + + flip_reference = [ + d not in to_reference_normed for d in from_reference_normed + ] + permute_reference = [] + for d, flipped in zip(to_reference_normed, flip_reference): + if flipped: + d_ = reference_opposites[d] + permute_reference.append(from_reference_normed.index(d_)) + else: + permute_reference.append(from_reference_normed.index(d)) + else: + flip_reference = None + permute_reference = None + + return _transform_affine_matrix( + affine=affine, + shape=shape, + permute_indices=permute_indices, + permute_reference=permute_reference, + flip_indices=flip_indices, + flip_reference=flip_reference, + ) + + class PixelToReferenceTransformer: """Class for transforming pixel indices to reference coordinates. @@ -870,7 +1259,8 @@ class PixelToReferenceTransformer: >>> transformer = PixelToReferenceTransformer( ... image_position=[56.0, 34.2, 1.0], ... image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0], - ... pixel_spacing=[0.5, 0.5]) + ... pixel_spacing=[0.5, 0.5], + ... ) >>> >>> # Use the transformer to convert coordinates >>> pixel_indices = np.array([[0, 10], [5, 5]]) @@ -932,7 +1322,7 @@ def __init__( @property def affine(self) -> np.ndarray: """numpy.ndarray: 4x4 affine transformation matrix""" - return self._affine + return self._affine.copy() def __call__(self, indices: np.ndarray) -> np.ndarray: """Transform image pixel indices to frame of reference coordinates. @@ -1135,7 +1525,7 @@ def __init__( @property def affine(self) -> np.ndarray: """numpy.ndarray: 4 x 4 affine transformation matrix""" - return self._affine + return self._affine.copy() def __call__(self, coordinates: np.ndarray) -> np.ndarray: """Transform frame of reference coordinates into image pixel indices. @@ -1390,7 +1780,7 @@ def __init__( @property def affine(self) -> np.ndarray: """numpy.ndarray: 4x4 affine transformation matrix""" - return self._affine + return self._affine.copy() def __call__(self, indices: np.ndarray) -> np.ndarray: """Transform pixel indices between two images. @@ -1606,7 +1996,7 @@ def __init__( @property def affine(self) -> np.ndarray: """numpy.ndarray: 4x4 affine transformation matrix""" - return self._affine + return self._affine.copy() def __call__(self, coordinates: np.ndarray) -> np.ndarray: """Transform image coordinates to frame of reference coordinates. 
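The hunks above and below change each transformer's affine property to return self._affine.copy(). A brief sketch of the motivation, reusing the constructor values from the PixelToReferenceTransformer docstring: a caller mutating the returned matrix must not corrupt the transformer's internal state:

    import numpy as np

    from highdicom.spatial import PixelToReferenceTransformer

    transformer = PixelToReferenceTransformer(
        image_position=[56.0, 34.2, 1.0],
        image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
        pixel_spacing=[0.5, 0.5],
    )

    affine = transformer.affine
    affine[:3, 3] = 0.0  # caller mutates the returned matrix

    # The transformer's own matrix is unaffected
    assert np.array_equal(transformer.affine[:3, 3], [56.0, 34.2, 1.0])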
@@ -1809,7 +2199,7 @@
     @property
     def affine(self) -> np.ndarray:
         """numpy.ndarray: 4 x 4 affine transformation matrix"""
-        return self._affine
+        return self._affine.copy()

     def __call__(self, coordinates: np.ndarray) -> np.ndarray:
         """Apply the inverse of an affine transformation matrix to a batch of
@@ -2059,7 +2449,7 @@
     @property
     def affine(self) -> np.ndarray:
         """numpy.ndarray: 4x4 affine transformation matrix"""
-        return self._affine
+        return self._affine.copy()

     def __call__(self, coordinates: np.ndarray) -> np.ndarray:
         """Transform pixel indices between two images.
@@ -2166,6 +2556,468 @@
     )


+class VolumeGeometry:
+
+    """Class representing the geometry of a regularly-spaced 3D array.
+
+    All such geometries exist within DICOM's patient coordinate system.
+
+    Internally this class uses the following conventions to represent the
+    geometry, however this can be constructed from or transformed to other
+    conventions with appropriate optional parameters to its methods:
+
+    * The pixel indices are ordered (slice index, row index, column index).
+    * Pixel indices are zero-based and represent the center of the pixel.
+    * Row indices are ordered top to bottom, column indices are ordered left
+      to right. The interpretation of the slice indices direction is not
+      defined.
+    * The x, y, z coordinates of the frame-of-reference coordinate system
+      follow the "LPS" convention used in DICOM (see
+      :dcm:`Part 3 Section C.7.6.2.1.1 `).
+      I.e.
+      * The first coordinate (``x``) increases from the patient's right to
+        left.
+      * The second coordinate (``y``) increases from the patient's anterior
+        to posterior.
+      * The third coordinate (``z``) increases from the patient's caudal
+        direction (inferior) to cranial direction (superior).
+
+    Note
+    ----
+    The ordering of pixel indices used by this class (slice, row, column)
+    matches the way pydicom and highdicom represent pixel arrays but differs
+    from the (column, row, slice) convention used by the various "transformer"
+    classes in the ``highdicom.spatial`` module.
+
+    """
+    def __init__(
+        self,
+        affine: np.ndarray,
+        shape: Sequence[int],
+        frame_of_reference_uid: Optional[str] = None,
+        sop_instance_uids: Optional[Sequence[str]] = None,
+        frame_numbers: Optional[Sequence[int]] = None,
+    ):
+        """
+
+        Parameters
+        ----------
+        affine: np.ndarray
+            4 x 4 affine matrix representing the transformation from pixel
+            indices (slice index, row index, column index) to the
+            frame-of-reference coordinate system. The top left 3 x 3 matrix
+            should be a scaled orthogonal matrix representing the rotation and
+            scaling. The top right 3 x 1 vector represents the translation
+            component. The last row should have value [0, 0, 0, 1].
+        shape: Sequence[int]
+            Shape (slices, rows, columns) of the implied volume array.
+        frame_of_reference_uid: Optional[str], optional
+            Frame of reference UID for the frame of reference, if known.
+        sop_instance_uids: Optional[Sequence[str]], optional
+            SOP instance UIDs corresponding to each slice (stacked down
+            dimension 0) of the implied volume. This is relevant if and only
+            if the volume is formed from a series of single frame DICOM
+            images.
+        frame_numbers: Optional[Sequence[int]], optional
+            Frame numbers corresponding to each slice (stacked down
+            dimension 0) of the implied volume. This is relevant if and only
+            if the volume is formed from a set of frames of a single
+            multiframe DICOM image.
+ + """ + + if affine.shape != (4, 4): + raise ValueError("Affine matrix must have shape (4, 4).") + if not np.array_equal(affine[-1, :], np.array([0.0, 0.0, 0.0, 1.0])): + raise ValueError( + "Final row of affine matrix must be [0.0, 0.0, 0.0, 1.0]." + ) + if not _is_matrix_orthogonal(affine[:3, :3]): + raise ValueError( + "Argument 'affine' must be an orthogonal matrix." + ) + if len(shape) != 3: + raise ValueError( + "Argument 'shape' must have three elements." + ) + + self._affine = affine + self._shape = tuple(shape) + self._frame_of_reference_uid = frame_of_reference_uid + if frame_numbers is not None: + if any(not isinstance(f, int) for f in frame_numbers): + raise TypeError( + "Argument 'frame_numbers' should be a sequence of ints." + ) + if any(f < 1 for f in frame_numbers): + raise ValueError( + "Argument 'frame_numbers' should contain only (strictly) " + "positive integers." + ) + if len(frame_numbers) != shape[0]: + raise ValueError( + "Length of 'frame_numbers' should match first item of " + "'shape'." + ) + self._frame_numbers = list(frame_numbers) + else: + self._frame_numbers = None + if sop_instance_uids is not None: + if any(not isinstance(u, str) for u in sop_instance_uids): + raise TypeError( + "Argument 'sop_instance_uids' should be a sequence of " + "str." + ) + if len(sop_instance_uids) != shape[0]: + raise ValueError( + "Length of 'sop_instance_uids' should match first item " + "of 'shape'." + ) + self._sop_instance_uids = list(sop_instance_uids) + else: + self._sop_instance_uids = None + + @classmethod + def for_image_series( + cls, + series_datasets: Sequence[Dataset], + ) -> "VolumeGeometry": + """Get volume geometry for a series of single frame images. + + Parameters + ---------- + series_datasets: Sequence[pydicom.Dataset] + Series of single frame datasets. There is no requirement on the + sorting of the datasets. + + Returns + ------- + VolumeGeometry: + Object representing the geometry of the series. + + """ + coordinate_system = get_image_coordinate_system(series_datasets[0]) + if ( + coordinate_system is None or + coordinate_system != CoordinateSystemNames.PATIENT + ): + raise ValueError( + "Dataset should exist in the patient " + "coordinate_system." + ) + frame_of_reference_uid = series_datasets[0].FrameOfReferenceUID + if not all( + ds.FrameOfReferenceUID == frame_of_reference_uid + for ds in series_datasets + ): + raise ValueError('Images do not share a frame of reference.') + + series_datasets = sort_datasets(series_datasets) + sorted_sop_instance_uids = [ + ds.SOPInstanceUID for ds in series_datasets + ] + + slice_spacing = get_series_slice_spacing(series_datasets) + if slice_spacing is None: + raise ValueError('Series is not a regularly spaced volume.') + ds = series_datasets[0] + shape = (len(series_datasets), ds.Rows, ds.Columns) + affine = _create_affine_transformation_matrix( + image_position=ds.ImagePositionPatient, + image_orientation=ds.ImageOrientationPatient, + pixel_spacing=ds.PixelSpacing, + spacing_between_slices=slice_spacing, + index_convention=( + PixelIndexDirections.I, + PixelIndexDirections.D, + PixelIndexDirections.R, + ), + ) + + return cls( + affine=affine, + shape=shape, + frame_of_reference_uid=frame_of_reference_uid, + sop_instance_uids=sorted_sop_instance_uids, + ) + + @classmethod + def for_image( + cls, + dataset: Dataset, + ) -> "VolumeGeometry": + """Get volume geometry for a multiframe image. + + Parameters + ---------- + dataset: pydicom.Dataset + A multi-frame image dataset. 
+ + Returns + ------- + VolumeGeometry: + Object representing the geometry of the image. + + """ + if not is_multiframe_image(dataset): + raise ValueError( + 'Dataset should be a multi-frame image.' + ) + coordinate_system = get_image_coordinate_system(dataset) + if ( + coordinate_system is None or + coordinate_system != CoordinateSystemNames.PATIENT + ): + raise ValueError( + "Dataset should exist in the patient " + "coordinate_system." + ) + sfgs = dataset.SharedFunctionalGroupsSequence[0] + if 'PlaneOrientationSequence' not in sfgs: + raise ValueError('Frames do not share an orientation.') + image_orientation = ( + sfgs + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + pffgs = dataset.PerFrameFunctionalGroupsSequence + image_positions = [ + g.PlanePositionSequence[0].ImagePositionPatient + for g in pffgs + ] + sort_index = get_plane_sort_index( + image_positions, + image_orientation, + ) + sorted_positions = [image_positions[i] for i in sort_index] + sorted_frame_numbers = [f + 1 for f in sort_index] + + if 'PixelMeasuresSequence' not in sfgs: + raise ValueError('Frames do not share pixel measures.') + pixel_spacing = sfgs.PixelMeasuresSequence[0].PixelSpacing + + slice_spacing = get_regular_slice_spacing( + image_positions=image_positions, + image_orientation=image_orientation, + ) + if slice_spacing is None: + raise ValueError( + 'Dataset does not represent a regularly sampled volume.' + ) + + shape = (dataset.NumberOfFrames, dataset.Rows, dataset.Columns) + affine = _create_affine_transformation_matrix( + image_position=sorted_positions[0], + image_orientation=image_orientation, + pixel_spacing=pixel_spacing, + spacing_between_slices=slice_spacing, + index_convention=( + PixelIndexDirections.I, + PixelIndexDirections.D, + PixelIndexDirections.R, + ), + ) + + return cls( + affine=affine, + shape=shape, + frame_of_reference_uid=dataset.FrameOfReferenceUID, + frame_numbers=sorted_frame_numbers, + ) + + @classmethod + def from_attributes( + cls, + image_position: Sequence[float], + image_orientation: Sequence[float], + pixel_spacing: Sequence[float], + spacing_between_slices: float, + rows:int, + columns: int, + number_of_frames: int, + frame_of_reference_uid: Optional[str] = None, + sop_instance_uids: Optional[Sequence[str]] = None, + frame_numbers: Optional[Sequence[int]] = None, + ) -> "VolumeGeometry": + """""" + affine = _create_affine_transformation_matrix( + image_position=image_position, + image_orientation=image_orientation, + pixel_spacing=pixel_spacing, + spacing_between_slices=spacing_between_slices, + index_convention=( + PixelIndexDirections.I, + PixelIndexDirections.D, + PixelIndexDirections.R, + ), + ) + shape = (number_of_frames, rows, columns) + return cls( + affine=affine, + shape=shape, + frame_of_reference_uid=frame_of_reference_uid, + sop_instance_uids=sop_instance_uids, + frame_numbers=frame_numbers, + ) + + @classmethod + def from_components( + cls, + position: Sequence[float], + direction: Sequence[float], + spacing: Sequence[float], + shape: Sequence[int], + frame_of_reference_uid: Optional[str] = None, + sop_instance_uids: Optional[Sequence[str]] = None, + frame_numbers: Optional[Sequence[int]] = None, + ) -> "VolumeGeometry": + """""" + if not isinstance(position, Sequence): + raise TypeError('Argument "position" must be a sequence.') + if len(position) != 3: + raise ValueError('Argument "position" must have length 3.') + if not isinstance(spacing, Sequence): + raise TypeError('Argument "spacing" must be a sequence.') + if len(spacing) != 3: + 
raise ValueError('Argument "spacing" must have length 3.') + direction_arr = np.array(direction, dtype=np.float32) + if direction_arr.shape == (9, ): + direction_arr = direction_arr.reshape(3, 3) + elif direction_arr.shape == (3, 3): + pass + else: + raise ValueError( + "Argument 'direction' must have shape (9, ) or (3, 3)." + ) + scaled_direction = direction_arr * spacing + affine = np.row_stack( + [ + np.column_stack([scaled_direction, position]), + [0.0, 0.0, 0.0, 1.0] + ] + ) + return cls( + affine=affine, + shape=shape, + frame_of_reference_uid=frame_of_reference_uid, + sop_instance_uids=sop_instance_uids, + frame_numbers=frame_numbers, + ) + + def get_index_for_frame_number( + self, + frame_number: int, + ) -> int: + """Get the slice index for a frame number. + + This is intended for volumes representing for multi-frame images. + + Parameters + ---------- + frame_number: int + 1-based frame number in the original image. + + Returns + ------- + 0-based index of this frame number down the + slice dimension (axis 0) of the volume. + + """ + if self._frame_numbers is None: + raise RuntimeError( + "Frame information is not present." + ) + return self._frame_numbers.index(frame_number) + + def get_index_for_sop_instance_uid( + self, + sop_instance_uid: str, + ) -> int: + """Get the slice index for a SOP Instance UID. + + This is intended for volumes representing a series of single-frame + images. + + Parameters + ---------- + sop_instance_uid: str + SOP Instance of a particular image in the series. + + Returns + ------- + 0-based index of the image with the given SOP Instance UID down the + slice dimension (axis 0) of the volume. + + """ + if self._sop_instance_uids is None: + raise RuntimeError( + "SOP Instance UID information is not present." + ) + return self._sop_instance_uids.index(sop_instance_uid) + + @property + def frame_of_reference_uid(self) -> Optional[str]: + """Union[str, None]: Frame of reference UID.""" + return self._frame_of_reference_uid + + @property + def affine(self) -> np.ndarray: + """numpy.ndarray: 4x4 affine transformation matrix""" + return self._affine.copy() + + @property + def shape(self) -> Tuple[int, int, int]: + """Tuple[int, int, int]: Shape of the volume.""" + return self._shape + + @property + def sop_instance_uids(self) -> Union[List[str], None]: + """Union[List[str], None]: SOP Instance UID at each index.""" + if self._sop_instance_uids is not None: + return self._sop_instance_uids.copy() + + @property + def frame_numbers(self) -> Union[List[int], None]: + """Union[List[int], None]: Frame number at each index.""" + if self._frame_numbers is not None: + return self._frame_numbers.copy() + + @property + def direction_cosines(self) -> List[float]: + vec_along_rows = self._affine[:3, 2].copy() + vec_along_columns = self._affine[:3, 1].copy() + vec_along_columns /= np.sqrt((vec_along_columns ** 2).sum()) + vec_along_rows /= np.sqrt((vec_along_rows ** 2).sum()) + return [*vec_along_rows.tolist(), *vec_along_columns.tolist()] + + @property + def pixel_spacing(self) -> List[float]: + vec_along_rows = self._affine[:3, 2] + vec_along_columns = self._affine[:3, 1] + spacing_between_columns = np.sqrt((vec_along_rows ** 2).sum()).item() + spacing_between_rows = np.sqrt((vec_along_columns ** 2).sum()).item() + return [spacing_between_rows, spacing_between_columns] + + @property + def spacing_between_slices(self) -> List[float]: + slice_vec = self._affine[:3, 0] + spacing = np.sqrt((slice_vec ** 2).sum()).item() + return spacing + + @property + def spacing(self) -> 
List[float]:
+        dir_mat = self._affine[:3, :3]
+        norms = np.sqrt((dir_mat ** 2).sum(axis=0))
+        return norms.tolist()
+
+    @property
+    def position(self) -> List[float]:
+        return self._affine[:3, 3].tolist()
+
+    @property
+    def direction(self) -> np.ndarray:
+        dir_mat = self._affine[:3, :3]
+        norms = np.sqrt((dir_mat ** 2).sum(axis=0))
+        return dir_mat / norms
+
+
 def map_pixel_into_coordinate_system(
     index: Sequence[int],
     image_position: Sequence[float],
@@ -2354,7 +3206,7 @@ def are_points_coplanar(
 
 def get_series_slice_spacing(
     datasets: Sequence[pydicom.Dataset],
-    tol: float = DEFAULT_SPACING_TOLERANCE,
+    tol: float = _DEFAULT_SPACING_TOLERANCE,
 ) -> Optional[float]:
     """Get slice spacing, if any, for a series of single frame images.
 
@@ -2415,9 +3267,9 @@ def get_series_slice_spacing(
 def get_regular_slice_spacing(
     image_positions: Sequence[Sequence[float]],
     image_orientation: Sequence[float],
-    tol: float = DEFAULT_SPACING_TOLERANCE,
+    tol: float = _DEFAULT_SPACING_TOLERANCE,
     sort: bool = True,
-    enforce_positive: bool = False,
+    enforce_right_handed: bool = False,
 ) -> Optional[float]:
     """Get the regular spacing between set of image positions, if any.
 
@@ -2448,10 +3300,12 @@ def get_regular_slice_spacing(
         which they are passed.
-    enforce_positive: bool
+    enforce_right_handed: bool
         If True and sort is False, require that the images are not only
-        regularly spaced but also that they are ordered along the direction of
-        the increasing normal vector, as opposed to being ordered regularly
-        along the direction of the decreasing normal vector. If sort is False,
-        this has no effect.
+        regularly spaced but also that they are ordered correctly to give a
+        right-handed coordinate system, i.e. frames are ordered along the
+        direction of the increasing normal vector, as opposed to being ordered
+        regularly along the direction of the decreasing normal vector. If sort
+        is True, this has no effect since positions will be sorted in the
+        right-handed direction before finding the spacing.
 
     Returns
     -------
@@ -2496,7 +3350,7 @@ def get_regular_slice_spacing(
         spacings,
         atol=tol
     ).all()
 
-    if is_regular and enforce_positive:
+    if is_regular and enforce_right_handed:
         if avg_spacing < 0.0:
             return None
 
@@ -2575,20 +3429,78 @@ def get_plane_sort_index(
         imaging plane.
 
     """
-    image_positions = np.array(image_positions)
-    image_orientation = np.array(image_orientation)
+    pos_arr = np.array(image_positions)
+    if pos_arr.ndim != 2 or pos_arr.shape[1] != 3:
+        raise ValueError("Argument 'image_positions' must have shape (N, 3)")
+    ori_arr = np.array(image_orientation)
+    if ori_arr.ndim != 1 or ori_arr.shape[0] != 6:
+        raise ValueError("Argument 'image_orientation' must have shape (6, )")
 
-    normal_vector = get_normal_vector(image_orientation)
+    normal_vector = get_normal_vector(ori_arr)
 
     # Calculate distance of each slice from coordinate system origin along the
     # normal vector
-    origin_distances = _get_slice_distances(image_positions, normal_vector)
+    origin_distances = _get_slice_distances(pos_arr, normal_vector)
 
     sort_index = np.argsort(origin_distances)
 
     return sort_index.tolist()
 
 
+def get_dataset_sort_index(datasets: Sequence[Dataset]) -> List[int]:
+    """Get index to sort single frame datasets spatially.
+
+    Parameters
+    ----------
+    datasets: Sequence[pydicom.Dataset]
+        Datasets containing single frame images, with a consistent orientation.
+
+    Returns
+    -------
+    List[int]
+        Sorting index for the input datasets. Element i of this list gives the
+        index in the original list of datasets such that the output list is
+        sorted along the positive direction of the normal vector of the imaging
+        plane.
+
+    """
+    if is_multiframe_image(datasets[0]):
+        raise ValueError('Datasets should be single frame images.')
+    if 'ImageOrientationPatient' not in datasets[0]:
+        raise AttributeError(
+            'Datasets do not have an orientation.'
+        )
+    image_orientation = datasets[0].ImageOrientationPatient
+    if not all(
+        np.allclose(ds.ImageOrientationPatient, image_orientation)
+        for ds in datasets
+    ):
+        raise ValueError('Datasets do not have a consistent orientation.')
+    positions = [ds.ImagePositionPatient for ds in datasets]
+    return get_plane_sort_index(positions, image_orientation)
+
+
+def sort_datasets(datasets: Sequence[Dataset]) -> List[Dataset]:
+    """Sort single frame datasets spatially.
+
+    Parameters
+    ----------
+    datasets: Sequence[pydicom.Dataset]
+        Datasets containing single frame images, with a consistent orientation.
+
+    Returns
+    -------
+    List[Dataset]
+        Copy of the input list of datasets, sorted such that the datasets
+        proceed along the positive direction of the normal vector of the
+        imaging plane.
+
+    """
+    sort_index = get_dataset_sort_index(datasets)
+    return [datasets[i] for i in sort_index]
+
+
 def _get_slice_distances(
     image_positions: np.ndarray,
     normal_vector: np.ndarray,
diff --git a/tests/test_spatial.py b/tests/test_spatial.py
index 47b16406..6e9686e9 100644
--- a/tests/test_spatial.py
+++ b/tests/test_spatial.py
@@ -11,7 +11,9 @@
     PixelToReferenceTransformer,
     ReferenceToImageTransformer,
     ReferenceToPixelTransformer,
+    VolumeGeometry,
     _are_images_coplanar,
+    _transform_affine_matrix,
     get_series_slice_spacing,
     is_tiled_image,
 )
@@ -888,3 +890,214 @@ def test_get_series_slice_spacing_regular():
     ct_series = [pydicom.dcmread(f) for f in ct_files]
     spacing = get_series_slice_spacing(ct_series)
     assert spacing == 1.25
+
+
+def test_transform_affine_matrix():
+    affine = np.array(
+        [
+            [np.cos(np.radians(30)), -np.sin(np.radians(30)), 0.0, -34.0],
+            [np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0, 45.2],
+            [0.0, 0.0, 1.0, -1.2],
+            [0.0, 0.0, 0.0, 1.0],
+        ]
+    )
+
+    transformed = _transform_affine_matrix(
+        affine,
+        permute_indices=[1, 2, 0],
+        shape=[10, 10, 10],
+    )
+    expected = np.array(
+        [
+            [-np.sin(np.radians(30)), 0.0, np.cos(np.radians(30)), -34.0],
+            [np.cos(np.radians(30)), 0.0, np.sin(np.radians(30)), 45.2],
+            [0.0, 1.0, 0.0, -1.2],
+            [0.0, 0.0, 0.0, 1.0],
+        ]
+    )
+    assert np.array_equal(transformed, expected)
+
+    transformed = _transform_affine_matrix(
+        affine,
+        permute_reference=[1, 2, 0],
+        shape=[10, 10, 10],
+    )
+    expected = np.array(
+        [
+            [np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0, 45.2],
+            [0.0, 0.0, 1.0, -1.2],
+            [np.cos(np.radians(30)), -np.sin(np.radians(30)), 0.0, -34.0],
+            [0.0, 0.0, 0.0, 1.0],
+        ]
+    )
+    assert np.array_equal(transformed, expected)
+
+    transformed = _transform_affine_matrix(
+        affine,
+        flip_indices=[True, False, True],
+        shape=[10, 10, 10],
+    )
+    expected = np.array(
+        [
+            [-np.cos(np.radians(30)), -np.sin(np.radians(30)), 0.0, -26.20577137],
+            [-np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0, 40.7],
+            [0.0, 0.0, -1.0, 7.8],
+            [0.0, 0.0, 0.0, 1.0],
+        ]
+    )
+    print(affine)
+    print(transformed)
+    print(expected)
+    assert np.allclose(transformed, expected)
+
+    transformed = _transform_affine_matrix(
+        affine,
+        flip_reference=[True, False, True],
+        shape=[10, 10, 10],
+ ) + expected = np.array( + [ + [-np.cos(np.radians(30)), np.sin(np.radians(30)), 0.0, 34.0], + [np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0, 45.2], + [0.0, 0.0, -1.0, 1.2], + [0.0, 0.0, 0.0, 1.0], + ] + ) + assert np.array_equal(transformed, expected) + + +@pytest.mark.parametrize( + 'image_position,image_orientation,pixel_spacing,spacing_between_slices', + [ + ( + (67.0, 32.4, -45.2), + (1.0, 0.0, 0.0, 0.0, -1.0, 0.0), + (3.2, 1.6), + 1.25, + ), + ( + [67.0, 32.4, -45.2], + (-1.0, 0.0, 0.0, 0.0, -1.0, 0.0), + (3.2, 1.6), + 1.25, + ), + ( + (-67.0, 132.4, -5.2), + (0.0, 0.0, -1.0, 1.0, 0.0, 0.0), + (0.25, 0.25), + 3.5, + ), + ( + (-67.0, 132.4, -5.2), + ( + np.cos(np.radians(30)), -np.sin(np.radians(30)), 0.0, + np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0, + ), + (0.75, 0.25), + 3.5, + ), + ], +) +def test_geometry_from_attributes( + image_position, + image_orientation, + pixel_spacing, + spacing_between_slices, +): + geometry = VolumeGeometry.from_attributes( + rows=10, + columns=10, + number_of_frames=10, + image_position=image_position, + image_orientation=image_orientation, + pixel_spacing=pixel_spacing, + spacing_between_slices=spacing_between_slices, + ) + assert geometry.position == list(image_position) + assert geometry.direction_cosines == list(image_orientation) + assert geometry.pixel_spacing == list(pixel_spacing) + assert geometry.spacing_between_slices == spacing_between_slices + + +def test_volume_geometry_single_frame(): + ct_files = [ + get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17196'), + get_testdata_file('dicomdirtests/77654033/CT2/17166'), + ] + ct_series = [pydicom.dcmread(f) for f in ct_files] + geometry = VolumeGeometry.for_image_series(ct_series) + assert isinstance(geometry, VolumeGeometry) + rows, columns = ct_series[0].Rows, ct_series[0].Columns + assert geometry.shape == (len(ct_files), rows, columns) + assert geometry.frame_numbers is None + sop_instance_uids = [ + ct_series[0].SOPInstanceUID, + ct_series[2].SOPInstanceUID, + ct_series[1].SOPInstanceUID, + ] + assert geometry.sop_instance_uids == sop_instance_uids + assert geometry.get_index_for_sop_instance_uid( + ct_series[2].SOPInstanceUID + ) == 1 + with pytest.raises(RuntimeError): + geometry.get_index_for_frame_number(2) + orientation = ct_series[0].ImageOrientationPatient + assert geometry.direction_cosines == orientation + direction = geometry.direction + assert np.array_equal(direction[:, 1], orientation[3:]) + assert np.array_equal(direction[:, 2], orientation[:3]) + # Check third direction is normal to others + assert direction[:, 0] @ direction[:, 1] == 0.0 + assert direction[:, 0] @ direction[:, 2] == 0.0 + assert (direction[:, 0] ** 2).sum() == 1.0 + assert geometry.position == ct_series[0].ImagePositionPatient + assert geometry.pixel_spacing == ct_series[0].PixelSpacing + slice_spacing = 1.25 + assert geometry.spacing == [slice_spacing, *ct_series[0].PixelSpacing[::-1]] + + +def test_volume_geometry_multiframe(): + dcm = pydicom.dcmread(get_testdata_file('eCT_Supplemental.dcm')) + geometry = VolumeGeometry.for_image(dcm) + assert isinstance(geometry, VolumeGeometry) + rows, columns = dcm.Rows, dcm.Columns + assert geometry.shape == (dcm.NumberOfFrames, rows, columns) + assert geometry.frame_numbers == [2, 1] + assert geometry.sop_instance_uids is None + with pytest.raises(RuntimeError): + geometry.get_index_for_sop_instance_uid( + dcm.SOPInstanceUID + ) + assert geometry.get_index_for_frame_number(2) == 0 + orientation 
= (
+        dcm
+        .SharedFunctionalGroupsSequence[0]
+        .PlaneOrientationSequence[0]
+        .ImageOrientationPatient
+    )
+    pixel_spacing = (
+        dcm
+        .SharedFunctionalGroupsSequence[0]
+        .PixelMeasuresSequence[0]
+        .PixelSpacing
+    )
+    assert geometry.direction_cosines == orientation
+    direction = geometry.direction
+    assert np.array_equal(direction[:, 1], orientation[3:])
+    assert np.array_equal(direction[:, 2], orientation[:3])
+    # Check third direction is normal to others
+    assert direction[:, 0] @ direction[:, 1] == 0.0
+    assert direction[:, 0] @ direction[:, 2] == 0.0
+    assert (direction[:, 0] ** 2).sum() == 1.0
+    first_frame = geometry.frame_numbers[0]
+    first_frame_pos = (
+        dcm
+        .PerFrameFunctionalGroupsSequence[first_frame - 1]
+        .PlanePositionSequence[0]
+        .ImagePositionPatient
+    )
+    assert geometry.position == first_frame_pos
+    assert geometry.pixel_spacing == pixel_spacing
+    slice_spacing = 10.0
+    assert geometry.spacing == [slice_spacing, *pixel_spacing[::-1]]

From 0a7f6baabbc75dd4315c1ba72bbf680cecdfece5 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Mon, 10 Jun 2024 18:55:43 -0400
Subject: [PATCH 15/93] Remove print statements from tests

---
 tests/test_spatial.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tests/test_spatial.py b/tests/test_spatial.py
index 6e9686e9..b2b13d95 100644
--- a/tests/test_spatial.py
+++ b/tests/test_spatial.py
@@ -945,9 +945,6 @@ def test_transform_affine_matrix():
             [0.0, 0.0, 0.0, 1.0],
         ]
     )
-    print(affine)
-    print(transformed)
-    print(expected)
     assert np.allclose(transformed, expected)
 
     transformed = _transform_affine_matrix(

From 8ae190019a61346ac9cca4252d59ee51db8a11cc Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Fri, 14 Jun 2024 21:52:05 -0400
Subject: [PATCH 16/93] Add some docstrings

---
 src/highdicom/spatial.py | 58 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 56 insertions(+), 2 deletions(-)

diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index 27510820..e1c77ea6 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -2639,6 +2639,8 @@ def __init__(
         )
 
         self._affine = affine
+        if len(shape) != 3:
+            raise ValueError("Argument 'shape' must have three items.")
         self._shape = tuple(shape)
         self._frame_of_reference_uid = frame_of_reference_uid
         if frame_numbers is not None:
@@ -2836,7 +2838,56 @@ def from_attributes(
         sop_instance_uids: Optional[Sequence[str]] = None,
         frame_numbers: Optional[Sequence[int]] = None,
     ) -> "VolumeGeometry":
-        """"""
+        """Create a volume geometry from DICOM attributes.
+
+        Parameters
+        ----------
+        image_position: Sequence[float]
+            Position in the frame of reference space of the center of the top
+            left pixel of the image. Corresponds to DICOM attributes
+            "ImagePositionPatient". Should be a sequence of length 3.
+        image_orientation: Sequence[float]
+            Cosines of the row direction (first triplet: horizontal, left to
+            right, increasing column index) and the column direction (second
+            triplet: vertical, top to bottom, increasing row index) direction
+            expressed in the three-dimensional patient or slide coordinate
+            system defined by the frame of reference. Corresponds to the DICOM
+            attribute "ImageOrientationPatient".
+        pixel_spacing: Sequence[float]
+            Spacing between pixels in millimeter unit along the column
+            direction (first value: spacing between rows, vertical, top to
+            bottom, increasing row index) and the row direction (second value:
+            spacing between columns: horizontal, left to right, increasing
+            column index). Corresponds to DICOM attribute "PixelSpacing".
+ spacing_between_slices: float + Spacing between slices in millimeter units in the frame of + reference coordinate system space. Corresponds to the DICOM + attribute "SpacingBetweenSlices" (however, this may not be present in + many images and may need to be inferred from "ImagePositionPatient" + attributes of consecutive slices). + rows:int + Number of rows in the image. Corresponds to the DICOM attribute + "Rows". + columns: int + Number of columns in the image. Corresponds to the DICOM attribute + "Columns". + number_of_frames: int + Number of frames in the image. Corresponds to NumberOfFrames + attribute, or to the number of images in the case of an image + series. + frame_of_reference_uid: Union[str, None], optional + Frame of reference UID, if known. Corresponds to DICOM attribute + FrameOfReferenceUID. + sop_instance_uids: Union[Sequence[str], None], optional + Ordered SOP Instance UIDs of each frame, if known, in the situation + that the volume is formed from a sequence of individual DICOM + instances. + frame_numbers: Union[Sequence[int], None], optional + Ordered frame numbers of each frame, if known, in the situation + that the volume is formed from a sequence of frames of one + multi-frame DICOM image. + + """ affine = _create_affine_transformation_matrix( image_position=image_position, image_orientation=image_orientation, @@ -2989,6 +3040,8 @@ def direction_cosines(self) -> List[float]: @property def pixel_spacing(self) -> List[float]: + """List[float]: Within-plane pixel spacing in millimeter units. Two + values (spacing between rows, spacing between columns).""" vec_along_rows = self._affine[:3, 2] vec_along_columns = self._affine[:3, 1] spacing_between_columns = np.sqrt((vec_along_rows ** 2).sum()).item() @@ -2996,7 +3049,8 @@ def pixel_spacing(self) -> List[float]: return [spacing_between_rows, spacing_between_columns] @property - def spacing_between_slices(self) -> List[float]: + def spacing_between_slices(self) -> float: + """float: Spacing between consecutive slices in millimeter units.""" slice_vec = self._affine[:3, 0] spacing = np.sqrt((slice_vec ** 2).sum()).item() return spacing From b7f6cb479946d40c6a4457a1895de519d93a11c8 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 16 Jun 2024 11:02:36 -0400 Subject: [PATCH 17/93] Fixes to matrix transform --- src/highdicom/spatial.py | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 0a881dd5..f73a3263 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -1177,7 +1177,7 @@ def _transform_affine_to_convention( to_index_convention ) flip_indices = [ - d not in to_index_normed for d in from_index_normed + d not in from_index_normed for d in to_index_normed ] permute_indices = [] @@ -2588,6 +2588,14 @@ class VolumeGeometry: classes in the ``highdicom.spatial`` module. """ + # The indexing convention used for all internal representations of the + # affine matrix. 
+ _INTERNAL_INDEX_CONVENTION = ( + PixelIndexDirections.I, + PixelIndexDirections.D, + PixelIndexDirections.R, + ) + def __init__( self, affine: np.ndarray, @@ -2726,11 +2734,7 @@ def for_image_series( image_orientation=ds.ImageOrientationPatient, pixel_spacing=ds.PixelSpacing, spacing_between_slices=slice_spacing, - index_convention=( - PixelIndexDirections.I, - PixelIndexDirections.D, - PixelIndexDirections.R, - ), + index_convention=self._INTERNAL_INDEX_CONVENTION, ) return cls( @@ -2810,11 +2814,7 @@ def for_image( image_orientation=image_orientation, pixel_spacing=pixel_spacing, spacing_between_slices=slice_spacing, - index_convention=( - PixelIndexDirections.I, - PixelIndexDirections.D, - PixelIndexDirections.R, - ), + index_convention=cls._INTERNAL_INDEX_CONVENTION, ) return cls( @@ -2893,11 +2893,7 @@ def from_attributes( image_orientation=image_orientation, pixel_spacing=pixel_spacing, spacing_between_slices=spacing_between_slices, - index_convention=( - PixelIndexDirections.I, - PixelIndexDirections.D, - PixelIndexDirections.R, - ), + index_convention=cls._INTERNAL_INDEX_CONVENTION, ) shape = (number_of_frames, rows, columns) return cls( From 607b1ae7dbd3cea7354eb6c4d7e22e8df343a099 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 16 Jun 2024 11:03:32 -0400 Subject: [PATCH 18/93] further fix --- src/highdicom/spatial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index f73a3263..cea7ce84 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -2734,7 +2734,7 @@ def for_image_series( image_orientation=ds.ImageOrientationPatient, pixel_spacing=ds.PixelSpacing, spacing_between_slices=slice_spacing, - index_convention=self._INTERNAL_INDEX_CONVENTION, + index_convention=cls._INTERNAL_INDEX_CONVENTION, ) return cls( From adec6b3587693fc21c00c9e28b46f3a19f0f7432 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Wed, 19 Jun 2024 17:51:28 -0400 Subject: [PATCH 19/93] Move volume to new file --- src/highdicom/spatial.py | 525 +------------------------- src/highdicom/volume.py | 784 +++++++++++++++++++++++++++++++++++++++ tests/test_spatial.py | 136 ------- tests/test_volume.py | 144 +++++++ 4 files changed, 940 insertions(+), 649 deletions(-) create mode 100644 src/highdicom/volume.py create mode 100644 tests/test_volume.py diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index cea7ce84..52ef7acf 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -744,6 +744,7 @@ def _normalize_reference_direction_convention( def _is_matrix_orthogonal( m: np.ndarray, + require_unit: bool = True, tol: float = _DEFAULT_EQUALITY_TOLERANCE, ) -> bool: """Check whether a matrix is orthogonal. @@ -754,7 +755,9 @@ def _is_matrix_orthogonal( ---------- m: numpy.ndarray A matrix. - tol: float + require_unit: bool, optional + Whether to require that the row vectors are unit vectors. + tol: float, optional Tolerance. ``m`` will be deemed orthogonal if the product ``m.T @ m`` is equal to diagonal matrix of squared column norms within this tolerance. @@ -773,6 +776,14 @@ def _is_matrix_orthogonal( if m.shape[0] != m.shape[1]: return False norm_squared = (m ** 2).sum(axis=0) + if require_unit: + if not np.allclose( + norm_squared, + np.array([1.0, 1.0, 1.0]), + atol=tol, + ): + return False + return np.allclose(m.T @ m, np.diag(norm_squared), atol=tol) @@ -2556,518 +2567,6 @@ def for_images( ) -class VolumeGeometry: - - """Class representing the geomtry of a regularly-spaced 3D array. 
- - All such geometries exist within DICOM's patient coordinate system. - - Internally this class uses the following conventions to represent the - geometry, however this can be constructed from or transformed to other - conventions with appropriate optional parameters to its methods: - - * The pixel indices are ordered (slice index, row index, column index). - * Pixel indices are zero-based and represent the center of the pixel. - * Column indices are ordered top to bottom, row indices are ordered left to - right. The interpretation of the slice indices direction is not defined. - * The x, y, z coordinates of frame-of-reference coordinate system follow - the "LPS" convention used in DICOM (see - :dcm:`Part 3 Section C.7.6.2.1.1 `). - I.e. - * The first coordinate (``x``) increases from the patient's right to left - * The second coordinate (``y``) increases from the patient's anterior to - posterior. - * The third coordinate (``z``) increases from the patient's caudal - direction (inferior) to cranial direction (superior). - - Note - ---- - The ordering of pixel indices used by this class (slice, row, column) - matches the way pydicom and highdicom represent pixel arrays but differs - from the (column, row, slice) convention used by the various "transformer" - classes in the ``highdicom.spatial`` module. - - """ - # The indexing convention used for all internal representations of the - # affine matrix. - _INTERNAL_INDEX_CONVENTION = ( - PixelIndexDirections.I, - PixelIndexDirections.D, - PixelIndexDirections.R, - ) - - def __init__( - self, - affine: np.ndarray, - shape: Sequence[int], - frame_of_reference_uid: Optional[str] = None, - sop_instance_uids: Optional[Sequence[str]] = None, - frame_numbers: Optional[Sequence[int]] = None, - ): - """ - - Parameters - ---------- - affine: np.ndarray - 4 x 4 affine matrix representing the transformation from pixel - indices (slice index, row index, column index) to the - frame-of-reference coordinate system. The top left 3 x 3 matrix - should be a scaled orthogonal matrix representing the rotation and - scaling. The top right 3 x 1 vector represents the translation - component. The last row should have value [0, 0, 0, 1]. - shape: Sequence[int] - Shape (slices, rows, columns) of the implied volume array. - frame_of_reference_uid: Optional[str], optional - Frame of reference UID for the frame of reference, if known. - sop_instance_uids: Optional[Sequence[str]], optional - SOP instance UIDs corresponding to each slice (stacked down - dimension 0) of the implied volume. This is relevant if and only if - the volume is formed from a series of single frame DICOM images. - frame_numbers: Optional[Sequence[int]], optional - Frame numbers of corresponding to each slice (stacked down - dimension 0) of the implied volume. This is relevant if and only if - the volume is formed from a set of frames of a single multiframe - DICOM image. - - """ - - if affine.shape != (4, 4): - raise ValueError("Affine matrix must have shape (4, 4).") - if not np.array_equal(affine[-1, :], np.array([0.0, 0.0, 0.0, 1.0])): - raise ValueError( - "Final row of affine matrix must be [0.0, 0.0, 0.0, 1.0]." - ) - if not _is_matrix_orthogonal(affine[:3, :3]): - raise ValueError( - "Argument 'affine' must be an orthogonal matrix." - ) - if len(shape) != 3: - raise ValueError( - "Argument 'shape' must have three elements." 
- ) - - self._affine = affine - if len(shape) != 3: - raise ValueError("Argument 'shape' must have three items.") - self._shape = tuple(shape) - self._frame_of_reference_uid = frame_of_reference_uid - if frame_numbers is not None: - if any(not isinstance(f, int) for f in frame_numbers): - raise TypeError( - "Argument 'frame_numbers' should be a sequence of ints." - ) - if any(f < 1 for f in frame_numbers): - raise ValueError( - "Argument 'frame_numbers' should contain only (strictly) " - "positive integers." - ) - if len(frame_numbers) != shape[0]: - raise ValueError( - "Length of 'frame_numbers' should match first item of " - "'shape'." - ) - self._frame_numbers = list(frame_numbers) - else: - self._frame_numbers = None - if sop_instance_uids is not None: - if any(not isinstance(u, str) for u in sop_instance_uids): - raise TypeError( - "Argument 'sop_instance_uids' should be a sequence of " - "str." - ) - if len(sop_instance_uids) != shape[0]: - raise ValueError( - "Length of 'sop_instance_uids' should match first item " - "of 'shape'." - ) - self._sop_instance_uids = list(sop_instance_uids) - else: - self._sop_instance_uids = None - - @classmethod - def for_image_series( - cls, - series_datasets: Sequence[Dataset], - ) -> "VolumeGeometry": - """Get volume geometry for a series of single frame images. - - Parameters - ---------- - series_datasets: Sequence[pydicom.Dataset] - Series of single frame datasets. There is no requirement on the - sorting of the datasets. - - Returns - ------- - VolumeGeometry: - Object representing the geometry of the series. - - """ - coordinate_system = get_image_coordinate_system(series_datasets[0]) - if ( - coordinate_system is None or - coordinate_system != CoordinateSystemNames.PATIENT - ): - raise ValueError( - "Dataset should exist in the patient " - "coordinate_system." - ) - frame_of_reference_uid = series_datasets[0].FrameOfReferenceUID - if not all( - ds.FrameOfReferenceUID == frame_of_reference_uid - for ds in series_datasets - ): - raise ValueError('Images do not share a frame of reference.') - - series_datasets = sort_datasets(series_datasets) - sorted_sop_instance_uids = [ - ds.SOPInstanceUID for ds in series_datasets - ] - - slice_spacing = get_series_slice_spacing(series_datasets) - if slice_spacing is None: - raise ValueError('Series is not a regularly spaced volume.') - ds = series_datasets[0] - shape = (len(series_datasets), ds.Rows, ds.Columns) - affine = _create_affine_transformation_matrix( - image_position=ds.ImagePositionPatient, - image_orientation=ds.ImageOrientationPatient, - pixel_spacing=ds.PixelSpacing, - spacing_between_slices=slice_spacing, - index_convention=cls._INTERNAL_INDEX_CONVENTION, - ) - - return cls( - affine=affine, - shape=shape, - frame_of_reference_uid=frame_of_reference_uid, - sop_instance_uids=sorted_sop_instance_uids, - ) - - @classmethod - def for_image( - cls, - dataset: Dataset, - ) -> "VolumeGeometry": - """Get volume geometry for a multiframe image. - - Parameters - ---------- - dataset: pydicom.Dataset - A multi-frame image dataset. - - Returns - ------- - VolumeGeometry: - Object representing the geometry of the image. - - """ - if not is_multiframe_image(dataset): - raise ValueError( - 'Dataset should be a multi-frame image.' - ) - coordinate_system = get_image_coordinate_system(dataset) - if ( - coordinate_system is None or - coordinate_system != CoordinateSystemNames.PATIENT - ): - raise ValueError( - "Dataset should exist in the patient " - "coordinate_system." 
- ) - sfgs = dataset.SharedFunctionalGroupsSequence[0] - if 'PlaneOrientationSequence' not in sfgs: - raise ValueError('Frames do not share an orientation.') - image_orientation = ( - sfgs - .PlaneOrientationSequence[0] - .ImageOrientationPatient - ) - pffgs = dataset.PerFrameFunctionalGroupsSequence - image_positions = [ - g.PlanePositionSequence[0].ImagePositionPatient - for g in pffgs - ] - sort_index = get_plane_sort_index( - image_positions, - image_orientation, - ) - sorted_positions = [image_positions[i] for i in sort_index] - sorted_frame_numbers = [f + 1 for f in sort_index] - - if 'PixelMeasuresSequence' not in sfgs: - raise ValueError('Frames do not share pixel measures.') - pixel_spacing = sfgs.PixelMeasuresSequence[0].PixelSpacing - - slice_spacing = get_regular_slice_spacing( - image_positions=image_positions, - image_orientation=image_orientation, - ) - if slice_spacing is None: - raise ValueError( - 'Dataset does not represent a regularly sampled volume.' - ) - - shape = (dataset.NumberOfFrames, dataset.Rows, dataset.Columns) - affine = _create_affine_transformation_matrix( - image_position=sorted_positions[0], - image_orientation=image_orientation, - pixel_spacing=pixel_spacing, - spacing_between_slices=slice_spacing, - index_convention=cls._INTERNAL_INDEX_CONVENTION, - ) - - return cls( - affine=affine, - shape=shape, - frame_of_reference_uid=dataset.FrameOfReferenceUID, - frame_numbers=sorted_frame_numbers, - ) - - @classmethod - def from_attributes( - cls, - image_position: Sequence[float], - image_orientation: Sequence[float], - pixel_spacing: Sequence[float], - spacing_between_slices: float, - rows:int, - columns: int, - number_of_frames: int, - frame_of_reference_uid: Optional[str] = None, - sop_instance_uids: Optional[Sequence[str]] = None, - frame_numbers: Optional[Sequence[int]] = None, - ) -> "VolumeGeometry": - """Create a volume geometry from DICOM attributes. - - Parameters - ---------- - image_position: Sequence[float] - Position in the frame of reference space of the center of the top - left pixel of the image. Corresponds to DICOM attributes - "ImagePositionPatient". Should be a sequence of length 3. - image_orientation: Sequence[float] - Cosines of the row direction (first triplet: horizontal, left to - right, increasing column index) and the column direction (second - triplet: vertical, top to bottom, increasing row index) direction - expressed in the three-dimensional patient or slide coordinate - system defined by the frame of reference. Corresponds to the DICOM - attribute "ImageOrientationPatient". - pixel_spacing: Sequence[float] - Spacing between pixels in millimeter unit along the column - direction (first value: spacing between rows, vertical, top to - bottom, increasing row index) and the row direction (second value: - spacing between columns: horizontal, left to right, increasing - column index). Corresponds to DICOM attribute "PixelSpacing". - spacing_between_slices: float - Spacing between slices in millimeter units in the frame of - reference coordinate system space. Corresponds to the DICOM - attribute "SpacingBetweenSlices" (however, this may not be present in - many images and may need to be inferred from "ImagePositionPatient" - attributes of consecutive slices). - rows:int - Number of rows in the image. Corresponds to the DICOM attribute - "Rows". - columns: int - Number of columns in the image. Corresponds to the DICOM attribute - "Columns". - number_of_frames: int - Number of frames in the image. 
Corresponds to NumberOfFrames - attribute, or to the number of images in the case of an image - series. - frame_of_reference_uid: Union[str, None], optional - Frame of reference UID, if known. Corresponds to DICOM attribute - FrameOfReferenceUID. - sop_instance_uids: Union[Sequence[str], None], optional - Ordered SOP Instance UIDs of each frame, if known, in the situation - that the volume is formed from a sequence of individual DICOM - instances. - frame_numbers: Union[Sequence[int], None], optional - Ordered frame numbers of each frame, if known, in the situation - that the volume is formed from a sequence of frames of one - multi-frame DICOM image. - - """ - affine = _create_affine_transformation_matrix( - image_position=image_position, - image_orientation=image_orientation, - pixel_spacing=pixel_spacing, - spacing_between_slices=spacing_between_slices, - index_convention=cls._INTERNAL_INDEX_CONVENTION, - ) - shape = (number_of_frames, rows, columns) - return cls( - affine=affine, - shape=shape, - frame_of_reference_uid=frame_of_reference_uid, - sop_instance_uids=sop_instance_uids, - frame_numbers=frame_numbers, - ) - - @classmethod - def from_components( - cls, - position: Sequence[float], - direction: Sequence[float], - spacing: Sequence[float], - shape: Sequence[int], - frame_of_reference_uid: Optional[str] = None, - sop_instance_uids: Optional[Sequence[str]] = None, - frame_numbers: Optional[Sequence[int]] = None, - ) -> "VolumeGeometry": - """""" - if not isinstance(position, Sequence): - raise TypeError('Argument "position" must be a sequence.') - if len(position) != 3: - raise ValueError('Argument "position" must have length 3.') - if not isinstance(spacing, Sequence): - raise TypeError('Argument "spacing" must be a sequence.') - if len(spacing) != 3: - raise ValueError('Argument "spacing" must have length 3.') - direction_arr = np.array(direction, dtype=np.float32) - if direction_arr.shape == (9, ): - direction_arr = direction_arr.reshape(3, 3) - elif direction_arr.shape == (3, 3): - pass - else: - raise ValueError( - "Argument 'direction' must have shape (9, ) or (3, 3)." - ) - scaled_direction = direction_arr * spacing - affine = np.row_stack( - [ - np.column_stack([scaled_direction, position]), - [0.0, 0.0, 0.0, 1.0] - ] - ) - return cls( - affine=affine, - shape=shape, - frame_of_reference_uid=frame_of_reference_uid, - sop_instance_uids=sop_instance_uids, - frame_numbers=frame_numbers, - ) - - def get_index_for_frame_number( - self, - frame_number: int, - ) -> int: - """Get the slice index for a frame number. - - This is intended for volumes representing for multi-frame images. - - Parameters - ---------- - frame_number: int - 1-based frame number in the original image. - - Returns - ------- - 0-based index of this frame number down the - slice dimension (axis 0) of the volume. - - """ - if self._frame_numbers is None: - raise RuntimeError( - "Frame information is not present." - ) - return self._frame_numbers.index(frame_number) - - def get_index_for_sop_instance_uid( - self, - sop_instance_uid: str, - ) -> int: - """Get the slice index for a SOP Instance UID. - - This is intended for volumes representing a series of single-frame - images. - - Parameters - ---------- - sop_instance_uid: str - SOP Instance of a particular image in the series. - - Returns - ------- - 0-based index of the image with the given SOP Instance UID down the - slice dimension (axis 0) of the volume. 
-
-        """
-        if self._sop_instance_uids is None:
-            raise RuntimeError(
-                "SOP Instance UID information is not present."
-            )
-        return self._sop_instance_uids.index(sop_instance_uid)
-
-    @property
-    def frame_of_reference_uid(self) -> Optional[str]:
-        """Union[str, None]: Frame of reference UID."""
-        return self._frame_of_reference_uid
-
-    @property
-    def affine(self) -> np.ndarray:
-        """numpy.ndarray: 4x4 affine transformation matrix"""
-        return self._affine.copy()
-
-    @property
-    def shape(self) -> Tuple[int, int, int]:
-        """Tuple[int, int, int]: Shape of the volume."""
-        return self._shape
-
-    @property
-    def sop_instance_uids(self) -> Union[List[str], None]:
-        """Union[List[str], None]: SOP Instance UID at each index."""
-        if self._sop_instance_uids is not None:
-            return self._sop_instance_uids.copy()
-
-    @property
-    def frame_numbers(self) -> Union[List[int], None]:
-        """Union[List[int], None]: Frame number at each index."""
-        if self._frame_numbers is not None:
-            return self._frame_numbers.copy()
-
-    @property
-    def direction_cosines(self) -> List[float]:
-        vec_along_rows = self._affine[:3, 2].copy()
-        vec_along_columns = self._affine[:3, 1].copy()
-        vec_along_columns /= np.sqrt((vec_along_columns ** 2).sum())
-        vec_along_rows /= np.sqrt((vec_along_rows ** 2).sum())
-        return [*vec_along_rows.tolist(), *vec_along_columns.tolist()]
-
-    @property
-    def pixel_spacing(self) -> List[float]:
-        """List[float]: Within-plane pixel spacing in millimeter units. Two
-        values (spacing between rows, spacing between columns)."""
-        vec_along_rows = self._affine[:3, 2]
-        vec_along_columns = self._affine[:3, 1]
-        spacing_between_columns = np.sqrt((vec_along_rows ** 2).sum()).item()
-        spacing_between_rows = np.sqrt((vec_along_columns ** 2).sum()).item()
-        return [spacing_between_rows, spacing_between_columns]
-
-    @property
-    def spacing_between_slices(self) -> float:
-        """float: Spacing between consecutive slices in millimeter units."""
-        slice_vec = self._affine[:3, 0]
-        spacing = np.sqrt((slice_vec ** 2).sum()).item()
-        return spacing
-
-    @property
-    def spacing(self) -> List[float]:
-        dir_mat = self._affine[:3, :3]
-        norms = np.sqrt((dir_mat ** 2).sum(axis=0))
-        return norms.tolist()
-
-    @property
-    def position(self) -> List[float]:
-        return self._affine[:3, 3].tolist()
-
-    @property
-    def direction(self) -> np.ndarray:
-        dir_mat = self._affine[:3, :3]
-        norms = np.sqrt((dir_mat ** 2).sum(axis=0))
-        return dir_mat / norms
-
-
 def map_pixel_into_coordinate_system(
     index: Sequence[int],
     image_position: Sequence[float],
diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
new file mode 100644
index 00000000..faad3196
--- /dev/null
+++ b/src/highdicom/volume.py
@@ -0,0 +1,784 @@
+from typing import List, Optional, Sequence, Union, Tuple
+import numpy as np
+
+from highdicom._module_utils import is_multiframe_image
+from highdicom.enum import (
+    CoordinateSystemNames,
+    PixelIndexDirections,
+)
+from highdicom.spatial import (
+    _create_affine_transformation_matrix,
+    _is_matrix_orthogonal,
+    get_image_coordinate_system,
+    get_plane_sort_index,
+    get_regular_slice_spacing,
+    get_series_slice_spacing,
+    sort_datasets,
+)
+from highdicom.content import PlanePositionSequence
+
+from pydicom import Dataset
+
+
+class VolumeGeometry:
+
+    """Class representing the geometry of a regularly-spaced 3D array.
+
+    All such geometries exist within DICOM's patient coordinate system.
+
+    Internally this class uses the following conventions to represent the
+    geometry; however, it can be constructed from or transformed to other
+    conventions with appropriate optional parameters to its methods:
+
+    * The pixel indices are ordered (slice index, row index, column index).
+    * Pixel indices are zero-based and represent the center of the pixel.
+    * Row indices are ordered top to bottom, column indices are ordered left
+      to right. The interpretation of the slice index direction is not defined.
+    * The x, y, z coordinates of frame-of-reference coordinate system follow
+      the "LPS" convention used in DICOM (see
+      :dcm:`Part 3 Section C.7.6.2.1.1 <part03/sect_C.7.6.2.html#sect_C.7.6.2.1.1>`).
+      I.e.
+      * The first coordinate (``x``) increases from the patient's right to left
+      * The second coordinate (``y``) increases from the patient's anterior to
+        posterior.
+      * The third coordinate (``z``) increases from the patient's caudal
+        direction (inferior) to cranial direction (superior).
+
+    Note
+    ----
+    The ordering of pixel indices used by this class (slice, row, column)
+    matches the way pydicom and highdicom represent pixel arrays but differs
+    from the (column, row, slice) convention used by the various "transformer"
+    classes in the ``highdicom.spatial`` module.
+
+    """
+    # The indexing convention used for all internal representations of the
+    # affine matrix.
+    _INTERNAL_INDEX_CONVENTION = (
+        PixelIndexDirections.I,
+        PixelIndexDirections.D,
+        PixelIndexDirections.R,
+    )
+
+    def __init__(
+        self,
+        affine: np.ndarray,
+        shape: Sequence[int],
+        frame_of_reference_uid: Optional[str] = None,
+        sop_instance_uids: Optional[Sequence[str]] = None,
+        frame_numbers: Optional[Sequence[int]] = None,
+    ):
+        """
+
+        Parameters
+        ----------
+        affine: np.ndarray
+            4 x 4 affine matrix representing the transformation from pixel
+            indices (slice index, row index, column index) to the
+            frame-of-reference coordinate system. The top left 3 x 3 matrix
+            should be a scaled orthogonal matrix representing the rotation and
+            scaling. The top right 3 x 1 vector represents the translation
+            component. The last row should have value [0, 0, 0, 1].
+        shape: Sequence[int]
+            Shape (slices, rows, columns) of the implied volume array.
+        frame_of_reference_uid: Optional[str], optional
+            Frame of reference UID for the frame of reference, if known.
+        sop_instance_uids: Optional[Sequence[str]], optional
+            SOP instance UIDs corresponding to each slice (stacked down
+            dimension 0) of the implied volume. This is relevant if and only if
+            the volume is formed from a series of single frame DICOM images.
+        frame_numbers: Optional[Sequence[int]], optional
+            Frame numbers corresponding to each slice (stacked down
+            dimension 0) of the implied volume. This is relevant if and only if
+            the volume is formed from a set of frames of a single multiframe
+            DICOM image.
+
+        """
+
+        if affine.shape != (4, 4):
+            raise ValueError("Affine matrix must have shape (4, 4).")
+        if not np.array_equal(affine[-1, :], np.array([0.0, 0.0, 0.0, 1.0])):
+            raise ValueError(
+                "Final row of affine matrix must be [0.0, 0.0, 0.0, 1.0]."
+            )
+        if not _is_matrix_orthogonal(affine[:3, :3], require_unit=False):
+            raise ValueError(
+                "Argument 'affine' must be an orthogonal matrix."
+            )
+        if len(shape) != 3:
+            raise ValueError(
+                "Argument 'shape' must have three elements."
+ ) + + self._affine = affine + if len(shape) != 3: + raise ValueError("Argument 'shape' must have three items.") + self._shape = tuple(shape) + self._frame_of_reference_uid = frame_of_reference_uid + if frame_numbers is not None: + if any(not isinstance(f, int) for f in frame_numbers): + raise TypeError( + "Argument 'frame_numbers' should be a sequence of ints." + ) + if any(f < 1 for f in frame_numbers): + raise ValueError( + "Argument 'frame_numbers' should contain only (strictly) " + "positive integers." + ) + if len(frame_numbers) != shape[0]: + raise ValueError( + "Length of 'frame_numbers' should match first item of " + "'shape'." + ) + self._frame_numbers = list(frame_numbers) + else: + self._frame_numbers = None + if sop_instance_uids is not None: + if any(not isinstance(u, str) for u in sop_instance_uids): + raise TypeError( + "Argument 'sop_instance_uids' should be a sequence of " + "str." + ) + if len(sop_instance_uids) != shape[0]: + raise ValueError( + "Length of 'sop_instance_uids' should match first item " + "of 'shape'." + ) + self._sop_instance_uids = list(sop_instance_uids) + else: + self._sop_instance_uids = None + + @classmethod + def for_image_series( + cls, + series_datasets: Sequence[Dataset], + ) -> "VolumeGeometry": + """Get volume geometry for a series of single frame images. + + Parameters + ---------- + series_datasets: Sequence[pydicom.Dataset] + Series of single frame datasets. There is no requirement on the + sorting of the datasets. + + Returns + ------- + VolumeGeometry: + Object representing the geometry of the series. + + """ + coordinate_system = get_image_coordinate_system(series_datasets[0]) + if ( + coordinate_system is None or + coordinate_system != CoordinateSystemNames.PATIENT + ): + raise ValueError( + "Dataset should exist in the patient " + "coordinate_system." + ) + frame_of_reference_uid = series_datasets[0].FrameOfReferenceUID + if not all( + ds.FrameOfReferenceUID == frame_of_reference_uid + for ds in series_datasets + ): + raise ValueError('Images do not share a frame of reference.') + + series_datasets = sort_datasets(series_datasets) + sorted_sop_instance_uids = [ + ds.SOPInstanceUID for ds in series_datasets + ] + + slice_spacing = get_series_slice_spacing(series_datasets) + if slice_spacing is None: + raise ValueError('Series is not a regularly spaced volume.') + ds = series_datasets[0] + shape = (len(series_datasets), ds.Rows, ds.Columns) + affine = _create_affine_transformation_matrix( + image_position=ds.ImagePositionPatient, + image_orientation=ds.ImageOrientationPatient, + pixel_spacing=ds.PixelSpacing, + spacing_between_slices=slice_spacing, + index_convention=cls._INTERNAL_INDEX_CONVENTION, + ) + + return cls( + affine=affine, + shape=shape, + frame_of_reference_uid=frame_of_reference_uid, + sop_instance_uids=sorted_sop_instance_uids, + ) + + @classmethod + def for_image( + cls, + dataset: Dataset, + ) -> "VolumeGeometry": + """Get volume geometry for a multiframe image. + + Parameters + ---------- + dataset: pydicom.Dataset + A multi-frame image dataset. + + Returns + ------- + VolumeGeometry: + Object representing the geometry of the image. + + """ + if not is_multiframe_image(dataset): + raise ValueError( + 'Dataset should be a multi-frame image.' + ) + coordinate_system = get_image_coordinate_system(dataset) + if ( + coordinate_system is None or + coordinate_system != CoordinateSystemNames.PATIENT + ): + raise ValueError( + "Dataset should exist in the patient " + "coordinate_system." 
+ ) + sfgs = dataset.SharedFunctionalGroupsSequence[0] + if 'PlaneOrientationSequence' not in sfgs: + raise ValueError('Frames do not share an orientation.') + image_orientation = ( + sfgs + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + pffgs = dataset.PerFrameFunctionalGroupsSequence + image_positions = [ + g.PlanePositionSequence[0].ImagePositionPatient + for g in pffgs + ] + sort_index = get_plane_sort_index( + image_positions, + image_orientation, + ) + sorted_positions = [image_positions[i] for i in sort_index] + sorted_frame_numbers = [f + 1 for f in sort_index] + + if 'PixelMeasuresSequence' not in sfgs: + raise ValueError('Frames do not share pixel measures.') + pixel_spacing = sfgs.PixelMeasuresSequence[0].PixelSpacing + + slice_spacing = get_regular_slice_spacing( + image_positions=image_positions, + image_orientation=image_orientation, + ) + if slice_spacing is None: + raise ValueError( + 'Dataset does not represent a regularly sampled volume.' + ) + + shape = (dataset.NumberOfFrames, dataset.Rows, dataset.Columns) + affine = _create_affine_transformation_matrix( + image_position=sorted_positions[0], + image_orientation=image_orientation, + pixel_spacing=pixel_spacing, + spacing_between_slices=slice_spacing, + index_convention=cls._INTERNAL_INDEX_CONVENTION, + ) + + return cls( + affine=affine, + shape=shape, + frame_of_reference_uid=dataset.FrameOfReferenceUID, + frame_numbers=sorted_frame_numbers, + ) + + @classmethod + def from_attributes( + cls, + image_position: Sequence[float], + image_orientation: Sequence[float], + pixel_spacing: Sequence[float], + spacing_between_slices: float, + rows:int, + columns: int, + number_of_frames: int, + frame_of_reference_uid: Optional[str] = None, + sop_instance_uids: Optional[Sequence[str]] = None, + frame_numbers: Optional[Sequence[int]] = None, + ) -> "VolumeGeometry": + """Create a volume geometry from DICOM attributes. + + Parameters + ---------- + image_position: Sequence[float] + Position in the frame of reference space of the center of the top + left pixel of the image. Corresponds to DICOM attributes + "ImagePositionPatient". Should be a sequence of length 3. + image_orientation: Sequence[float] + Cosines of the row direction (first triplet: horizontal, left to + right, increasing column index) and the column direction (second + triplet: vertical, top to bottom, increasing row index) direction + expressed in the three-dimensional patient or slide coordinate + system defined by the frame of reference. Corresponds to the DICOM + attribute "ImageOrientationPatient". + pixel_spacing: Sequence[float] + Spacing between pixels in millimeter unit along the column + direction (first value: spacing between rows, vertical, top to + bottom, increasing row index) and the row direction (second value: + spacing between columns: horizontal, left to right, increasing + column index). Corresponds to DICOM attribute "PixelSpacing". + spacing_between_slices: float + Spacing between slices in millimeter units in the frame of + reference coordinate system space. Corresponds to the DICOM + attribute "SpacingBetweenSlices" (however, this may not be present in + many images and may need to be inferred from "ImagePositionPatient" + attributes of consecutive slices). + rows:int + Number of rows in the image. Corresponds to the DICOM attribute + "Rows". + columns: int + Number of columns in the image. Corresponds to the DICOM attribute + "Columns". + number_of_frames: int + Number of frames in the image. 
Corresponds to NumberOfFrames
+            attribute, or to the number of images in the case of an image
+            series.
+        frame_of_reference_uid: Union[str, None], optional
+            Frame of reference UID, if known. Corresponds to DICOM attribute
+            FrameOfReferenceUID.
+        sop_instance_uids: Union[Sequence[str], None], optional
+            Ordered SOP Instance UIDs of each frame, if known, in the situation
+            that the volume is formed from a sequence of individual DICOM
+            instances.
+        frame_numbers: Union[Sequence[int], None], optional
+            Ordered frame numbers of each frame, if known, in the situation
+            that the volume is formed from a sequence of frames of one
+            multi-frame DICOM image.
+
+        """
+        affine = _create_affine_transformation_matrix(
+            image_position=image_position,
+            image_orientation=image_orientation,
+            pixel_spacing=pixel_spacing,
+            spacing_between_slices=spacing_between_slices,
+            index_convention=cls._INTERNAL_INDEX_CONVENTION,
+        )
+        shape = (number_of_frames, rows, columns)
+        return cls(
+            affine=affine,
+            shape=shape,
+            frame_of_reference_uid=frame_of_reference_uid,
+            sop_instance_uids=sop_instance_uids,
+            frame_numbers=frame_numbers,
+        )
+
+    @classmethod
+    def from_components(
+        cls,
+        position: Sequence[float],
+        direction: Sequence[float],
+        spacing: Sequence[float],
+        shape: Sequence[int],
+        frame_of_reference_uid: Optional[str] = None,
+        sop_instance_uids: Optional[Sequence[str]] = None,
+        frame_numbers: Optional[Sequence[int]] = None,
+    ) -> "VolumeGeometry":
+        """Construct a VolumeGeometry from components.
+
+        Parameters
+        ----------
+        position: Sequence[float]
+            Sequence of three floats giving the position in the frame of
+            reference coordinate system of the center of the pixel at location
+            (0, 0, 0).
+        direction: Sequence[float]
+            Direction matrix for the volume. The columns of the direction
+            matrix are orthogonal unit vectors that give the direction in the
+            frame of reference space of the increasing direction of each axis
+            of the array. This matrix may be passed either as a 3x3 matrix or a
+            flattened 9 element array (first row, second row, third row).
+        spacing: Sequence[float]
+            Spacing between pixel centers in the frame of reference
+            coordinate system along each of the dimensions of the array.
+        shape: Sequence[int]
+            Sequence of three integers giving the shape of the volume array.
+        frame_of_reference_uid: Union[str, None], optional
+            Frame of reference UID for the frame of reference, if known.
+        sop_instance_uids: Union[Sequence[str], None], optional
+            Ordered SOP Instance UIDs of each frame, if known, in the situation
+            that the volume is formed from a sequence of individual DICOM
+            instances.
+        frame_numbers: Union[Sequence[int], None], optional
+            Ordered frame numbers of each frame, if known, in the situation
+            that the volume is formed from a sequence of frames of one
+            multi-frame DICOM image.
+
+        Returns
+        -------
+        highdicom.volume.VolumeGeometry:
+            Volume geometry constructed from the provided components.
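+
+        Examples
+        --------
+        A minimal, illustrative sketch (all values are hypothetical): an
+        axial volume with 2.0 mm spacing between slices and 1.0 mm in-plane
+        pixel spacing:
+
+        >>> geometry = VolumeGeometry.from_components(
+        ...     position=[-50.0, -50.0, -20.0],
+        ...     direction=[0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0],
+        ...     spacing=[2.0, 1.0, 1.0],
+        ...     shape=(10, 100, 100),
+        ... )
+        >>> geometry.spacing_between_slices
+        2.0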
+
+    def get_index_for_frame_number(
+        self,
+        frame_number: int,
+    ) -> int:
+        """Get the slice index for a frame number.
+
+        This is intended for volumes representing multi-frame images.
+
+        Parameters
+        ----------
+        frame_number: int
+            1-based frame number in the original image.
+
+        Returns
+        -------
+        int:
+            0-based index of this frame number down the
+            slice dimension (axis 0) of the volume.
+
+        """
+        if self._frame_numbers is None:
+            raise RuntimeError(
+                "Frame information is not present."
+            )
+        return self._frame_numbers.index(frame_number)
+
+    def get_index_for_sop_instance_uid(
+        self,
+        sop_instance_uid: str,
+    ) -> int:
+        """Get the slice index for a SOP Instance UID.
+
+        This is intended for volumes representing a series of single-frame
+        images.
+
+        Parameters
+        ----------
+        sop_instance_uid: str
+            SOP Instance UID of a particular image in the series.
+
+        Returns
+        -------
+        int:
+            0-based index of the image with the given SOP Instance UID down
+            the slice dimension (axis 0) of the volume.
+
+        """
+        if self._sop_instance_uids is None:
+            raise RuntimeError(
+                "SOP Instance UID information is not present."
+            )
+        return self._sop_instance_uids.index(sop_instance_uid)
+
+    def get_center_index(self, round_output: bool = False) -> np.ndarray:
+        """Get array index of center of the volume.
+
+        Parameters
+        ----------
+        round_output: bool, optional
+            If True, the result is rounded down to the nearest integer and
+            returned with an integer datatype. Otherwise it is returned as a
+            floating point datatype without rounding, to sub-voxel precision.
+
+        Returns
+        -------
+        np.ndarray:
+            Array of shape 3 representing the array indices at the center of
+            the volume.
+
+        """
+        if round_output:
+            center = np.array(
+                [(self.shape[d] // 2) for d in range(3)],
+                dtype=np.uint32,
+            )
+        else:
+            center = np.array(
+                [(self.shape[d] - 1 / 2.0) for d in range(3)]
+            )
+
+        return center
+
+    def get_center_coordinate(self) -> np.ndarray:
+        """Get frame-of-reference coordinate at the center of the volume.
+
+        Returns
+        -------
+        np.ndarray:
+            Array of shape 3 representing the frame-of-reference coordinate at
+            the center of the volume.
+
+        """
+        center_index = self.get_center_index().reshape((1, 3))
+        center_coordinate = self.map_indices_to_reference(center_index)
+
+        return center_coordinate.reshape((3, ))
+
+    def map_indices_to_reference(
+        self,
+        indices: np.ndarray,
+    ) -> np.ndarray:
+        """Transform image pixel indices to frame of reference coordinates.
+
+        Parameters
+        ----------
+        indices: numpy.ndarray
+            Array of zero-based array indices. Array of integer values with
+            shape ``(n, 3)``, where *n* is the number of indices, the first
+            column represents the slice index, the second column represents
+            the row index, and the third column represents the column index.
+
+        Returns
+        -------
+        numpy.ndarray
+            Array of (x, y, z) coordinates in the coordinate system defined by
+            the frame of reference. Array has shape ``(n, 3)``, where *n* is
+            the number of coordinates, the first column represents the `x`
+            offsets, the second column represents the `y` offsets and the
+            third column represents the `z` offsets.
+
+        Raises
+        ------
+        ValueError
+            When `indices` has incorrect shape.
+
+        """
+        if indices.ndim != 2 or indices.shape[1] != 3:
+            raise ValueError(
+                'Argument "indices" must be a two-dimensional array '
+                'with shape [n, 3].'
+            )
+        indices_augmented = np.row_stack([
+            indices.T.astype(float),
+            np.ones((indices.shape[0], ), dtype=float),
+        ])
+        reference_coordinates = np.dot(self._affine, indices_augmented)
+        return reference_coordinates[:3, :].T
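
Concretely, for the axis-aligned example geometry sketched above (1 mm
in-plane spacing, 10 mm slice spacing, positioned at the origin), an index
triplet maps to frame-of-reference coordinates as the tests added later in
this series verify:

    import numpy as np

    indices = np.array([[1, 2, 3]])  # (slice, row, column)
    coords = geometry.map_indices_to_reference(indices)
    # coords == [[3.0, 2.0, 10.0]]: x from the column index, y from the row
    # index, and z from the slice index scaled by the slice spacing
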
+
+    def map_reference_to_indices(
+        self,
+        coordinates: np.y,
+        round_output: bool = False,
+        check_bounds: bool = False,
+    ) -> np.ndarray:
+        """Transform frame of reference coordinates into array indices.
+
+        Parameters
+        ----------
+        coordinates: numpy.ndarray
+            Array of (x, y, z) coordinates in the coordinate system defined by
+            the frame of reference. Array has shape ``(n, 3)``, where *n* is
+            the number of coordinates, the first column represents the *X*
+            offsets, the second column represents the *Y* offsets and the
+            third column represents the *Z* offsets.
+        round_output: bool, optional
+            Whether to round the output to the nearest voxel. If True, the
+            output will have integer datatype. If False, the returned array
+            will have floating point data type and sub-voxel precision.
+        check_bounds: bool, optional
+            Whether to check that the returned indices lie within the bounds
+            of the array. If True, a ``RuntimeError`` will be raised if the
+            resulting array indices (before rounding) lie out of the bounds of
+            the array.
+
+        Returns
+        -------
+        numpy.ndarray
+            Array of zero-based array indices at pixel resolution. Array of
+            integer or floating point values with shape ``(n, 3)``, where *n*
+            is the number of indices. The datatype of the array will be
+            integer if ``round_output`` is True, or float if ``round_output``
+            is False (the default).
+
+        Note
+        ----
+        The returned pixel indices may be negative if `coordinates` fall
+        outside of the array.
+
+        Raises
+        ------
+        ValueError
+            When `coordinates` has incorrect shape.
+        RuntimeError
+            If `check_bounds` is True and any mapped coordinate lies outside
+            the bounds of the array.
+
+        """
+        if coordinates.ndim != 2 or coordinates.shape[1] != 3:
+            raise ValueError(
+                'Argument "coordinates" must be a two-dimensional array '
+                'with shape [n, 3].'
+            )
+        reference_coordinates = np.row_stack([
+            coordinates.T.astype(float),
+            np.ones((coordinates.shape[0], ), dtype=float)
+        ])
+        indices = np.dot(self._affine, reference_coordinates)
+        indices = indices[:3, :].T
+
+        if check_bounds:
+            out_of_bounds = False
+            for d in range(3):
+                if indices[:, d].min() < -0.5:
+                    out_of_bounds = True
+                    break
+                if indices[:, d].max() > self.shape[d] - 0.5:
+                    out_of_bounds = True
+                    break
+
+            if out_of_bounds:
+                raise RuntimeError("Bounds check failed.")
+
+        if round_output:
+            return np.around(indices).astype(int)
+        else:
+            return indices
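
A usage sketch for the two optional flags documented above (no output values
are asserted here, only the flag semantics):

    indices = geometry.map_reference_to_indices(
        coords,
        round_output=True,  # return integer voxel indices rather than floats
        check_bounds=True,  # raise RuntimeError for out-of-bounds coordinates
    )
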
+
+    @property
+    def frame_of_reference_uid(self) -> Optional[str]:
+        """Union[str, None]: Frame of reference UID."""
+        return self._frame_of_reference_uid
+
+    @property
+    def affine(self) -> np.ndarray:
+        """numpy.ndarray: 4x4 affine transformation matrix
+
+        This matrix maps an index into the array into a position in the LPS
+        frame of reference coordinate space.
+
+        """
+        return self._affine.copy()
+
+    @property
+    def shape(self) -> Tuple[int, int, int]:
+        """Tuple[int, int, int]: Shape of the volume."""
+        return self._shape
+
+    @property
+    def sop_instance_uids(self) -> Union[List[str], None]:
+        """Union[List[str], None]: SOP Instance UID at each index."""
+        if self._sop_instance_uids is not None:
+            return self._sop_instance_uids.copy()
+
+    @property
+    def frame_numbers(self) -> Union[List[int], None]:
+        """Union[List[int], None]:
+
+        Frame number at each index down the first dimension.
+
+        """
+        if self._frame_numbers is not None:
+            return self._frame_numbers.copy()
+
+    @property
+    def direction_cosines(self) -> List[float]:
+        """List[float]:
+
+        List of 6 floats giving the direction cosines of the
+        vector along the rows and the vector along the columns, matching the
+        format of the DICOM Image Orientation Patient attribute.
+
+        """
+        vec_along_rows = self._affine[:3, 2].copy()
+        vec_along_columns = self._affine[:3, 1].copy()
+        vec_along_columns /= np.sqrt((vec_along_columns ** 2).sum())
+        vec_along_rows /= np.sqrt((vec_along_rows ** 2).sum())
+        return [*vec_along_rows.tolist(), *vec_along_columns.tolist()]
+
+    @property
+    def pixel_spacing(self) -> List[float]:
+        """List[float]:
+
+        Within-plane pixel spacing in millimeter units. Two
+        values (spacing between rows, spacing between columns).
+
+        """
+        vec_along_rows = self._affine[:3, 2]
+        vec_along_columns = self._affine[:3, 1]
+        spacing_between_columns = np.sqrt((vec_along_rows ** 2).sum()).item()
+        spacing_between_rows = np.sqrt((vec_along_columns ** 2).sum()).item()
+        return [spacing_between_rows, spacing_between_columns]
+
+    @property
+    def spacing_between_slices(self) -> float:
+        """float:
+
+        Spacing between consecutive slices in millimeter units.
+
+        """
+        slice_vec = self._affine[:3, 0]
+        spacing = np.sqrt((slice_vec ** 2).sum()).item()
+        return spacing
+
+    @property
+    def spacing(self) -> List[float]:
+        """List[float]:
+
+        Pixel spacing in millimeter units for the three spatial directions.
+        Three values (spacing between slices, spacing between rows,
+        spacing between columns).
+
+        """
+        dir_mat = self._affine[:3, :3]
+        norms = np.sqrt((dir_mat ** 2).sum(axis=0))
+        return norms.tolist()
+
+    @property
+    def position(self) -> List[float]:
+        """List[float]:
+
+        Position in the frame of reference coordinate system of the center
+        of the voxel at array indices (0, 0, 0). Three values (x, y, z).
+
+        """
+        return self._affine[:3, 3].tolist()
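
Together these properties recover the conventional DICOM description of the
geometry. For the axis-aligned example used in the sketches above, they
evaluate roughly as follows:

    geometry.direction_cosines       # [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    geometry.pixel_spacing           # [1.0, 1.0] (rows, columns)
    geometry.spacing_between_slices  # 10.0
    geometry.spacing                 # [10.0, 1.0, 1.0] (slices, rows, columns)
    geometry.position                # [0.0, 0.0, 0.0]
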
+
+    @property
+    def direction(self) -> np.ndarray:
+        """np.ndarray:
+
+        Direction matrix for the volume. The columns of the direction
+        matrix are orthogonal unit vectors that give the direction in the
+        frame of reference space of the increasing direction of each axis
+        of the array. The matrix is returned as a 3x3 array of unit column
+        vectors.
+
+        """
+        dir_mat = self._affine[:3, :3]
+        norms = np.sqrt((dir_mat ** 2).sum(axis=0))
+        return dir_mat / norms
diff --git a/tests/test_spatial.py b/tests/test_spatial.py
index b2b13d95..9f2f573b 100644
--- a/tests/test_spatial.py
+++ b/tests/test_spatial.py
@@ -11,7 +11,6 @@
     PixelToReferenceTransformer,
     ReferenceToImageTransformer,
     ReferenceToPixelTransformer,
-    VolumeGeometry,
     _are_images_coplanar,
     _transform_affine_matrix,
     get_series_slice_spacing,
@@ -963,138 +962,3 @@ def test_transform_affine_matrix():
     assert np.array_equal(transformed, expected)
 
 
-@pytest.mark.parametrize(
-    'image_position,image_orientation,pixel_spacing,spacing_between_slices',
-    [
-        (
-            (67.0, 32.4, -45.2),
-            (1.0, 0.0, 0.0, 0.0, -1.0, 0.0),
-            (3.2, 1.6),
-            1.25,
-        ),
-        (
-            [67.0, 32.4, -45.2],
-            (-1.0, 0.0, 0.0, 0.0, -1.0, 0.0),
-            (3.2, 1.6),
-            1.25,
-        ),
-        (
-            (-67.0, 132.4, -5.2),
-            (0.0, 0.0, -1.0, 1.0, 0.0, 0.0),
-            (0.25, 0.25),
-            3.5,
-        ),
-        (
-            (-67.0, 132.4, -5.2),
-            (
-                np.cos(np.radians(30)), -np.sin(np.radians(30)), 0.0,
-                np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0,
-            ),
-            (0.75, 0.25),
-            3.5,
-        ),
-    ],
-)
-def test_geometry_from_attributes(
-    image_position,
-    image_orientation,
-    pixel_spacing,
-    spacing_between_slices,
-):
-    geometry = VolumeGeometry.from_attributes(
-        rows=10,
-        columns=10,
-        number_of_frames=10,
-        image_position=image_position,
-        image_orientation=image_orientation,
-        pixel_spacing=pixel_spacing,
-        spacing_between_slices=spacing_between_slices,
-    )
-    assert geometry.position == list(image_position)
-    assert geometry.direction_cosines == list(image_orientation)
-    assert geometry.pixel_spacing == list(pixel_spacing)
-    assert geometry.spacing_between_slices == spacing_between_slices
-
-
-def test_volume_geometry_single_frame():
-    ct_files = [
-        get_testdata_file('dicomdirtests/77654033/CT2/17136'),
-        get_testdata_file('dicomdirtests/77654033/CT2/17196'),
-        get_testdata_file('dicomdirtests/77654033/CT2/17166'),
-    ]
-    ct_series = [pydicom.dcmread(f) for f in ct_files]
-    geometry = VolumeGeometry.for_image_series(ct_series)
-    assert isinstance(geometry, VolumeGeometry)
-    rows, columns = ct_series[0].Rows, ct_series[0].Columns
-    assert geometry.shape == (len(ct_files), rows, columns)
-    assert geometry.frame_numbers is None
-    sop_instance_uids = [
-        ct_series[0].SOPInstanceUID,
-        ct_series[2].SOPInstanceUID,
-        ct_series[1].SOPInstanceUID,
-    ]
-    assert geometry.sop_instance_uids == sop_instance_uids
-    assert geometry.get_index_for_sop_instance_uid(
-        ct_series[2].SOPInstanceUID
-    ) == 1
-    with pytest.raises(RuntimeError):
-        geometry.get_index_for_frame_number(2)
-    orientation = ct_series[0].ImageOrientationPatient
-    assert geometry.direction_cosines == orientation
-    direction = geometry.direction
-    assert np.array_equal(direction[:, 1], orientation[3:])
-    assert np.array_equal(direction[:, 2], orientation[:3])
-    # Check third direction is normal to others
-    assert direction[:, 0] @ direction[:, 1] == 0.0
-    assert direction[:, 0] @ direction[:, 2] == 0.0
-    assert (direction[:, 0] ** 2).sum() == 1.0
-    assert geometry.position == 
ct_series[0].ImagePositionPatient - assert geometry.pixel_spacing == ct_series[0].PixelSpacing - slice_spacing = 1.25 - assert geometry.spacing == [slice_spacing, *ct_series[0].PixelSpacing[::-1]] - - -def test_volume_geometry_multiframe(): - dcm = pydicom.dcmread(get_testdata_file('eCT_Supplemental.dcm')) - geometry = VolumeGeometry.for_image(dcm) - assert isinstance(geometry, VolumeGeometry) - rows, columns = dcm.Rows, dcm.Columns - assert geometry.shape == (dcm.NumberOfFrames, rows, columns) - assert geometry.frame_numbers == [2, 1] - assert geometry.sop_instance_uids is None - with pytest.raises(RuntimeError): - geometry.get_index_for_sop_instance_uid( - dcm.SOPInstanceUID - ) - assert geometry.get_index_for_frame_number(2) == 0 - orientation = ( - dcm - .SharedFunctionalGroupsSequence[0] - .PlaneOrientationSequence[0] - .ImageOrientationPatient - ) - pixel_spacing = ( - dcm - .SharedFunctionalGroupsSequence[0] - .PixelMeasuresSequence[0] - .PixelSpacing - ) - assert geometry.direction_cosines == orientation - direction = geometry.direction - assert np.array_equal(direction[:, 1], orientation[3:]) - assert np.array_equal(direction[:, 2], orientation[:3]) - # Check third direction is normal to others - assert direction[:, 0] @ direction[:, 1] == 0.0 - assert direction[:, 0] @ direction[:, 2] == 0.0 - assert (direction[:, 0] ** 2).sum() == 1.0 - first_frame = geometry.frame_numbers[0] - first_frame_pos = ( - dcm - .PerFrameFunctionalGroupsSequence[first_frame - 1] - .PlanePositionSequence[0] - .ImagePositionPatient - ) - assert geometry.position == first_frame_pos - assert geometry.pixel_spacing == pixel_spacing - slice_spacing = 10.0 - assert geometry.spacing == [slice_spacing, *pixel_spacing[::-1]] diff --git a/tests/test_volume.py b/tests/test_volume.py new file mode 100644 index 00000000..9a078a33 --- /dev/null +++ b/tests/test_volume.py @@ -0,0 +1,144 @@ +import numpy as np +import pydicom +from pydicom.data import get_testdata_file +import pytest + + +from highdicom.volume import VolumeGeometry + + +@pytest.mark.parametrize( + 'image_position,image_orientation,pixel_spacing,spacing_between_slices', + [ + ( + (67.0, 32.4, -45.2), + (1.0, 0.0, 0.0, 0.0, -1.0, 0.0), + (3.2, 1.6), + 1.25, + ), + ( + [67.0, 32.4, -45.2], + (-1.0, 0.0, 0.0, 0.0, -1.0, 0.0), + (3.2, 1.6), + 1.25, + ), + ( + (-67.0, 132.4, -5.2), + (0.0, 0.0, -1.0, 1.0, 0.0, 0.0), + (0.25, 0.25), + 3.5, + ), + ( + (-67.0, 132.4, -5.2), + ( + np.cos(np.radians(30)), -np.sin(np.radians(30)), 0.0, + np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0, + ), + (0.75, 0.25), + 3.5, + ), + ], +) +def test_geometry_from_attributes( + image_position, + image_orientation, + pixel_spacing, + spacing_between_slices, +): + geometry = VolumeGeometry.from_attributes( + rows=10, + columns=10, + number_of_frames=10, + image_position=image_position, + image_orientation=image_orientation, + pixel_spacing=pixel_spacing, + spacing_between_slices=spacing_between_slices, + ) + assert geometry.position == list(image_position) + assert geometry.direction_cosines == list(image_orientation) + assert geometry.pixel_spacing == list(pixel_spacing) + assert geometry.spacing_between_slices == spacing_between_slices + + +def test_volume_geometry_single_frame(): + ct_files = [ + get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17196'), + get_testdata_file('dicomdirtests/77654033/CT2/17166'), + ] + ct_series = [pydicom.dcmread(f) for f in ct_files] + geometry = VolumeGeometry.for_image_series(ct_series) 
+ assert isinstance(geometry, VolumeGeometry) + rows, columns = ct_series[0].Rows, ct_series[0].Columns + assert geometry.shape == (len(ct_files), rows, columns) + assert geometry.frame_numbers is None + sop_instance_uids = [ + ct_series[0].SOPInstanceUID, + ct_series[2].SOPInstanceUID, + ct_series[1].SOPInstanceUID, + ] + assert geometry.sop_instance_uids == sop_instance_uids + assert geometry.get_index_for_sop_instance_uid( + ct_series[2].SOPInstanceUID + ) == 1 + with pytest.raises(RuntimeError): + geometry.get_index_for_frame_number(2) + orientation = ct_series[0].ImageOrientationPatient + assert geometry.direction_cosines == orientation + direction = geometry.direction + assert np.array_equal(direction[:, 1], orientation[3:]) + assert np.array_equal(direction[:, 2], orientation[:3]) + # Check third direction is normal to others + assert direction[:, 0] @ direction[:, 1] == 0.0 + assert direction[:, 0] @ direction[:, 2] == 0.0 + assert (direction[:, 0] ** 2).sum() == 1.0 + assert geometry.position == ct_series[0].ImagePositionPatient + assert geometry.pixel_spacing == ct_series[0].PixelSpacing + slice_spacing = 1.25 + assert geometry.spacing == [slice_spacing, *ct_series[0].PixelSpacing[::-1]] + + +def test_volume_geometry_multiframe(): + dcm = pydicom.dcmread(get_testdata_file('eCT_Supplemental.dcm')) + geometry = VolumeGeometry.for_image(dcm) + assert isinstance(geometry, VolumeGeometry) + rows, columns = dcm.Rows, dcm.Columns + assert geometry.shape == (dcm.NumberOfFrames, rows, columns) + assert geometry.frame_numbers == [2, 1] + assert geometry.sop_instance_uids is None + with pytest.raises(RuntimeError): + geometry.get_index_for_sop_instance_uid( + dcm.SOPInstanceUID + ) + assert geometry.get_index_for_frame_number(2) == 0 + orientation = ( + dcm + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + pixel_spacing = ( + dcm + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert geometry.direction_cosines == orientation + direction = geometry.direction + assert np.array_equal(direction[:, 1], orientation[3:]) + assert np.array_equal(direction[:, 2], orientation[:3]) + # Check third direction is normal to others + assert direction[:, 0] @ direction[:, 1] == 0.0 + assert direction[:, 0] @ direction[:, 2] == 0.0 + assert (direction[:, 0] ** 2).sum() == 1.0 + first_frame = geometry.frame_numbers[0] + first_frame_pos = ( + dcm + .PerFrameFunctionalGroupsSequence[first_frame - 1] + .PlanePositionSequence[0] + .ImagePositionPatient + ) + assert geometry.position == first_frame_pos + assert geometry.pixel_spacing == pixel_spacing + slice_spacing = 10.0 + assert geometry.spacing == [slice_spacing, *pixel_spacing[::-1]] From 6f41a70531d5189bee91df2907534c13532410c4 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Wed, 19 Jun 2024 22:12:35 -0400 Subject: [PATCH 20/93] Add new tests --- src/highdicom/volume.py | 67 ++++++++++++++++++++++++++++++++++++++--- tests/test_volume.py | 27 +++++++++++++++++ 2 files changed, 90 insertions(+), 4 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index faad3196..b76353e9 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -523,12 +523,12 @@ def get_center_index(self, round_output: bool = False) -> np.ndarray: """ if round_output: center = np.array( - [(self.shape[d] // 2) for d in range(3)], + [(self.shape[d] - 1) // 2 for d in range(3)], dtype=np.uint32, ) else: center = np.array( - [(self.shape[d] - 1 / 2.0) for d in 
range(3)]
+                [(self.shape[d] - 1) / 2.0 for d in range(3)]
             )
 
         return center
@@ -591,7 +591,7 @@ def map_indices_to_reference(
 
     def map_reference_to_indices(
         self,
-        coordinates: np.y,
+        coordinates: np.ndarray,
         round_output: bool = False,
         check_bounds: bool = False,
     ) -> np.ndarray:
@@ -647,7 +647,7 @@ def map_reference_to_indices(
             coordinates.T.astype(float),
             np.ones((coordinates.shape[0], ), dtype=float)
         ])
-        indices = np.dot(self._affine, reference_coordinates)
+        indices = np.dot(self.inverse_affine, reference_coordinates)
         indices = indices[:3, :].T
 
         if check_bounds:
@@ -668,6 +668,55 @@ def map_reference_to_indices(
         else:
             return indices
 
+    def get_plane_position(self, plane_number: int) -> PlanePositionSequence:
+        """Get plane position of a given plane.
+
+        Parameters
+        ----------
+        plane_number: int
+            Zero-based plane index (down the first dimension of the array).
+
+        Returns
+        -------
+        highdicom.content.PlanePositionSequence:
+            Plane position of the plane.
+
+        """
+        if plane_number < 0 or plane_number >= self.shape[0]:
+            raise ValueError("Invalid plane number for volume.")
+        index = np.array([[plane_number, 0, 0]])
+        position = self.map_indices_to_reference(index)[0]
+
+        return PlanePositionSequence(
+            CoordinateSystemNames.PATIENT,
+            position,
+        )
+
+    def get_plane_positions(self) -> List[PlanePositionSequence]:
+        """Get plane positions of all planes in the volume.
+
+        Returns
+        -------
+        List[highdicom.content.PlanePositionSequence]:
+            Plane positions of all planes (stacked down axis 0 of the
+            volume).
+
+        """
+        indices = np.array(
+            [
+                [p, 0, 0] for p in range(self.shape[0])
+            ]
+        )
+        positions = self.map_indices_to_reference(indices)
+
+        return [
+            PlanePositionSequence(
+                CoordinateSystemNames.PATIENT,
+                pos,
+            )
+            for pos in positions
+        ]
+
     @property
     def frame_of_reference_uid(self) -> Optional[str]:
         """Union[str, None]: Frame of reference UID."""
@@ -683,6 +732,16 @@ def affine(self) -> np.ndarray:
 
         """
         return self._affine.copy()
 
+    @property
+    def inverse_affine(self) -> np.ndarray:
+        """numpy.ndarray: 4x4 inverse affine transformation matrix
+
+        Inverse of the affine matrix. This matrix maps a position in the LPS
+        frame of reference coordinate space into an index into the array.
+
+        """
+        return np.linalg.inv(self._affine)
+
     @property
     def shape(self) -> Tuple[int, int, int]:
         """Tuple[int, int, int]: Shape of the volume."""
         return self._shape
 
diff --git a/tests/test_volume.py b/tests/test_volume.py
index 9a078a33..a583a737 100644
--- a/tests/test_volume.py
+++ b/tests/test_volume.py
@@ -7,6 +7,33 @@
 from highdicom.volume import VolumeGeometry
 
 
+def test_transforms():
+    volume = VolumeGeometry.from_attributes(
+        image_position=[0.0, 0.0, 0.0],
+        image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+        pixel_spacing=[1.0, 1.0],
+        rows=50,
+        columns=50,
+        spacing_between_slices=10.0,
+        number_of_frames=25,
+    )
+    plane_positions = volume.get_plane_positions()
+    for i, pos in enumerate(plane_positions):
+        assert np.array_equal(pos[0].ImagePositionPatient, [0.0, 0.0, 10.0 * i])
+
+    indices = np.array([[1, 2, 3]])
+    coords = volume.map_indices_to_reference(indices)
+    assert np.array_equal(coords, np.array([[3.0, 2.0, 10.0]]))
+    round_trip = volume.map_reference_to_indices(coords)
+    assert np.array_equal(round_trip, indices)
+    index_center = volume.get_center_index()
+    assert np.array_equal(index_center, [12.0, 24.5, 24.5])
+    index_center = volume.get_center_index(round_output=True)
+    assert np.array_equal(index_center, [12, 24, 24])
+    coord_center = volume.get_center_coordinate()
+    assert np.array_equal(coord_center, [24.5, 24.5, 120])
+
+
 @pytest.mark.parametrize(
     'image_position,image_orientation,pixel_spacing,spacing_between_slices',
     [
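
With map_reference_to_indices now built on the inverse affine, the two
mappings are exact inverses of one another, as the new test_transforms test
above exercises. Schematically, for the volume constructed in that test:

    inv = volume.inverse_affine
    assert np.allclose(inv @ volume.affine, np.eye(4))

    coords = volume.map_indices_to_reference(np.array([[1, 2, 3]]))
    round_trip = volume.map_reference_to_indices(coords)
    assert np.array_equal(round_trip, [[1, 2, 3]])
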
From e9d9fd7cd2293d837bccc03040af2229c0cc1c6f Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Wed, 26 Jun 2024 22:02:11 -0400
Subject: [PATCH 21/93] Moved to VolumeArray

---
 src/highdicom/volume.py | 126 ++++++++++++++++++++++------------------
 tests/test_volume.py    |  24 ++++----
 2 files changed, 79 insertions(+), 71 deletions(-)

diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index b76353e9..c0dc6d21 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -20,9 +20,15 @@
 from pydicom import Dataset
 
 
-class VolumeGeometry:
+class VolumeArray:
 
-    """Class representing the geomtry of a regularly-spaced 3D array.
+    """Class representing a 3D array of regularly-spaced frames in 3D space.
+
+    This class combines a 3D NumPy array with an affine matrix describing the
+    location of the voxels in the frame of reference coordinate space. A
+    VolumeArray is not a DICOM object itself, but represents a volume that may
+    be extracted from a DICOM image, and/or encoded within a DICOM object,
+    potentially following any number of processing steps.
 
     All such geometries exist within DICOM's patient coordinate system.
 
@@ -62,8 +68,8 @@
 
     def __init__(
         self,
+        array: np.ndarray,
         affine: np.ndarray,
-        shape: Sequence[int],
         frame_of_reference_uid: Optional[str] = None,
         sop_instance_uids: Optional[Sequence[str]] = None,
         frame_numbers: Optional[Sequence[int]] = None,
@@ -72,15 +78,15 @@
 
         Parameters
         ----------
-        affine: np.ndarray
+        array: numpy.ndarray
+            Three dimensional array of voxel data.
+        affine: numpy.ndarray
             4 x 4 affine matrix representing the transformation from pixel
             indices (slice index, row index, column index) to the
             frame-of-reference coordinate system. The top left 3 x 3 matrix
             should be a scaled orthogonal matrix representing the rotation and
             scaling. The top right 3 x 1 vector represents the translation
             component. The last row should have value [0, 0, 0, 1].
-        shape: Sequence[int]
-            Shape (slices, rows, columns) of the implied volume array.
frame_of_reference_uid: Optional[str], optional Frame of reference UID for the frame of reference, if known. sop_instance_uids: Optional[Sequence[str]], optional @@ -94,6 +100,10 @@ def __init__( DICOM image. """ + if array.ndim != 3: + raise ValueError( + "Argument 'array' must be three-dimensional." + ) if affine.shape != (4, 4): raise ValueError("Affine matrix must have shape (4, 4).") @@ -105,15 +115,9 @@ def __init__( raise ValueError( "Argument 'affine' must be an orthogonal matrix." ) - if len(shape) != 3: - raise ValueError( - "Argument 'shape' must have three elements." - ) + self._array = array self._affine = affine - if len(shape) != 3: - raise ValueError("Argument 'shape' must have three items.") - self._shape = tuple(shape) self._frame_of_reference_uid = frame_of_reference_uid if frame_numbers is not None: if any(not isinstance(f, int) for f in frame_numbers): @@ -125,10 +129,10 @@ def __init__( "Argument 'frame_numbers' should contain only (strictly) " "positive integers." ) - if len(frame_numbers) != shape[0]: + if len(frame_numbers) != self._array.shape[0]: raise ValueError( - "Length of 'frame_numbers' should match first item of " - "'shape'." + "Length of 'frame_numbers' should match first dimension " + "of 'array'." ) self._frame_numbers = list(frame_numbers) else: @@ -139,20 +143,20 @@ def __init__( "Argument 'sop_instance_uids' should be a sequence of " "str." ) - if len(sop_instance_uids) != shape[0]: + if len(sop_instance_uids) != self._array.shape[0]: raise ValueError( - "Length of 'sop_instance_uids' should match first item " - "of 'shape'." + "Length of 'sop_instance_uids' should match first " + "dimension of 'array'." ) self._sop_instance_uids = list(sop_instance_uids) else: self._sop_instance_uids = None @classmethod - def for_image_series( + def from_image_series( cls, series_datasets: Sequence[Dataset], - ) -> "VolumeGeometry": + ) -> "VolumeArray": """Get volume geometry for a series of single frame images. Parameters @@ -163,7 +167,7 @@ def for_image_series( Returns ------- - VolumeGeometry: + VolumeArray: Object representing the geometry of the series. """ @@ -192,7 +196,7 @@ def for_image_series( if slice_spacing is None: raise ValueError('Series is not a regularly spaced volume.') ds = series_datasets[0] - shape = (len(series_datasets), ds.Rows, ds.Columns) + affine = _create_affine_transformation_matrix( image_position=ds.ImagePositionPatient, image_orientation=ds.ImageOrientationPatient, @@ -201,18 +205,21 @@ def for_image_series( index_convention=cls._INTERNAL_INDEX_CONVENTION, ) + # TODO apply color, modality and VOI lookup + array = np.stack([ds.pixel_array for ds in series_datasets]) + return cls( affine=affine, - shape=shape, + array=array, frame_of_reference_uid=frame_of_reference_uid, sop_instance_uids=sorted_sop_instance_uids, ) @classmethod - def for_image( + def from_image( cls, dataset: Dataset, - ) -> "VolumeGeometry": + ) -> "VolumeArray": """Get volume geometry for a multiframe image. Parameters @@ -222,7 +229,7 @@ def for_image( Returns ------- - VolumeGeometry: + VolumeArray: Object representing the geometry of the image. """ @@ -272,7 +279,6 @@ def for_image( 'Dataset does not represent a regularly sampled volume.' 
) - shape = (dataset.NumberOfFrames, dataset.Rows, dataset.Columns) affine = _create_affine_transformation_matrix( image_position=sorted_positions[0], image_orientation=image_orientation, @@ -281,9 +287,15 @@ def for_image( index_convention=cls._INTERNAL_INDEX_CONVENTION, ) + # TODO apply VOI color modality LUT etc + array = dataset.pixel_array + if array.ndim == 2: + array = array[np.newaxis] + array = array[sort_index] + return cls( affine=affine, - shape=shape, + array=array, frame_of_reference_uid=dataset.FrameOfReferenceUID, frame_numbers=sorted_frame_numbers, ) @@ -291,21 +303,23 @@ def for_image( @classmethod def from_attributes( cls, + array: np.ndarray, image_position: Sequence[float], image_orientation: Sequence[float], pixel_spacing: Sequence[float], spacing_between_slices: float, - rows:int, - columns: int, - number_of_frames: int, frame_of_reference_uid: Optional[str] = None, sop_instance_uids: Optional[Sequence[str]] = None, frame_numbers: Optional[Sequence[int]] = None, - ) -> "VolumeGeometry": + ) -> "VolumeArray": """Create a volume geometry from DICOM attributes. Parameters ---------- + array: numpy.ndarray + Three dimensional array of voxel data. The first dimension indexes + slices, the second dimension indexes rows, and the final dimension + indexes columns. image_position: Sequence[float] Position in the frame of reference space of the center of the top left pixel of the image. Corresponds to DICOM attributes @@ -329,27 +343,17 @@ def from_attributes( attribute "SpacingBetweenSlices" (however, this may not be present in many images and may need to be inferred from "ImagePositionPatient" attributes of consecutive slices). - rows:int - Number of rows in the image. Corresponds to the DICOM attribute - "Rows". - columns: int - Number of columns in the image. Corresponds to the DICOM attribute - "Columns". - number_of_frames: int - Number of frames in the image. Corresponds to NumberOfFrames - attribute, or to the number of images in the case of an image - series. frame_of_reference_uid: Union[str, None], optional Frame of reference UID, if known. Corresponds to DICOM attribute FrameOfReferenceUID. sop_instance_uids: Union[Sequence[str], None], optional Ordered SOP Instance UIDs of each frame, if known, in the situation that the volume is formed from a sequence of individual DICOM - instances. + instances, stacked down the first axis (index 0).. frame_numbers: Union[Sequence[int], None], optional Ordered frame numbers of each frame, if known, in the situation that the volume is formed from a sequence of frames of one - multi-frame DICOM image. + multi-frame DICOM image, stacked down the first axis (index 0).. """ affine = _create_affine_transformation_matrix( @@ -359,10 +363,9 @@ def from_attributes( spacing_between_slices=spacing_between_slices, index_convention=cls._INTERNAL_INDEX_CONVENTION, ) - shape = (number_of_frames, rows, columns) return cls( affine=affine, - shape=shape, + array=array, frame_of_reference_uid=frame_of_reference_uid, sop_instance_uids=sop_instance_uids, frame_numbers=frame_numbers, @@ -371,18 +374,20 @@ def from_attributes( @classmethod def from_components( cls, + array: np.ndarray, position: Sequence[float], direction: Sequence[float], spacing: Sequence[float], - shape: Sequence[int], frame_of_reference_uid: Optional[str] = None, sop_instance_uids: Optional[Sequence[str]] = None, frame_numbers: Optional[Sequence[int]] = None, - ) -> "VolumeGeometry": - """Construct a VolumeGeometry from components. 
+ ) -> "VolumeArray": + """Construct a VolumeArray from components. Parameters ---------- + array: numpy.ndarray + Three dimensional array of voxel data. position: Sequence[float] Sequence of three floats giving the position in the frame of reference coordinate system of the center of the pixel at location @@ -403,15 +408,15 @@ def from_components( sop_instance_uids: Union[Sequence[str], None], optional Ordered SOP Instance UIDs of each frame, if known, in the situation that the volume is formed from a sequence of individual DICOM - instances. + instances, stacked down the first axis (index 0). frame_numbers: Union[Sequence[int], None], optional Ordered frame numbers of each frame, if known, in the situation that the volume is formed from a sequence of frames of one - multi-frame DICOM image. + multi-frame DICOM image, stacked down the first axis (index 0). Returns ------- - highdicom.spatial.VolumeGeometry: + highdicom.spatial.VolumeArray: Volume geometry constructed from the provided components. """ @@ -446,8 +451,8 @@ def from_components( ] ) return cls( + array=array, affine=affine, - shape=shape, frame_of_reference_uid=frame_of_reference_uid, sop_instance_uids=sop_instance_uids, frame_numbers=frame_numbers, @@ -516,7 +521,7 @@ def get_center_index(self, round_output: bool = False) -> np.ndarray: Returns ------- - np.ndarray: + numpy.ndarray: Array of shape 3 representing the array indices at the center of the volume. @@ -538,7 +543,7 @@ def get_center_coordinate(self) -> np.ndarray: Returns ------- - np.ndarray: + numpy.ndarray: Array of shape 3 representing the frame-of-reference coordinate at the center of the volume. @@ -745,7 +750,12 @@ def inverse_affine(self) -> np.ndarray: @property def shape(self) -> Tuple[int, int, int]: """Tuple[int, int, int]: Shape of the volume.""" - return self._shape + return tuple(self._array.shape) + + @property + def array(self) -> np.ndarray: + """numpy.ndarray: Volume array (copied).""" + return self._array.copy() @property def sop_instance_uids(self) -> Union[List[str], None]: @@ -829,7 +839,7 @@ def position(self) -> List[float]: @property def direction(self) -> np.ndarray: - """np.ndarray: + """numpy.ndarray: Direction matrix for the volume. 
The columns of the direction
         matrix are orthogonal unit vectors that give the direction in the
         frame of reference space of the increasing direction of each axis
         of the array. The matrix is returned as a 3x3 array of unit column
         vectors.
 
diff --git a/tests/test_volume.py b/tests/test_volume.py
index a583a737..5bc68be6 100644
--- a/tests/test_volume.py
+++ b/tests/test_volume.py
@@ -4,18 +4,17 @@
 import pytest
 
 
-from highdicom.volume import VolumeGeometry
+from highdicom.volume import VolumeArray
 
 
 def test_transforms():
-    volume = VolumeGeometry.from_attributes(
+    array = np.zeros((25, 50, 50))
+    volume = VolumeArray.from_attributes(
+        array=array,
         image_position=[0.0, 0.0, 0.0],
         image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
         pixel_spacing=[1.0, 1.0],
-        rows=50,
-        columns=50,
         spacing_between_slices=10.0,
-        number_of_frames=25,
     )
     plane_positions = volume.get_plane_positions()
     for i, pos in enumerate(plane_positions):
@@ -72,10 +71,9 @@ def test_geometry_from_attributes(
     pixel_spacing,
     spacing_between_slices,
 ):
-    geometry = VolumeGeometry.from_attributes(
-        rows=10,
-        columns=10,
-        number_of_frames=10,
+    array = np.zeros((10, 10, 10))
+    geometry = VolumeArray.from_attributes(
+        array=array,
         image_position=image_position,
@@ -94,8 +92,8 @@ def test_volume_geometry_single_frame():
     ct_series = [pydicom.dcmread(f) for f in ct_files]
-    geometry = VolumeGeometry.for_image_series(ct_series)
-    assert isinstance(geometry, VolumeGeometry)
+    geometry = VolumeArray.from_image_series(ct_series)
+    assert isinstance(geometry, VolumeArray)
     rows, columns = ct_series[0].Rows, ct_series[0].Columns
@@ -127,8 +125,8 @@ def test_volume_geometry_multiframe():
     dcm = pydicom.dcmread(get_testdata_file('eCT_Supplemental.dcm'))
-    geometry = VolumeGeometry.for_image(dcm)
-    assert isinstance(geometry, VolumeGeometry)
+    geometry = VolumeArray.from_image(dcm)
+    assert isinstance(geometry, VolumeArray)
     rows, columns = dcm.Rows, dcm.Columns
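
The next patch begins mapping direction matrices to canonical patient
directions in highdicom.spatial. The invariant it builds on is that the
columns of a valid direction matrix are orthonormal, i.e. D^T D = I; a quick
NumPy check of this property for any VolumeArray:

    import numpy as np

    d = volume.direction  # 3x3 matrix with unit, mutually orthogonal columns
    assert np.allclose(d.T @ d, np.eye(3))
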
From 590f8291709084f52be9305fc0564a2547082836 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Thu, 4 Jul 2024 19:07:24 -0400
Subject: [PATCH 22/93] start on affine matrix mapping

---
 src/highdicom/spatial.py | 36 ++++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)

diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index 52ef7acf..ea492892 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -742,6 +742,40 @@ def _normalize_reference_direction_convention(
     return c
 
 
+def get_closest_directions(affine: np.ndarray) -> Tuple[
+    PatientFrameOfReferenceDirections,
+    PatientFrameOfReferenceDirections,
+    PatientFrameOfReferenceDirections,
+]:
+    """Given an affine matrix, find the closest canonical frame-of-reference
+    direction for each axis of the array.
+
+    Parameters
+    ----------
+    affine: numpy.ndarray
+        Direction matrix (either a 4x4 affine matrix or a 3x3 direction
+        matrix is acceptable).
+
+    Returns
+    -------
+    Tuple[PatientFrameOfReferenceDirections, PatientFrameOfReferenceDirections, PatientFrameOfReferenceDirections]:
+        Closest direction for each of the three axes of the array.
+
+    """
+    if (
+        affine.ndim != 2
+        or (
+            affine.shape != (3, 3) and
+            affine.shape != (4, 4)
+        )
+    ):
+        raise ValueError(f"Invalid shape for array: {affine.shape}")
+
+    result = []
+    for d in range(3):
+        v = affine[:3, d]
+        alignments = v
+
+
 def _is_matrix_orthogonal(
     m: np.ndarray,
     require_unit: bool = True,
 ) -> bool:
     """Check whether a matrix is orthogonal.
 
-    Note this does not require that the columns have unit norm.
-
     Parameters
     ----------
     m: numpy.ndarray
From d9a3fdbd120a4e3f0b7a61a4c5dddcc4c317a73d Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Fri, 5 Jul 2024 12:01:43 -0400
Subject: [PATCH 23/93] Add channels

---
 src/highdicom/volume.py | 36 ++++++++++++++---
 tests/test_volume.py    | 89 ++++++++++++++++++++++++++---------------
 2 files changed, 87 insertions(+), 38 deletions(-)

diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index c0dc6d21..c988a588 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -79,7 +79,9 @@ def __init__(
         Parameters
         ----------
         array: numpy.ndarray
-            Three dimensional array of voxel data.
+            Array of voxel data. Must be either 3D (three spatial dimensions),
+            or 4D (three spatial dimensions followed by a channel dimension).
+            Any datatype is permitted.
         affine: numpy.ndarray
             4 x 4 affine matrix representing the transformation from pixel
             indices (slice index, row index, column index) to the
@@ -100,9 +102,9 @@
         """
-        if array.ndim != 3:
+        if array.ndim not in (3, 4):
             raise ValueError(
-                "Argument 'array' must be three-dimensional."
+                "Argument 'array' must be three or four-dimensional."
             )
 
         if affine.shape != (4, 4):
             raise ValueError("Affine matrix must have shape (4, 4).")
@@ -748,10 +750,34 @@
     @property
-    def shape(self) -> Tuple[int, int, int]:
-        """Tuple[int, int, int]: Shape of the volume."""
+    def shape(self) -> Tuple[int, ...]:
+        """Tuple[int, ...]: Shape of the underlying array.
+
+        May or may not include a fourth channel dimension.
+
+        """
         return tuple(self._array.shape)
 
+    @property
+    def spatial_shape(self) -> Tuple[int, int, int]:
+        """Tuple[int, int, int]: Spatial shape of the array.
+
+        Does not include the channel dimension.
+
+        """
+        return tuple(self._array.shape[:3])
+
+    @property
+    def number_of_channels(self) -> Optional[int]:
+        """Optional[int]: Number of channels.
+
+        If the array has no channel dimension, returns None.
+ + """ + if self._array.ndim == 4: + return self._array.shape[3] + return None + @property def array(self) -> np.ndarray: """numpy.ndarray: Volume array (copied).""" diff --git a/tests/test_volume.py b/tests/test_volume.py index 5bc68be6..dfa74799 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -65,77 +65,99 @@ def test_transforms(): ), ], ) -def test_geometry_from_attributes( +def test_volume_from_attributes( image_position, image_orientation, pixel_spacing, spacing_between_slices, ): array = np.zeros((10, 10, 10)) - geometry = VolumeArray.from_attributes( + volume = VolumeArray.from_attributes( array=array, image_position=image_position, image_orientation=image_orientation, pixel_spacing=pixel_spacing, spacing_between_slices=spacing_between_slices, ) - assert geometry.position == list(image_position) - assert geometry.direction_cosines == list(image_orientation) - assert geometry.pixel_spacing == list(pixel_spacing) - assert geometry.spacing_between_slices == spacing_between_slices + assert volume.position == list(image_position) + assert volume.direction_cosines == list(image_orientation) + assert volume.pixel_spacing == list(pixel_spacing) + assert volume.spacing_between_slices == spacing_between_slices + assert volume.shape == (10, 10, 10) + assert volume.spatial_shape == (10, 10, 10) + assert volume.number_of_channels is None + + +def test_volume_with_channels(): + + array = np.zeros((10, 10, 10, 2)) + + volume = VolumeArray.from_attributes( + array=array, + image_position=(0.0, 0.0, 0.0), + image_orientation=(1.0, 0.0, 0.0, 0.0, 1.0, 0.0), + pixel_spacing=(1.0, 1.0), + spacing_between_slices=2.0, + ) + assert volume.shape == (10, 10, 10, 2) + assert volume.spatial_shape == (10, 10, 10) + assert volume.number_of_channels == 2 -def test_volume_geometry_single_frame(): +def test_volume_single_frame(): ct_files = [ get_testdata_file('dicomdirtests/77654033/CT2/17136'), get_testdata_file('dicomdirtests/77654033/CT2/17196'), get_testdata_file('dicomdirtests/77654033/CT2/17166'), ] ct_series = [pydicom.dcmread(f) for f in ct_files] - geometry = VolumeArray.from_image_series(ct_series) - assert isinstance(geometry, VolumeArray) + volume = VolumeArray.from_image_series(ct_series) + assert isinstance(volume, VolumeArray) rows, columns = ct_series[0].Rows, ct_series[0].Columns - assert geometry.shape == (len(ct_files), rows, columns) - assert geometry.frame_numbers is None + assert volume.shape == (len(ct_files), rows, columns) + assert volume.spatial_shape == volume.shape + assert volume.number_of_channels is None + assert volume.frame_numbers is None sop_instance_uids = [ ct_series[0].SOPInstanceUID, ct_series[2].SOPInstanceUID, ct_series[1].SOPInstanceUID, ] - assert geometry.sop_instance_uids == sop_instance_uids - assert geometry.get_index_for_sop_instance_uid( + assert volume.sop_instance_uids == sop_instance_uids + assert volume.get_index_for_sop_instance_uid( ct_series[2].SOPInstanceUID ) == 1 with pytest.raises(RuntimeError): - geometry.get_index_for_frame_number(2) + volume.get_index_for_frame_number(2) orientation = ct_series[0].ImageOrientationPatient - assert geometry.direction_cosines == orientation - direction = geometry.direction + assert volume.direction_cosines == orientation + direction = volume.direction assert np.array_equal(direction[:, 1], orientation[3:]) assert np.array_equal(direction[:, 2], orientation[:3]) # Check third direction is normal to others assert direction[:, 0] @ direction[:, 1] == 0.0 assert direction[:, 0] @ direction[:, 2] == 0.0 
assert (direction[:, 0] ** 2).sum() == 1.0 - assert geometry.position == ct_series[0].ImagePositionPatient - assert geometry.pixel_spacing == ct_series[0].PixelSpacing + assert volume.position == ct_series[0].ImagePositionPatient + assert volume.pixel_spacing == ct_series[0].PixelSpacing slice_spacing = 1.25 - assert geometry.spacing == [slice_spacing, *ct_series[0].PixelSpacing[::-1]] + assert volume.spacing == [slice_spacing, *ct_series[0].PixelSpacing[::-1]] -def test_volume_geometry_multiframe(): +def test_volume_multiframe(): dcm = pydicom.dcmread(get_testdata_file('eCT_Supplemental.dcm')) - geometry = VolumeArray.from_image(dcm) - assert isinstance(geometry, VolumeArray) + volume = VolumeArray.from_image(dcm) + assert isinstance(volume, VolumeArray) rows, columns = dcm.Rows, dcm.Columns - assert geometry.shape == (dcm.NumberOfFrames, rows, columns) - assert geometry.frame_numbers == [2, 1] - assert geometry.sop_instance_uids is None + assert volume.shape == (dcm.NumberOfFrames, rows, columns) + assert volume.spatial_shape == volume.shape + assert volume.frame_numbers == [2, 1] + assert volume.sop_instance_uids is None with pytest.raises(RuntimeError): - geometry.get_index_for_sop_instance_uid( + volume.get_index_for_sop_instance_uid( dcm.SOPInstanceUID ) - assert geometry.get_index_for_frame_number(2) == 0 + assert volume.get_index_for_frame_number(2) == 0 orientation = ( dcm .SharedFunctionalGroupsSequence[0] @@ -148,22 +170,23 @@ def test_volume_geometry_multiframe(): .PixelMeasuresSequence[0] .PixelSpacing ) - assert geometry.direction_cosines == orientation - direction = geometry.direction + assert volume.direction_cosines == orientation + direction = volume.direction assert np.array_equal(direction[:, 1], orientation[3:]) assert np.array_equal(direction[:, 2], orientation[:3]) # Check third direction is normal to others assert direction[:, 0] @ direction[:, 1] == 0.0 assert direction[:, 0] @ direction[:, 2] == 0.0 assert (direction[:, 0] ** 2).sum() == 1.0 - first_frame = geometry.frame_numbers[0] + first_frame = volume.frame_numbers[0] first_frame_pos = ( dcm .PerFrameFunctionalGroupsSequence[first_frame - 1] .PlanePositionSequence[0] .ImagePositionPatient ) - assert geometry.position == first_frame_pos - assert geometry.pixel_spacing == pixel_spacing + assert volume.position == first_frame_pos + assert volume.pixel_spacing == pixel_spacing slice_spacing = 10.0 - assert geometry.spacing == [slice_spacing, *pixel_spacing[::-1]] + assert volume.spacing == [slice_spacing, *pixel_spacing[::-1]] + assert volume.number_of_channels is None From 379060e19603870544de8135a44c9e8f5f57f968 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 5 Jul 2024 12:35:46 -0400 Subject: [PATCH 24/93] Add with_array --- src/highdicom/volume.py | 59 +++++++++++++++++++++++++++++++++++++++++ tests/test_volume.py | 21 +++++++++++++-- 2 files changed, 78 insertions(+), 2 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index c988a588..2ebae481 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -1,4 +1,6 @@ +from copy import deepcopy from typing import List, Optional, Sequence, Union, Tuple + import numpy as np from highdicom._module_utils import is_multiframe_image @@ -749,6 +751,11 @@ def inverse_affine(self) -> np.ndarray: """ return np.linalg.inv(self._affine) + @property + def dtype(self) -> type: + """type: Datatype of the array.""" + return self._array.dtype + @property def shape(self) -> Tuple[int, ...]: """Tuple[int, ...]: Shape of the underlying 
array. @@ -778,6 +785,28 @@ def number_of_channels(self) -> Optional[int]: return self._array.shape[3] return None + def set_array(self, array: np.ndarray) -> None: + # TODO make this a proper setter and getter + """Change the voxel array without changing the affine. + + Parameters + ---------- + array: np.ndarray + New 3D or 4D array of voxel data. The spatial shape must match the + existing array, but the presence and number of channels and/or the + voxel datatype may differ. + + """ + if array.ndim not in (3, 4): + raise ValueError( + "Argument 'array' must be a three or four dimensional array." + ) + if array.shape[:3] != self.spatial_shape: + raise ValueError( + "Array must match the spatial shape of the existing array." + ) + self._array = array + @property def array(self) -> np.ndarray: """numpy.ndarray: Volume array (copied).""" @@ -877,3 +906,33 @@ def direction(self) -> np.ndarray: dir_mat = self._affine[:3, :3] norms = np.sqrt((dir_mat ** 2).sum(axis=0)) return dir_mat / norms + + def with_array(self, array: np.ndarray) -> 'VolumeArray': + """Get a new volume using a different array. + + The spatial and other metadata will be copied from this volume. + The original volume will be unaltered. + + Parameters + ---------- + array: np.ndarray + New 3D or 4D array of voxel data. The spatial shape must match the + existing array, but the presence and number of channels and/or the + voxel datatype may differ. + + """ + if array.ndim not in (3, 4): + raise ValueError( + "Argument 'array' must be a three or four dimensional array." + ) + if array.shape[:3] != self.spatial_shape: + raise ValueError( + "Array must match the spatial shape of the existing array." + ) + return self.__class__( + array=array, + affine=self._affine.copy(), + frame_of_reference_uid=self.frame_of_reference_uid, + sop_instance_uids=deepcopy(self.sop_instance_uids), + frame_numbers=deepcopy(self.frame_numbers), + ) diff --git a/tests/test_volume.py b/tests/test_volume.py index dfa74799..a400c349 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -89,9 +89,7 @@ def test_volume_from_attributes( def test_volume_with_channels(): - array = np.zeros((10, 10, 10, 2)) - volume = VolumeArray.from_attributes( array=array, image_position=(0.0, 0.0, 0.0), @@ -104,6 +102,25 @@ def test_volume_with_channels(): assert volume.number_of_channels == 2 +def test_with_array(): + array = np.zeros((10, 10, 10)) + volume = VolumeArray.from_attributes( + array=array, + image_position=(0.0, 0.0, 0.0), + image_orientation=(1.0, 0.0, 0.0, 0.0, 1.0, 0.0), + pixel_spacing=(1.0, 1.0), + spacing_between_slices=2.0, + ) + new_array = np.zeros((10, 10, 10, 2), dtype=np.uint8) + new_volume = volume.with_array(new_array) + assert new_volume.number_of_channels == 2 + assert isinstance(new_volume, VolumeArray) + assert volume.spatial_shape == new_volume.spatial_shape + assert np.array_equal(volume.affine, new_volume.affine) + assert volume.affine is not new_volume.affine + assert new_volume.dtype == np.uint8 + + def test_volume_single_frame(): ct_files = [ get_testdata_file('dicomdirtests/77654033/CT2/17136'), From dcfdc60a4214cad3469a88884289025e161cf362 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 5 Jul 2024 13:12:49 -0400 Subject: [PATCH 25/93] Added concat_volumes --- src/highdicom/volume.py | 64 +++++++++++++++++++++++++++++++++++++++++ tests/test_volume.py | 7 ++++- 2 files changed, 70 insertions(+), 1 deletion(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 2ebae481..bcf405c7 100644 --- 
a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -920,6 +920,11 @@ def with_array(self, array: np.ndarray) -> 'VolumeArray': existing array, but the presence and number of channels and/or the voxel datatype may differ. + Returns + ------- + highdicom.volume.VolumeArray: + New volume using the given array and the metadata of this volume. + """ if array.ndim not in (3, 4): raise ValueError( @@ -936,3 +941,62 @@ def with_array(self, array: np.ndarray) -> 'VolumeArray': sop_instance_uids=deepcopy(self.sop_instance_uids), frame_numbers=deepcopy(self.frame_numbers), ) + + +def concat_channels(volumes: Sequence[VolumeArray]) -> VolumeArray: + """Form a new volume by concatenating channels of existing volumes. + + Parameters + ---------- + volumes: Sequence[highdicom.volume.VolumeArray] + Sequence of one or more volumes to concatenate. Volumes must + share the same spatial shape and affine matrix, but may differ + by number and presence of channels. + + Returns + ------- + highdicom.volume.VolumeArray: + Volume array formed by concatenating the arrays. + + """ + if len(volumes) < 1: + raise ValueError("Argument 'volumes' should not be empty.") + spatial_shape = volumes[0].spatial_shape + affine = volumes[0].affine.copy() + frame_of_reference_uids = [ + v.frame_of_reference_uid for v in volumes + if v.frame_of_reference_uid is not None + ] + if len(set(frame_of_reference_uids)) > 1: + raise ValueError( + "Volumes have differing frame of reference UIDs." + ) + if len(frame_of_reference_uids) > 0: + frame_of_reference_uid = frame_of_reference_uids[0] + else: + frame_of_reference_uid = None + if not all(v.spatial_shape == spatial_shape for v in volumes): + raise ValueError( + "All items in 'volumes' should have the same spatial " + "shape." + ) + if not all(np.allclose(v.affine, affine) for v in volumes): + raise ValueError( + "All items in 'volumes' should have the same affine " + "matrix." 
+ ) + + arrays = [] + for v in volumes: + array = v.array + if array.ndim == 3: + array = array[:, :, :, None] + + arrays.append(array) + + concat_array = np.concatenate(arrays, axis=3) + return VolumeArray( + array=concat_array, + affine=affine, + frame_of_reference_uid=frame_of_reference_uid, + ) diff --git a/tests/test_volume.py b/tests/test_volume.py index a400c349..60878782 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -4,7 +4,7 @@ import pytest -from highdicom.volume import VolumeArray +from highdicom.volume import VolumeArray, concat_channels def test_transforms(): @@ -120,6 +120,11 @@ def test_with_array(): assert volume.affine is not new_volume.affine assert new_volume.dtype == np.uint8 + concat_volume = concat_channels([volume, new_volume]) + assert isinstance(concat_volume, VolumeArray) + assert volume.spatial_shape == concat_volume.spatial_shape + assert concat_volume.number_of_channels == 3 + def test_volume_single_frame(): ct_files = [ From cee0c80352e9341df089e23d2617f6220923aae9 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 15 Jul 2024 16:18:43 -0700 Subject: [PATCH 26/93] Rename VolumeArray -> Volume --- src/highdicom/volume.py | 36 ++++++++++++++++++------------------ tests/test_volume.py | 22 +++++++++++----------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index bcf405c7..0324358b 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -22,13 +22,13 @@ from pydicom import Dataset -class VolumeArray: +class Volume: """Class representing a 3D array of regularly-spaced frames in 3D space. This class combines a 3D NumPy array with an affine matrix describing the location of the voxels in the frame of reference coordinate space. A - VolumeArray is not a DICOM object itself, but represents a volume that may + Volume is not a DICOM object itself, but represents a volume that may be extracted from DICOM image, and/or encoded within a DICOM object, potentially following any number of processing steps. @@ -160,7 +160,7 @@ def __init__( def from_image_series( cls, series_datasets: Sequence[Dataset], - ) -> "VolumeArray": + ) -> "Volume": """Get volume geometry for a series of single frame images. Parameters @@ -171,7 +171,7 @@ def from_image_series( Returns ------- - VolumeArray: + Volume: Object representing the geometry of the series. """ @@ -223,7 +223,7 @@ def from_image_series( def from_image( cls, dataset: Dataset, - ) -> "VolumeArray": + ) -> "Volume": """Get volume geometry for a multiframe image. Parameters @@ -233,7 +233,7 @@ def from_image( Returns ------- - VolumeArray: + Volume: Object representing the geometry of the image. """ @@ -315,7 +315,7 @@ def from_attributes( frame_of_reference_uid: Optional[str] = None, sop_instance_uids: Optional[Sequence[str]] = None, frame_numbers: Optional[Sequence[int]] = None, - ) -> "VolumeArray": + ) -> "Volume": """Create a volume geometry from DICOM attributes. Parameters @@ -385,8 +385,8 @@ def from_components( frame_of_reference_uid: Optional[str] = None, sop_instance_uids: Optional[Sequence[str]] = None, frame_numbers: Optional[Sequence[int]] = None, - ) -> "VolumeArray": - """Construct a VolumeArray from components. + ) -> "Volume": + """Construct a Volume from components. Parameters ---------- @@ -406,7 +406,7 @@ def from_components( Spacing between pixel centers in the the frame of reference coordinate system along each of the dimensions of the array. 
shape: Sequence[int] - Sequence of three integers giving the shape of the volume array. + Sequence of three integers giving the shape of the volume. frame_of_reference_uid: Union[str, None], optional Frame of reference UID for the frame of reference, if known. sop_instance_uids: Union[Sequence[str], None], optional @@ -420,7 +420,7 @@ def from_components( Returns ------- - highdicom.spatial.VolumeArray: + highdicom.spatial.Volume: Volume geometry constructed from the provided components. """ @@ -907,7 +907,7 @@ def direction(self) -> np.ndarray: norms = np.sqrt((dir_mat ** 2).sum(axis=0)) return dir_mat / norms - def with_array(self, array: np.ndarray) -> 'VolumeArray': + def with_array(self, array: np.ndarray) -> 'Volume': """Get a new volume using a different array. The spatial and other metadata will be copied from this volume. @@ -922,7 +922,7 @@ def with_array(self, array: np.ndarray) -> 'VolumeArray': Returns ------- - highdicom.volume.VolumeArray: + highdicom.volume.Volume: New volume using the given array and the metadata of this volume. """ @@ -943,20 +943,20 @@ def with_array(self, array: np.ndarray) -> 'VolumeArray': ) -def concat_channels(volumes: Sequence[VolumeArray]) -> VolumeArray: +def concat_channels(volumes: Sequence[Volume]) -> Volume: """Form a new volume by concatenating channels of existing volumes. Parameters ---------- - volumes: Sequence[highdicom.volume.VolumeArray] + volumes: Sequence[highdicom.volume.Volume] Sequence of one or more volumes to concatenate. Volumes must share the same spatial shape and affine matrix, but may differ by number and presence of channels. Returns ------- - highdicom.volume.VolumeArray: - Volume array formed by concatenating the arrays. + highdicom.volume.Volume: + New volume formed by concatenating the input volumes. 
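+
+    Examples
+    --------
+    A minimal illustrative sketch (the shapes and spacings here are
+    arbitrary placeholders, not taken from any real dataset):
+
+    >>> import numpy as np
+    >>> from highdicom.volume import Volume, concat_channels
+    >>> vol_a = Volume.from_attributes(
+    ...     array=np.zeros((5, 10, 10)),
+    ...     image_position=[0.0, 0.0, 0.0],
+    ...     image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+    ...     pixel_spacing=[1.0, 1.0],
+    ...     spacing_between_slices=2.0,
+    ... )
+    >>> vol_b = vol_a.with_array(np.ones((5, 10, 10)))
+    >>> concat_channels([vol_a, vol_b]).number_of_channels
+    2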
""" if len(volumes) < 1: @@ -995,7 +995,7 @@ def concat_channels(volumes: Sequence[VolumeArray]) -> VolumeArray: arrays.append(array) concat_array = np.concatenate(arrays, axis=3) - return VolumeArray( + return Volume( array=concat_array, affine=affine, frame_of_reference_uid=frame_of_reference_uid, diff --git a/tests/test_volume.py b/tests/test_volume.py index 60878782..805f87a1 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -4,12 +4,12 @@ import pytest -from highdicom.volume import VolumeArray, concat_channels +from highdicom.volume import Volume, concat_channels def test_transforms(): array = np.zeros((25, 50, 50)) - volume = VolumeArray.from_attributes( + volume = Volume.from_attributes( array=array, image_position=[0.0, 0.0, 0.0], image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0], @@ -72,7 +72,7 @@ def test_volume_from_attributes( spacing_between_slices, ): array = np.zeros((10, 10, 10)) - volume = VolumeArray.from_attributes( + volume = Volume.from_attributes( array=array, image_position=image_position, image_orientation=image_orientation, @@ -90,7 +90,7 @@ def test_volume_from_attributes( def test_volume_with_channels(): array = np.zeros((10, 10, 10, 2)) - volume = VolumeArray.from_attributes( + volume = Volume.from_attributes( array=array, image_position=(0.0, 0.0, 0.0), image_orientation=(1.0, 0.0, 0.0, 0.0, 1.0, 0.0), @@ -104,7 +104,7 @@ def test_volume_with_channels(): def test_with_array(): array = np.zeros((10, 10, 10)) - volume = VolumeArray.from_attributes( + volume = Volume.from_attributes( array=array, image_position=(0.0, 0.0, 0.0), image_orientation=(1.0, 0.0, 0.0, 0.0, 1.0, 0.0), @@ -114,14 +114,14 @@ def test_with_array(): new_array = np.zeros((10, 10, 10, 2), dtype=np.uint8) new_volume = volume.with_array(new_array) assert new_volume.number_of_channels == 2 - assert isinstance(new_volume, VolumeArray) + assert isinstance(new_volume, Volume) assert volume.spatial_shape == new_volume.spatial_shape assert np.array_equal(volume.affine, new_volume.affine) assert volume.affine is not new_volume.affine assert new_volume.dtype == np.uint8 concat_volume = concat_channels([volume, new_volume]) - assert isinstance(concat_volume, VolumeArray) + assert isinstance(concat_volume, Volume) assert volume.spatial_shape == concat_volume.spatial_shape assert concat_volume.number_of_channels == 3 @@ -133,8 +133,8 @@ def test_volume_single_frame(): get_testdata_file('dicomdirtests/77654033/CT2/17166'), ] ct_series = [pydicom.dcmread(f) for f in ct_files] - volume = VolumeArray.from_image_series(ct_series) - assert isinstance(volume, VolumeArray) + volume = Volume.from_image_series(ct_series) + assert isinstance(volume, Volume) rows, columns = ct_series[0].Rows, ct_series[0].Columns assert volume.shape == (len(ct_files), rows, columns) assert volume.spatial_shape == volume.shape @@ -168,8 +168,8 @@ def test_volume_single_frame(): def test_volume_multiframe(): dcm = pydicom.dcmread(get_testdata_file('eCT_Supplemental.dcm')) - volume = VolumeArray.from_image(dcm) - assert isinstance(volume, VolumeArray) + volume = Volume.from_image(dcm) + assert isinstance(volume, Volume) rows, columns = dcm.Rows, dcm.Columns assert volume.shape == (dcm.NumberOfFrames, rows, columns) assert volume.spatial_shape == volume.shape From c79124ba7ded94d4b7c189195f94f9880d4ea588 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 15 Jul 2024 19:09:50 -0700 Subject: [PATCH 27/93] Implment volume indexing --- src/highdicom/volume.py | 288 ++++++++++++++++++++++++++++++---------- tests/test_volume.py 
| 165 ++++++++++++++++++++++- 2 files changed, 379 insertions(+), 74 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 0324358b..14120212 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -73,8 +73,9 @@ def __init__( array: np.ndarray, affine: np.ndarray, frame_of_reference_uid: Optional[str] = None, - sop_instance_uids: Optional[Sequence[str]] = None, - frame_numbers: Optional[Sequence[int]] = None, + source_sop_instance_uids: Optional[Sequence[str]] = None, + source_frame_numbers: Optional[Sequence[int]] = None, + source_frame_dimension: int = 0, ): """ @@ -93,15 +94,20 @@ def __init__( component. The last row should have value [0, 0, 0, 1]. frame_of_reference_uid: Optional[str], optional Frame of reference UID for the frame of reference, if known. - sop_instance_uids: Optional[Sequence[str]], optional + source_sop_instance_uids: Optional[Sequence[str]], optional SOP instance UIDs corresponding to each slice (stacked down dimension 0) of the implied volume. This is relevant if and only if the volume is formed from a series of single frame DICOM images. - frame_numbers: Optional[Sequence[int]], optional - Frame numbers of corresponding to each slice (stacked down - dimension 0) of the implied volume. This is relevant if and only if + source_frame_numbers: Optional[Sequence[int]], optional + Frame numbers of the source image (if any) corresponding to each + slice (stacked down dimension 0). This is relevant if and only if the volume is formed from a set of frames of a single multiframe DICOM image. + source_frame_dimension: int + Dimension (as a zero-based dimension index) down which source + frames were stacked to form the volume. Only applicable if + ``source_sop_instance_uids`` or ``source_frame_numbers`` is + provided, otherwise ignored. """ if array.ndim not in (3, 4): @@ -123,45 +129,59 @@ def __init__( self._array = array self._affine = affine self._frame_of_reference_uid = frame_of_reference_uid - if frame_numbers is not None: - if any(not isinstance(f, int) for f in frame_numbers): + + if source_frame_dimension not in (0, 1, 2): + raise ValueError( + f'Argument "source_frame_dimension" must have value 0, 1, or 2.' + ) + + if source_frame_numbers is not None: + if any(not isinstance(f, int) for f in source_frame_numbers): raise TypeError( - "Argument 'frame_numbers' should be a sequence of ints." + "Argument 'source_frame_numbers' should be a sequence of ints." ) - if any(f < 1 for f in frame_numbers): + if any(f < 1 for f in source_frame_numbers): raise ValueError( - "Argument 'frame_numbers' should contain only (strictly) " - "positive integers." + "Argument 'source_frame_numbers' should contain only " + "(strictly) positive integers." ) - if len(frame_numbers) != self._array.shape[0]: + if len(source_frame_numbers) != self._array.shape[source_frame_dimension]: raise ValueError( - "Length of 'frame_numbers' should match first dimension " - "of 'array'." + "Length of 'source_frame_numbers' should match size " + "of 'array' along the axis given by 'source_frame_dimension'." 
) - self._frame_numbers = list(frame_numbers) + self._source_frame_numbers = list(source_frame_numbers) else: - self._frame_numbers = None - if sop_instance_uids is not None: - if any(not isinstance(u, str) for u in sop_instance_uids): + self._source_frame_numbers = None + if source_sop_instance_uids is not None: + if any(not isinstance(u, str) for u in source_sop_instance_uids): raise TypeError( - "Argument 'sop_instance_uids' should be a sequence of " + "Argument 'source_sop_instance_uids' should be a sequence of " "str." ) - if len(sop_instance_uids) != self._array.shape[0]: + if ( + len(source_sop_instance_uids) != + self._array.shape[source_frame_dimension] + ): raise ValueError( - "Length of 'sop_instance_uids' should match first " - "dimension of 'array'." + "Length of 'source_sop_instance_uids' should match size " + "of 'array' along the axis given by 'source_frame_dimension'." ) - self._sop_instance_uids = list(sop_instance_uids) + self._source_sop_instance_uids = list(source_sop_instance_uids) else: - self._sop_instance_uids = None + self._source_sop_instance_uids = None + + if source_frame_numbers is not None or source_sop_instance_uids is not None: + self._source_frame_dimension = source_frame_dimension + else: + self._source_frame_dimension = None @classmethod def from_image_series( cls, series_datasets: Sequence[Dataset], ) -> "Volume": - """Get volume geometry for a series of single frame images. + """Create volume from a series of single frame images. Parameters ---------- @@ -172,7 +192,7 @@ def from_image_series( Returns ------- Volume: - Object representing the geometry of the series. + Volume created from the series. """ coordinate_system = get_image_coordinate_system(series_datasets[0]) @@ -192,7 +212,7 @@ def from_image_series( raise ValueError('Images do not share a frame of reference.') series_datasets = sort_datasets(series_datasets) - sorted_sop_instance_uids = [ + sorted_source_sop_instance_uids = [ ds.SOPInstanceUID for ds in series_datasets ] @@ -216,7 +236,7 @@ def from_image_series( affine=affine, array=array, frame_of_reference_uid=frame_of_reference_uid, - sop_instance_uids=sorted_sop_instance_uids, + source_sop_instance_uids=sorted_source_sop_instance_uids, ) @classmethod @@ -224,7 +244,7 @@ def from_image( cls, dataset: Dataset, ) -> "Volume": - """Get volume geometry for a multiframe image. + """Create volume from a multiframe image. Parameters ---------- @@ -234,7 +254,7 @@ def from_image( Returns ------- Volume: - Object representing the geometry of the image. + Volume created from the image. 
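+
+        Examples
+        --------
+        An illustrative sketch; ``'multiframe_image.dcm'`` is a hypothetical
+        path standing in for any regularly-spaced multiframe image file:
+
+        >>> import pydicom
+        >>> from highdicom.volume import Volume
+        >>> dcm = pydicom.dcmread('multiframe_image.dcm')
+        >>> volume = Volume.from_image(dcm)
+        >>> volume.spatial_shape == volume.array.shape[:3]
+        True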
""" if not is_multiframe_image(dataset): @@ -268,7 +288,7 @@ def from_image( image_orientation, ) sorted_positions = [image_positions[i] for i in sort_index] - sorted_frame_numbers = [f + 1 for f in sort_index] + sorted_source_frame_numbers = [f + 1 for f in sort_index] if 'PixelMeasuresSequence' not in sfgs: raise ValueError('Frames do not share pixel measures.') @@ -301,7 +321,7 @@ def from_image( affine=affine, array=array, frame_of_reference_uid=dataset.FrameOfReferenceUID, - frame_numbers=sorted_frame_numbers, + source_frame_numbers=sorted_source_frame_numbers, ) @classmethod @@ -313,10 +333,10 @@ def from_attributes( pixel_spacing: Sequence[float], spacing_between_slices: float, frame_of_reference_uid: Optional[str] = None, - sop_instance_uids: Optional[Sequence[str]] = None, - frame_numbers: Optional[Sequence[int]] = None, + source_sop_instance_uids: Optional[Sequence[str]] = None, + source_frame_numbers: Optional[Sequence[int]] = None, ) -> "Volume": - """Create a volume geometry from DICOM attributes. + """Create a volume from DICOM attributes. Parameters ---------- @@ -350,14 +370,20 @@ def from_attributes( frame_of_reference_uid: Union[str, None], optional Frame of reference UID, if known. Corresponds to DICOM attribute FrameOfReferenceUID. - sop_instance_uids: Union[Sequence[str], None], optional + source_sop_instance_uids: Union[Sequence[str], None], optional Ordered SOP Instance UIDs of each frame, if known, in the situation that the volume is formed from a sequence of individual DICOM instances, stacked down the first axis (index 0).. - frame_numbers: Union[Sequence[int], None], optional - Ordered frame numbers of each frame, if known, in the situation - that the volume is formed from a sequence of frames of one - multi-frame DICOM image, stacked down the first axis (index 0).. + source_frame_numbers: Union[Sequence[int], None], optional + Ordered frame numbers of each frame of the source image, in the + situation that the volume is formed from a sequence of frames of + one multi-frame DICOM image, stacked down the first axis (index + 0). + + Returns + ------- + highdicom.volume.Volume: + New Volume using the given array and DICOM attributes. """ affine = _create_affine_transformation_matrix( @@ -371,8 +397,8 @@ def from_attributes( affine=affine, array=array, frame_of_reference_uid=frame_of_reference_uid, - sop_instance_uids=sop_instance_uids, - frame_numbers=frame_numbers, + source_sop_instance_uids=source_sop_instance_uids, + source_frame_numbers=source_frame_numbers, ) @classmethod @@ -383,8 +409,8 @@ def from_components( direction: Sequence[float], spacing: Sequence[float], frame_of_reference_uid: Optional[str] = None, - sop_instance_uids: Optional[Sequence[str]] = None, - frame_numbers: Optional[Sequence[int]] = None, + source_sop_instance_uids: Optional[Sequence[str]] = None, + source_frame_numbers: Optional[Sequence[int]] = None, ) -> "Volume": """Construct a Volume from components. @@ -409,19 +435,19 @@ def from_components( Sequence of three integers giving the shape of the volume. frame_of_reference_uid: Union[str, None], optional Frame of reference UID for the frame of reference, if known. - sop_instance_uids: Union[Sequence[str], None], optional + source_sop_instance_uids: Union[Sequence[str], None], optional Ordered SOP Instance UIDs of each frame, if known, in the situation that the volume is formed from a sequence of individual DICOM instances, stacked down the first axis (index 0). 
- frame_numbers: Union[Sequence[int], None], optional - Ordered frame numbers of each frame, if known, in the situation - that the volume is formed from a sequence of frames of one - multi-frame DICOM image, stacked down the first axis (index 0). + source_frame_numbers: Union[Sequence[int], None], optional + Ordered frame numbers of each frame of the source image, in the + situation that the volume is formed from a sequence of frames of + one multi-frame DICOM image, stacked down the first axis (index 0). Returns ------- highdicom.spatial.Volume: - Volume geometry constructed from the provided components. + Volume constructed from the provided components. """ if not isinstance(position, Sequence): @@ -458,8 +484,8 @@ def from_components( array=array, affine=affine, frame_of_reference_uid=frame_of_reference_uid, - sop_instance_uids=sop_instance_uids, - frame_numbers=frame_numbers, + source_sop_instance_uids=source_sop_instance_uids, + source_frame_numbers=source_frame_numbers, ) def get_index_for_frame_number( @@ -478,14 +504,14 @@ def get_index_for_frame_number( Returns ------- 0-based index of this frame number down the - slice dimension (axis 0) of the volume. + dimension of the volume given by ``source_frame_dimension``. """ - if self._frame_numbers is None: + if self._source_frame_numbers is None: raise RuntimeError( "Frame information is not present." ) - return self._frame_numbers.index(frame_number) + return self._source_frame_numbers.index(frame_number) def get_index_for_sop_instance_uid( self, @@ -504,14 +530,14 @@ def get_index_for_sop_instance_uid( Returns ------- 0-based index of the image with the given SOP Instance UID down the - slice dimension (axis 0) of the volume. + dimension of the volume given by ``source_frame_dimension``. """ - if self._sop_instance_uids is None: + if self._source_sop_instance_uids is None: raise RuntimeError( "SOP Instance UID information is not present." ) - return self._sop_instance_uids.index(sop_instance_uid) + return self._source_sop_instance_uids.index(sop_instance_uid) def get_center_index(self, round_output: bool = False) -> np.ndarray: """Get array index of center of the volume. @@ -785,6 +811,17 @@ def number_of_channels(self) -> Optional[int]: return self._array.shape[3] return None + @property + def source_frame_dimension(self) -> Optional[int]: + """Optional[int]: Dimension along which source frames were stacked. + + Will return either 0, 1, or 2 when the volume was created from a source + image or image series. Will return ``None`` if the volume was not + created from a source image or image series. + + """ + return self._source_frame_dimension + def set_array(self, array: np.ndarray) -> None: # TODO make this a proper setter and getter """Change the voxel array without changing the affine. @@ -813,20 +850,23 @@ def array(self) -> np.ndarray: return self._array.copy() @property - def sop_instance_uids(self) -> Union[List[str], None]: + def source_sop_instance_uids(self) -> Union[List[str], None]: + # TODO account for rotated arrays """Union[List[str], None]: SOP Instance UID at each index.""" - if self._sop_instance_uids is not None: - return self._sop_instance_uids.copy() + if self._source_sop_instance_uids is not None: + return self._source_sop_instance_uids.copy() @property - def frame_numbers(self) -> Union[List[int], None]: + def source_frame_numbers(self) -> Union[List[int], None]: + # TODO account for rotated arrays """Union[List[int], None]: - Frame number at each index down the first dimension. 
+ Frame number within the source image at each index down the first + dimension. """ - if self._frame_numbers is not None: - return self._frame_numbers.copy() + if self._source_frame_numbers is not None: + return self._source_frame_numbers.copy() @property def direction_cosines(self) -> List[float]: @@ -938,8 +978,120 @@ def with_array(self, array: np.ndarray) -> 'Volume': array=array, affine=self._affine.copy(), frame_of_reference_uid=self.frame_of_reference_uid, - sop_instance_uids=deepcopy(self.sop_instance_uids), - frame_numbers=deepcopy(self.frame_numbers), + source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), + source_frame_numbers=deepcopy(self.source_frame_numbers), + ) + + def __getitem__( + self, + index: Union[int, slice, Tuple[Union[int, slice]]], + ) -> "Volume": + """Get a sub-volume of this volume as a new volume. + + Parameters + ---------- + index: Union[int, slice, Tuple[Union[int, slice]]] + + Returns + ------- + highdicom.volume.Volume: + + """ + if isinstance(index, int): + # Change the index to a slice of length one so that all dimensions + # are retained in the output array. Also make into a tuple of + # length 1 to standardize format + tuple_index = (slice(index, index + 1), ) + elif isinstance(index, slice): + # Make into a tuple of length one to standardize the format + tuple_index = (index, ) + elif isinstance(index, tuple): + index_list = [] + for item in index: + if isinstance(item, int): + # Change the index to a slice of length one so that all dimensions + # are retained in the output array. + item = slice(item, item + 1) + index_list.append(item) + elif isinstance(item, slice): + index_list.append(item) + else: + raise TypeError( + 'Items within "index" must be ints, or slices. Got ' + f'{type(item)}.' + ) + + tuple_index = tuple(index_list) + + else: + raise TypeError( + 'Argument "index" must be an int, slice or tuple. Got ' + f'{type(index)}.' 
+ ) + + new_array = self._array[tuple_index] + + new_sop_instance_uids = None + new_frame_numbers = None + new_vectors = [] + origin_indices = [] + + for d in range(0, 3): + # The index item along this dimension + if len(tuple_index) > d: + index_item = tuple_index[d] + first, _, step = index_item.indices(self.shape[d]) + else: + index_item = None + first = 0 + step = 1 + + new_vectors.append(self._affine[:3, d] * step) + origin_indices.append(first) + + if self.source_frame_dimension is not None: + if d == self.source_frame_dimension: + if index_item is not None: + # Need to index the source frame lists along this + # dimension + if self._source_sop_instance_uids is not None: + new_sop_instance_uids = ( + self._source_sop_instance_uids[ + index_item + ] + ) + if self._source_frame_numbers is not None: + new_frame_numbers = self._source_frame_numbers[ + index_item + ] + else: + # Not indexing along this dimension so the lists are + # unchanged + new_sop_instance_uids = deepcopy( + self.source_sop_instance_uids + ) + new_frame_numbers = deepcopy( + self.source_frame_numbers + ) + + origin_index_arr = np.array([origin_indices]) + new_origin_arr = self.map_indices_to_reference(origin_index_arr).T + + new_rotation = np.column_stack(new_vectors) + new_affine = np.row_stack( + [ + np.column_stack([new_rotation, new_origin_arr]), + np.array([0., 0., 0., 1.0]), + ] + ) + + return Volume( + array=new_array, + affine=new_affine, + frame_of_reference_uid=self.frame_of_reference_uid, + source_sop_instance_uids=new_sop_instance_uids, + source_frame_numbers=new_frame_numbers, + source_frame_dimension=self.source_frame_dimension or 0, ) diff --git a/tests/test_volume.py b/tests/test_volume.py index 805f87a1..3ac16720 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -5,6 +5,7 @@ from highdicom.volume import Volume, concat_channels +from highdicom import UID def test_transforms(): @@ -139,13 +140,13 @@ def test_volume_single_frame(): assert volume.shape == (len(ct_files), rows, columns) assert volume.spatial_shape == volume.shape assert volume.number_of_channels is None - assert volume.frame_numbers is None - sop_instance_uids = [ + assert volume.source_frame_numbers is None + source_sop_instance_uids = [ ct_series[0].SOPInstanceUID, ct_series[2].SOPInstanceUID, ct_series[1].SOPInstanceUID, ] - assert volume.sop_instance_uids == sop_instance_uids + assert volume.source_sop_instance_uids == source_sop_instance_uids assert volume.get_index_for_sop_instance_uid( ct_series[2].SOPInstanceUID ) == 1 @@ -173,8 +174,8 @@ def test_volume_multiframe(): rows, columns = dcm.Rows, dcm.Columns assert volume.shape == (dcm.NumberOfFrames, rows, columns) assert volume.spatial_shape == volume.shape - assert volume.frame_numbers == [2, 1] - assert volume.sop_instance_uids is None + assert volume.source_frame_numbers == [2, 1] + assert volume.source_sop_instance_uids is None with pytest.raises(RuntimeError): volume.get_index_for_sop_instance_uid( dcm.SOPInstanceUID @@ -200,7 +201,7 @@ def test_volume_multiframe(): assert direction[:, 0] @ direction[:, 1] == 0.0 assert direction[:, 0] @ direction[:, 2] == 0.0 assert (direction[:, 0] ** 2).sum() == 1.0 - first_frame = volume.frame_numbers[0] + first_frame = volume.source_frame_numbers[0] first_frame_pos = ( dcm .PerFrameFunctionalGroupsSequence[first_frame - 1] @@ -212,3 +213,155 @@ def test_volume_multiframe(): slice_spacing = 10.0 assert volume.spacing == [slice_spacing, *pixel_spacing[::-1]] assert volume.number_of_channels is None + + +def 
test_construction_mismatched_source_lists(): + array = np.random.randint(0, 100, (50, 50, 25)) + affine = np.array([ + [ 0.0, 0.0, 1.0, 0.0], + [ 0.0, 1.0, 0.0, 0.0], + [10.0, 0.0, 0.0, 30.0], + [ 0.0, 0.0, 0.0, 1.0], + ]) + sop_instance_uids = [UID() for _ in range(25)] + frame_numbers = list(range(25)) + with pytest.raises(ValueError): + Volume( + array=array, + affine=affine, + source_sop_instance_uids=sop_instance_uids, + source_frame_dimension=0, + ) + with pytest.raises(ValueError): + Volume( + array=array, + affine=affine, + source_frame_numbers=frame_numbers, + source_frame_dimension=0, + ) + + +def test_indexing(): + array = np.random.randint(0, 100, (25, 50, 50)) + volume = Volume.from_attributes( + array=array, + image_position=[0.0, 0.0, 0.0], + image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0], + pixel_spacing=[1.0, 1.0], + spacing_between_slices=10.0, + source_frame_numbers=list(range(1, 26)), + ) + + # Single integer index + subvolume = volume[3] + assert subvolume.shape == (1, 50, 50) + expected_affine = np.array([ + [ 0.0, 0.0, 1.0, 0.0], + [ 0.0, 1.0, 0.0, 0.0], + [10.0, 0.0, 0.0, 30.0], + [ 0.0, 0.0, 0.0, 1.0], + ]) + assert np.array_equal(subvolume.affine, expected_affine) + assert np.array_equal(subvolume.array, array[3:4]) + assert subvolume.source_frame_numbers == [4] + + # With colons + subvolume = volume[3, :] + assert subvolume.shape == (1, 50, 50) + assert np.array_equal(subvolume.affine, expected_affine) + assert np.array_equal(subvolume.array, array[3:4]) + assert subvolume.source_frame_numbers == [4] + subvolume = volume[3, :, :] + assert subvolume.shape == (1, 50, 50) + assert np.array_equal(subvolume.affine, expected_affine) + assert np.array_equal(subvolume.array, array[3:4]) + assert subvolume.source_frame_numbers == [4] + + # Single slice index + subvolume = volume[3:13] + assert subvolume.shape == (10, 50, 50) + assert np.array_equal(subvolume.affine, expected_affine) + assert np.array_equal(subvolume.array, array[3:13]) + assert subvolume.source_frame_numbers == list(range(4, 14)) + + # Multiple integer indices + subvolume = volume[3, 7] + assert subvolume.shape == (1, 1, 50) + expected_affine = np.array([ + [ 0.0, 0.0, 1.0, 0.0], + [ 0.0, 1.0, 0.0, 7.0], + [10.0, 0.0, 0.0, 30.0], + [ 0.0, 0.0, 0.0, 1.0], + ]) + assert np.array_equal(subvolume.affine, expected_affine) + assert np.array_equal(subvolume.array, array[3:4, 7:8]) + assert subvolume.source_frame_numbers == [4] + + # Multiple integer indices in sequence (should be the same as above) + subvolume = volume[:, 7][3, :] + assert subvolume.shape == (1, 1, 50) + assert np.array_equal(subvolume.affine, expected_affine) + assert np.array_equal(subvolume.array, array[3:4, 7:8]) + assert subvolume.source_frame_numbers == [4] + subvolume = volume[3, :][:, 7] + assert subvolume.shape == (1, 1, 50) + assert np.array_equal(subvolume.affine, expected_affine) + assert np.array_equal(subvolume.array, array[3:4, 7:8]) + assert subvolume.source_frame_numbers == [4] + + # Negative index + subvolume = volume[-4] + assert subvolume.shape == (1, 50, 50) + expected_affine = np.array([ + [ 0.0, 0.0, 1.0, 0.0], + [ 0.0, 1.0, 0.0, 0.0], + [10.0, 0.0, 0.0, 210.0], + [ 0.0, 0.0, 0.0, 1.0], + ]) + assert np.array_equal(subvolume.affine, expected_affine) + assert np.array_equal(subvolume.array, array[-4:-3]) + assert subvolume.source_frame_numbers == [22] + + # Negative index range + subvolume = volume[-4:-2, :, :] + assert subvolume.shape == (2, 50, 50) + assert np.array_equal(subvolume.affine, expected_affine) + assert 
np.array_equal(subvolume.array, array[-4:-2])
+    assert subvolume.source_frame_numbers == [22, 23]
+
+    # Non-zero steps
+    subvolume = volume[12:16:2, ::-1, :]
+    assert subvolume.shape == (2, 50, 50)
+    expected_affine = np.array([
+        [ 0.0,  0.0, 1.0,   0.0],
+        [ 0.0, -1.0, 0.0,  49.0],
+        [20.0,  0.0, 0.0, 120.0],
+        [ 0.0,  0.0, 0.0,   1.0],
+    ])
+    assert np.array_equal(subvolume.affine, expected_affine)
+    assert np.array_equal(subvolume.array, array[12:16:2, ::-1])
+    assert subvolume.source_frame_numbers == [13, 15]
+
+
+def test_indexing_source_dimension_2():
+    array = np.random.randint(0, 100, (50, 50, 25))
+    affine = np.array([
+        [ 0.0, 0.0, 1.0,  0.0],
+        [ 0.0, 1.0, 0.0,  0.0],
+        [10.0, 0.0, 0.0, 30.0],
+        [ 0.0, 0.0, 0.0,  1.0],
+    ])
+    sop_instance_uids = [UID() for _ in range(25)]
+    volume = Volume(
+        array=array,
+        affine=affine,
+        source_sop_instance_uids=sop_instance_uids,
+        source_frame_dimension=2,
+    )
+
+    subvolume = volume[12:14, :, 12:6:-2]
+    assert (
+        subvolume.source_sop_instance_uids ==
+        sop_instance_uids[12:6:-2]
+    )
+    assert np.array_equal(subvolume.array, array[12:14, :, 12:6:-2])

From e61119ed8775b2ad8b49f3a890d83c6c4c0a01d0 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Mon, 15 Jul 2024 20:48:24 -0700
Subject: [PATCH 28/93] Add get_closest_patient_orientation

---
 src/highdicom/spatial.py | 91 +++++++++++++++++++++++++++-------------
 tests/test_spatial.py    | 24 +++++++++++
 2 files changed, 85 insertions(+), 30 deletions(-)

diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index ea492892..7a19baa9 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -17,7 +17,7 @@
 from highdicom.enum import (
     CoordinateSystemNames,
     PixelIndexDirections,
-    PatientFrameOfReferenceDirections,
+    PatientOrientationValuesBiped,
 )
 
 
@@ -698,24 +698,24 @@ def _normalize_pixel_index_convention(
     return c
 
 
-def _normalize_reference_direction_convention(
-    c: Union[str, Sequence[Union[str, PatientFrameOfReferenceDirections]]],
+def _normalize_patient_orientation(
+    c: Union[str, Sequence[Union[str, PatientOrientationValuesBiped]]],
 ) -> Tuple[
-    PatientFrameOfReferenceDirections,
-    PatientFrameOfReferenceDirections,
-    PatientFrameOfReferenceDirections,
+    PatientOrientationValuesBiped,
+    PatientOrientationValuesBiped,
+    PatientOrientationValuesBiped,
 ]:
     """Normalize and check a frame of reference direction convention.
 
     Parameters
     ----------
-    c: Union[str, Sequence[Union[str, highdicom.enum.PatientFrameOfReferenceDirections]]]
+    c: Union[str, Sequence[Union[str, highdicom.enum.PatientOrientationValuesBiped]]]
         Frame of reference convention description consisting of three directions,
-        either L or R, either A or P, and either I or S, in any order.
+        either L or R, either A or P, and either F or H, in any order.
 
     Returns
     -------
-    Tuple[highdicom.enum.PatientFrameOfReferenceDirections, highdicom.enum.PatientFrameOfReferenceDirections, highdicom.enum.PatientFrameOfReferenceDirections]:
+    Tuple[highdicom.enum.PatientOrientationValuesBiped, highdicom.enum.PatientOrientationValuesBiped, highdicom.enum.PatientOrientationValuesBiped]:
         Convention description in a canonical form as a tuple of three enum
         instances. Furthermore this is guaranteed to be a valid description.
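+
+    Examples
+    --------
+    A sketch of the intended behavior (note that this is a private helper;
+    the values shown are hypothetical):
+
+    >>> [d.value for d in _normalize_patient_orientation('HLP')]
+    ['H', 'L', 'P']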
@@ -723,14 +723,14 @@ def _normalize_reference_direction_convention(
     if len(c) != 3:
         raise ValueError('Length of pixel index convention must be 3.')
 
-    c = tuple(PatientFrameOfReferenceDirections(d) for d in c)
+    c = tuple(PatientOrientationValuesBiped(d) for d in c)
 
     c_set = {d.value for d in c}
 
     criteria = [
         ('L' in c_set) != ('R' in c_set),
         ('A' in c_set) != ('P' in c_set),
-        ('I' in c_set) != ('S' in c_set),
+        ('F' in c_set) != ('H' in c_set),
     ]
     if not all(criteria):
         c_str = [d.value for d in c]
@@ -742,22 +742,25 @@ def _normalize_reference_direction_convention(
     return c
 
 
-def get_closest_directions(affine: np.ndarray) -> Tuple[
-    PatientFrameOfReferenceDirections,
-    PatientFrameOfReferenceDirections,
-    PatientFrameOfReferenceDirections,
+def get_closest_patient_orientation(affine: np.ndarray) -> Tuple[
+    PatientOrientationValuesBiped,
+    PatientOrientationValuesBiped,
+    PatientOrientationValuesBiped,
 ]:
-    """Given an affine matrix, find the
+    """Given an affine matrix, find the closest patient orientation.
 
     Parameters
     ----------
     affine: numpy.ndarray
-        Direction matrix (4x4 affine matrices or a 3x3 direction matrices are
+        Direction matrix (4x4 affine matrices and 3x3 direction matrices are
         acceptable).
 
     Returns
     -------
-    Tuple[PatientFrameOfReferenceDirections, PatientFrameOfReferenceDirections, PatientFrameOfReferenceDirections]:
+    Tuple[PatientOrientationValuesBiped, PatientOrientationValuesBiped, PatientOrientationValuesBiped]:
+        Tuple of PatientOrientationValuesBiped values, giving for each of the
+        three axes of the volume represented by the affine matrix, the closest
+        direction in the patient frame of reference coordinate system.
 
     """
     if (
@@ -769,11 +772,39 @@ def get_closest_directions(affine: np.ndarray) -> Tuple[
     ):
         raise ValueError(f"Invalid shape for array: {affine.shape}")
 
+    if not _is_matrix_orthogonal(affine, require_unit=False):
+        raise ValueError('Matrix is not orthogonal.')
+
+    # Matrix whose entry (i, j) is the dot product of frame of reference
+    # axis i with the direction vector of volume axis j
+    alignments = np.eye(3) @ affine[:3, :3]
+    sort_indices = np.argsort(-np.abs(alignments), axis=0)
+
     result = []
-    for d in range(3):
-        v = affine[:3, d]
-        alignments = v
+    pos_directions = [
+        PatientOrientationValuesBiped.L,
+        PatientOrientationValuesBiped.P,
+        PatientOrientationValuesBiped.H,
+    ]
+    neg_directions = [
+        PatientOrientationValuesBiped.R,
+        PatientOrientationValuesBiped.A,
+        PatientOrientationValuesBiped.F,
+    ]
+    for d, sortind in enumerate(sort_indices.T):
+        # Check that this axis has not already been used. This can happen if
+        # one or more array axes are at 45° to some FoR axis. In this case
+        # take the next index in the sort list.
+        for i in sortind:
+            if pos_directions[i] not in result and neg_directions[i] not in result:
+                break
+
+        if alignments[i, d] > 0:
+            result.append(pos_directions[i])
+        else:
+            result.append(neg_directions[i])
+
+    return tuple(result)
 
 
 def _is_matrix_orthogonal(
@@ -1160,10 +1191,10 @@ def _transform_affine_to_convention(
         str, Sequence[Union[str, PixelIndexDirections]], None
     ] = None,
     from_reference_convention: Union[
-        str, Sequence[Union[str, PatientFrameOfReferenceDirections]], None
+        str, Sequence[Union[str, PatientOrientationValuesBiped]], None
     ] = None,
     to_reference_convention: Union[
-        str, Sequence[Union[str, PatientFrameOfReferenceDirections]], None
+        str, Sequence[Union[str, PatientOrientationValuesBiped]], None
     ] = None,
 ) -> np.ndarray:
     """Transform an affine matrix between different conventions.
@@ -1178,9 +1209,9 @@ def _transform_affine_to_convention( Index convention used in the input affine. to_index_convention: Union[str, Sequence[Union[str, PixelIndexDirections]], None], optional Desired index convention for the output affine. - from_reference_convention: Union[str, Sequence[Union[str, PatientFrameOfReferenceDirections]], None], optional + from_reference_convention: Union[str, Sequence[Union[str, PatientOrientationValuesBiped]], None], optional Reference convention used in the input affine. - to_reference_convention: Union[str, Sequence[Union[str, PatientFrameOfReferenceDirections]], None], optional + to_reference_convention: Union[str, Sequence[Union[str, PatientOrientationValuesBiped]], None], optional Desired reference convention for the output affine. Returns @@ -1197,14 +1228,14 @@ def _transform_affine_to_convention( PixelIndexDirections.I: PixelIndexDirections.O, PixelIndexDirections.O: PixelIndexDirections.I, } - pfrd = PatientFrameOfReferenceDirections # shorthand + pfrd = PatientOrientationValuesBiped # shorthand reference_opposites = { pfrd.L: pfrd.R, pfrd.R: pfrd.L, pfrd.A: pfrd.P, pfrd.P: pfrd.A, - pfrd.I: pfrd.S, - pfrd.S: pfrd.I, + pfrd.F: pfrd.H, + pfrd.H: pfrd.F, } if (from_index_convention is None) != (to_index_convention is None): @@ -1245,10 +1276,10 @@ def _transform_affine_to_convention( from_reference_convention is not None and to_reference_convention is not None ): - from_reference_normed = _normalize_reference_direction_convention( + from_reference_normed = _normalize_patient_orientation( from_reference_convention ) - to_reference_normed = _normalize_reference_direction_convention( + to_reference_normed = _normalize_patient_orientation( to_reference_convention ) diff --git a/tests/test_spatial.py b/tests/test_spatial.py index 9f2f573b..18bf51b8 100644 --- a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -12,7 +12,10 @@ ReferenceToImageTransformer, ReferenceToPixelTransformer, _are_images_coplanar, + _normalize_patient_orientation, _transform_affine_matrix, + create_rotation_matrix, + get_closest_patient_orientation, get_series_slice_spacing, is_tiled_image, ) @@ -721,6 +724,27 @@ def test_map_coordinates_between_images(params, inputs, expected_outputs): np.testing.assert_array_almost_equal(outputs, expected_outputs) +@pytest.mark.parametrize( + 'image_orientation,orientation_str', + [ + ([ 1, 0, 0, 0, 1, 0], 'LPH'), + ([ 0, 1, 0, 1, 0, 0], 'PLF'), + ([-1, 0, 0, 0, 1, 0], 'RPF'), + ([ 0, 0, -1, 1, 0, 0], 'FLA'), + ([np.cos(np.pi / 4), -np.sin(np.pi / 4), 0, np.sin(np.pi / 4), np.cos(np.pi / 4), 0], 'LPH'), + ] +) +def test_get_closest_patient_orientation( + image_orientation, + orientation_str, +): + codes = _normalize_patient_orientation(orientation_str) + rotation_matrix = create_rotation_matrix(image_orientation) + assert get_closest_patient_orientation( + rotation_matrix + ) == codes + + all_single_image_transformer_classes = [ ImageToReferenceTransformer, PixelToReferenceTransformer, From e73f6a9297b194a6e1494373569f8dd890fe6ff2 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 15 Jul 2024 21:07:06 -0700 Subject: [PATCH 29/93] WIP on flip and permute --- src/highdicom/volume.py | 61 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 3 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 14120212..0b3a00e1 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -11,6 +11,7 @@ from highdicom.spatial import ( _create_affine_transformation_matrix, _is_matrix_orthogonal, + 
_transform_affine_matrix, get_image_coordinate_system, get_plane_sort_index, get_regular_slice_spacing, @@ -1009,8 +1010,8 @@ def __getitem__( index_list = [] for item in index: if isinstance(item, int): - # Change the index to a slice of length one so that all dimensions - # are retained in the output array. + # Change the index to a slice of length one so that all + # dimensions are retained in the output array. item = slice(item, item + 1) index_list.append(item) elif isinstance(item, slice): @@ -1085,7 +1086,7 @@ def __getitem__( ] ) - return Volume( + return self.__class__( array=new_array, affine=new_affine, frame_of_reference_uid=self.frame_of_reference_uid, @@ -1094,6 +1095,60 @@ def __getitem__( source_frame_dimension=self.source_frame_dimension or 0, ) + def permute(self, indices: Sequence[int]) -> 'Volume': + # TODO add tests for this + """Create a new volume by permuting the axes. + + Parameters + ---------- + indices: Sequence[int] + List of three integers containing the values 0, 1 and 2 + in some order. + + Returns + ------- + highdicom.volume.Volume: + New volume with spatial axes permuted in the provided order. + + """ + if len(indices) != 3 or set(indices) != {0, 1, 2}: + raise ValueError( + f'Argument "indices" must consist of the values 0, 1, and 2 ' + 'in some order.' + ) + + if self._array.ndim == 3: + new_array = self._array.permute(indices) + else: + new_array = self._array.permute([*indices, 3]) + + new_affine = _transform_affine_matrix( + affine=self._affine, + shape=self.spatial_shape, + permute_indices=indices, + ) + + if self.source_frame_dimension is None: + new_source_frame_dimension = 0 + else: + new_source_frame_dimension = indices.index( + self.source_frame_dimension + ) + + return self.__class__( + array=new_array, + affine=new_affine, + frame_of_reference_uid=self.frame_of_reference_uid, + source_sop_instance_uids=self.source_sop_instance_uids, + source_frame_numbers=self.source_frame_numbers, + source_frame_dimension=new_source_frame_dimension, + ) + + def flip(self, axis: Union[int, Sequence[int]]) -> 'Volume': + # TODO + # Remember to flip source lists + pass + def concat_channels(volumes: Sequence[Volume]) -> Volume: """Form a new volume by concatenating channels of existing volumes. From dd010460fd5d7fde2d7dae187b193837447dc33c Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 16 Jul 2024 09:04:50 -0400 Subject: [PATCH 30/93] Add flip volume --- src/highdicom/volume.py | 45 +++++++++++++++++++++++++++++++++++------ 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 0b3a00e1..d1f8f875 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -1097,13 +1097,14 @@ def __getitem__( def permute(self, indices: Sequence[int]) -> 'Volume': # TODO add tests for this - """Create a new volume by permuting the axes. + """Create a new volume by permuting the spatial axes. Parameters ---------- indices: Sequence[int] - List of three integers containing the values 0, 1 and 2 - in some order. + List of three integers containing the values 0, 1 and 2 in some + order. Note that you may not change the position of the channel + axis (if present). Returns ------- @@ -1145,9 +1146,41 @@ def permute(self, indices: Sequence[int]) -> 'Volume': ) def flip(self, axis: Union[int, Sequence[int]]) -> 'Volume': - # TODO - # Remember to flip source lists - pass + """Flip the spatial axes of the array. + + Note that this flips the array and updates the affine to reflect the + flip. 
+
+        Parameters
+        ----------
+        axis: Union[int, Sequence[int]]
+            Axis or list of axes that should be flipped. These should include
+            only the spatial axes (0, 1, and/or 2).
+
+        Returns
+        -------
+        highdicom.volume.Volume:
+            New volume with spatial axes flipped as requested.
+
+        """
+        if isinstance(axis, int):
+            axis = [axis]
+
+        if len(axis) > 3 or len(set(axis) - {0, 1, 2}) > 0:
+            raise ValueError(
+                'Argument "axis" must contain only values 0, 1, and/or 2.'
+            )
+
+        # We will re-use the existing __getitem__ implementation, which has
+        # all this logic figured out already
+        index = []
+        for d in range(3):
+            if d in axis:
+                index.append(slice(-1, None, -1))
+            else:
+                index.append(slice(None))
+
+        return self[tuple(index)]
 
 
 def concat_channels(volumes: Sequence[Volume]) -> Volume:

From 25185496562e98578278ab421706b74557bb1698 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Tue, 16 Jul 2024 10:45:13 -0400
Subject: [PATCH 31/93] Add to_patient_orientation

---
 src/highdicom/spatial.py | 24 +++++++-------
 src/highdicom/volume.py  | 70 ++++++++++++++++++++++++++++++++++++++--
 tests/test_spatial.py    |  2 --
 tests/test_volume.py     | 40 +++++++++++++++++++++++
 4 files changed, 121 insertions(+), 15 deletions(-)

diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index 7a19baa9..4d259e3b 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -29,6 +29,17 @@
 """Tolerance value used by default in tests for equality"""
 
 
+PATIENT_ORIENTATION_OPPOSITES = {
+    PatientOrientationValuesBiped.L: PatientOrientationValuesBiped.R,
+    PatientOrientationValuesBiped.R: PatientOrientationValuesBiped.L,
+    PatientOrientationValuesBiped.A: PatientOrientationValuesBiped.P,
+    PatientOrientationValuesBiped.P: PatientOrientationValuesBiped.A,
+    PatientOrientationValuesBiped.F: PatientOrientationValuesBiped.H,
+    PatientOrientationValuesBiped.H: PatientOrientationValuesBiped.F,
+}
+"""Mapping of each patient orientation value to its opposite."""
+
+
 def is_tiled_image(dataset: Dataset) -> bool:
     """Determine whether a dataset represents a tiled image.
@@ -772,7 +783,7 @@ def get_closest_patient_orientation(affine: np.ndarray) -> Tuple[ ): raise ValueError(f"Invalid shape for array: {affine.shape}") - if not _is_matrix_orthogonal(affine, require_unit=False): + if not _is_matrix_orthogonal(affine[:3, :3], require_unit=False): raise ValueError('Matrix is not orthogonal.') # Matrix representing alignment of dot product of rotation vector i with @@ -1228,15 +1239,6 @@ def _transform_affine_to_convention( PixelIndexDirections.I: PixelIndexDirections.O, PixelIndexDirections.O: PixelIndexDirections.I, } - pfrd = PatientOrientationValuesBiped # shorthand - reference_opposites = { - pfrd.L: pfrd.R, - pfrd.R: pfrd.L, - pfrd.A: pfrd.P, - pfrd.P: pfrd.A, - pfrd.F: pfrd.H, - pfrd.H: pfrd.F, - } if (from_index_convention is None) != (to_index_convention is None): raise TypeError( @@ -1289,7 +1291,7 @@ def _transform_affine_to_convention( permute_reference = [] for d, flipped in zip(to_reference_normed, flip_reference): if flipped: - d_ = reference_opposites[d] + d_ = PATIENT_ORIENTATION_OPPOSITES[d] permute_reference.append(from_reference_normed.index(d_)) else: permute_reference.append(from_reference_normed.index(d)) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index d1f8f875..e2d13e21 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -6,12 +6,16 @@ from highdicom._module_utils import is_multiframe_image from highdicom.enum import ( CoordinateSystemNames, + PatientOrientationValuesBiped, PixelIndexDirections, ) from highdicom.spatial import ( _create_affine_transformation_matrix, _is_matrix_orthogonal, + _normalize_patient_orientation, _transform_affine_matrix, + PATIENT_ORIENTATION_OPPOSITES, + get_closest_patient_orientation, get_image_coordinate_system, get_plane_sort_index, get_regular_slice_spacing, @@ -948,6 +952,21 @@ def direction(self) -> np.ndarray: norms = np.sqrt((dir_mat ** 2).sum(axis=0)) return dir_mat / norms + def get_closest_patient_orientation(self) -> Tuple[ + PatientOrientationValuesBiped, + PatientOrientationValuesBiped, + PatientOrientationValuesBiped, + ]: + """Get patient orientation codes that best represent the affine. + + Returns + ------- + Tuple[highdicom.enum.PatientOrientationValuesBiped, highdicom.enum.PatientOrientationValuesBiped, highdicom.enum.PatientOrientationValuesBiped]: + Tuple giving the closest patient orientation. + + """ + return get_closest_patient_orientation(self._affine) + def with_array(self, array: np.ndarray) -> 'Volume': """Get a new volume using a different array. @@ -1119,9 +1138,9 @@ def permute(self, indices: Sequence[int]) -> 'Volume': ) if self._array.ndim == 3: - new_array = self._array.permute(indices) + new_array = np.transpose(self._array, indices) else: - new_array = self._array.permute([*indices, 3]) + new_array = np.transpose(self._array, [*indices, 3]) new_affine = _transform_affine_matrix( affine=self._affine, @@ -1182,6 +1201,53 @@ def flip(self, axis: Union[int, Sequence[int]]) -> 'Volume': return self[tuple(index)] + def to_patient_orientation( + self, + patient_orientation: Union[ + str, + Sequence[Union[str, PatientOrientationValuesBiped]], + ], + ) -> 'Volume': + """Rearrange the array to a given orientation. + + The resulting volume is formed from this volume through a combination + of axis permutations and flips of the spatial axes. Its patient + orientation will be as close to the desired orientation as can be + achieved with these operations alone (and in particular without + resampling the array). 
+
+        Parameters
+        ----------
+        patient_orientation: Union[str, Sequence[Union[str, highdicom.enum.PatientOrientationValuesBiped]]]
+            Desired patient orientation, as either a sequence of three
+            highdicom.enum.PatientOrientationValuesBiped values, or a string
+            such as ``"FPL"`` using the same characters.
+
+        Returns
+        -------
+        highdicom.Volume:
+            New volume with the closest achievable patient orientation.
+
+        """
+        desired_orientation = _normalize_patient_orientation(
+            patient_orientation
+        )
+
+        current_orientation = self.get_closest_patient_orientation()
+
+        permute_indices = []
+        flip_axes = []
+        for d in desired_orientation:
+            if d in current_orientation:
+                from_index = current_orientation.index(d)
+            else:
+                d_inv = PATIENT_ORIENTATION_OPPOSITES[d]
+                from_index = current_orientation.index(d_inv)
+                flip_axes.append(from_index)
+            permute_indices.append(from_index)
+
+        if len(flip_axes) > 0:
+            result = self.flip(flip_axes)
+        else:
+            result = self
+
+        return result.permute(permute_indices)
 
 
 def concat_channels(volumes: Sequence[Volume]) -> Volume:
     """Form a new volume by concatenating channels of existing volumes.
diff --git a/tests/test_spatial.py b/tests/test_spatial.py
index 18bf51b8..24293882 100644
--- a/tests/test_spatial.py
+++ b/tests/test_spatial.py
@@ -984,5 +984,3 @@ def test_transform_affine_matrix():
         ]
     )
     assert np.array_equal(transformed, expected)
-
-
diff --git a/tests/test_volume.py b/tests/test_volume.py
index 3ac16720..eeb1394b 100644
--- a/tests/test_volume.py
+++ b/tests/test_volume.py
@@ -4,6 +4,7 @@
 
 import pytest
 
+from highdicom.spatial import _normalize_patient_orientation
 from highdicom.volume import Volume, concat_channels
 from highdicom import UID
 
@@ -365,3 +366,42 @@ def test_indexing_source_dimension_2():
         sop_instance_uids[12:6:-2]
     )
     assert np.array_equal(subvolume.array, array[12:14, :, 12:6:-2])
+
+
+@pytest.mark.parametrize(
+    'desired',
+    [
+        'RAF',
+        'RAH',
+        'RPF',
+        'RPH',
+        'LAF',
+        'LAH',
+        'LPF',
+        'LPH',
+        'HLP',
+        'FPR',
+        'HRP',
+    ]
+)
+def test_to_patient_orientation(desired):
+    array = np.random.randint(0, 100, (25, 50, 50))
+    volume = Volume.from_attributes(
+        array=array,
+        image_position=[0.0, 0.0, 0.0],
+        image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+        pixel_spacing=[1.0, 1.0],
+        spacing_between_slices=10.0,
+        source_frame_numbers=list(range(1, 26)),
+    )
+    desired_tup = _normalize_patient_orientation(desired)
+
+    flipped = volume.to_patient_orientation(desired)
+    assert isinstance(flipped, Volume)
+    assert flipped.get_closest_patient_orientation() == desired_tup
+
+    flipped = volume.to_patient_orientation(desired_tup)
+    assert isinstance(flipped, Volume)
+    assert flipped.get_closest_patient_orientation() == desired_tup

From 3b00d6d0044377757c56d55d198a1407f1e7f057 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Tue, 16 Jul 2024 15:00:11 -0400
Subject: [PATCH 32/93] Add ability to pass Volume to segmentation constructor

---
 src/highdicom/__init__.py |   2 +
 src/highdicom/seg/sop.py  | 183 +++++++++++++++++++++++++------------
 src/highdicom/volume.py   |  90 ++++++++++++++++---
 tests/test_seg.py         |  58 ++++++++++++
 4 files changed, 262 insertions(+), 71 deletions(-)

diff --git a/src/highdicom/__init__.py b/src/highdicom/__init__.py
index 02b7cbbf..223b545b 100644
--- a/src/highdicom/__init__.py
+++ b/src/highdicom/__init__.py
@@ -56,6 +56,7 @@
 from highdicom.uid import UID
 from highdicom import utils
 from highdicom.version import __version__
+from highdicom.volume import Volume
 
 __all__ = [
     'LUT',
@@ -98,6 +99,7 @@
     'UniversalEntityIDTypeValues',
     'VOILUTFunctionValues',
     'VOILUTTransformation',
+    'Volume',
     '__version__',
     'ann',
     'color',
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index 9ed1c808..35070f2c 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -87,6 +87,7 @@
     _check_long_string,
 )
 from highdicom.uid import UID as hd_UID
+from highdicom.volume import Volume
 
 
 logger = logging.getLogger(__name__)
@@ -789,7 +790,7 @@ class Segmentation(SOPClass):
     def __init__(
         self,
         source_images: Sequence[Dataset],
-        pixel_array: np.ndarray,
+        pixel_array: Union[np.ndarray, Volume],
        segmentation_type: Union[str, SegmentationTypeValues],
        segment_descriptions: Sequence[SegmentDescription],
        series_instance_uid: str,
@@ -909,6 +910,13 @@ def __init__(
             or the extent to which a segment occupies the pixel (if
             `fractional_type` is ``"OCCUPANCY"``).
 
+            Alternatively, ``pixel_array`` may be an instance of a
+            :class:`highdicom.volume.Volume`. In this case, behavior is the
+            same as if the underlying numpy array is passed, and additionally,
+            the ``pixel_measures``, ``plane_positions`` and
+            ``plane_orientation`` will be computed from the volume, and
+            therefore should not be passed as parameters.
+
         segmentation_type: Union[str, highdicom.seg.SegmentationTypeValues]
             Type of segmentation, either ``"BINARY"`` or ``"FRACTIONAL"``
         segment_descriptions: Sequence[highdicom.seg.SegmentDescription]
@@ -952,23 +960,32 @@ def __init__(
             JPEG 2000 Lossless (``"1.2.840.10008.1.2.4.90"``), and
             JPEG LS Lossless (``"1.2.840.10008.1.2.4.80"``).
         pixel_measures: Union[highdicom.PixelMeasures, None], optional
-            Physical spacing of image pixels in `pixel_array`.
-            If ``None``, it will be assumed that the segmentation image has the
-            same pixel measures as the source image(s).
+            Physical spacing of image pixels in `pixel_array`. If ``None``, it
+            will be assumed that the segmentation image has the same pixel
+            measures as the source image(s). If ``pixel_array`` is an instance
+            of :class:`highdicom.volume.Volume`, the pixel measures will be
+            computed from it and therefore this parameter should be left as
+            ``None``.
         plane_orientation: Union[highdicom.PlaneOrientationSequence, None], optional
             Orientation of planes in `pixel_array` relative to axes of
-            three-dimensional patient or slide coordinate space.
-            If ``None``, it will be assumed that the segmentation image as the
-            same plane orientation as the source image(s).
+            three-dimensional patient or slide coordinate space. If ``None``,
+            it will be assumed that the segmentation image has the same plane
+            orientation as the source image(s). If ``pixel_array`` is an
+            instance of :class:`highdicom.volume.Volume`, the plane orientation
+            will be computed from it and therefore this parameter should be
+            left as ``None``.
         plane_positions: Union[Sequence[highdicom.PlanePositionSequence], None], optional
             Position of each plane in `pixel_array` in the three-dimensional
-            patient or slide coordinate space.
-            If ``None``, it will be assumed that the segmentation image has the
-            same plane position as the source image(s). However, this will only
-            work when the first dimension of `pixel_array` matches the number
-            of frames in `source_images` (in case of multi-frame source images)
-            or the number of `source_images` (in case of single-frame source
-            images).
+            patient or slide coordinate space. If ``None``, it will be assumed
+            that the segmentation image has the same plane position as the
+            source image(s). However, this will only work when the first
+            dimension of `pixel_array` matches the number of frames in
+            `source_images` (in case of multi-frame source images) or the
+            number of `source_images` (in case of single-frame source images).
+            If ``pixel_array`` is an instance of
+            :class:`highdicom.volume.Volume`, the plane positions will be
+            computed from it and therefore this parameter should be left as
+            ``None``.
         omit_empty_frames: bool, optional
             If True (default), frames with no non-zero pixels are omitted
             from the segmentation image. If False, all frames are included.
@@ -1058,7 +1075,6 @@ def __init__(
 
         The assumption is made that segments in `pixel_array` are defined in
         the same frame of reference as `source_images`.
-
         """  # noqa: E501
         if len(source_images) == 0:
             raise ValueError('At least one source image is required.')
@@ -1098,24 +1114,6 @@ def __init__(
                 f'Transfer syntax "{transfer_syntax_uid}" is not supported.'
             )
 
-        if pixel_array.ndim == 2:
-            pixel_array = pixel_array[np.newaxis, ...]
-        if pixel_array.ndim not in [3, 4]:
-            raise ValueError('Pixel array must be a 2D, 3D, or 4D array.')
-
-        is_tiled = hasattr(src_img, 'TotalPixelMatrixRows')
-        if tile_pixel_array and not is_tiled:
-            raise ValueError(
-                'When argument "tile_pixel_array" is True, the source image '
-                'must be a tiled image.'
-            )
-        if tile_pixel_array and pixel_array.shape[0] != 1:
-            raise ValueError(
-                'When argument "tile_pixel_array" is True, the input pixel '
-                'array must contain only one "frame" representing the entire '
-                'entire pixel matrix.'
-            )
-
         super().__init__(
             study_instance_uid=src_img.StudyInstanceUID,
             series_instance_uid=series_instance_uid,
@@ -1197,6 +1195,61 @@ def __init__(
         )
 
         self._coordinate_system = None
 
+        from_volume = isinstance(pixel_array, Volume)
+        if from_volume:
+            if not has_ref_frame_uid:
+                raise ValueError(
+                    "A volume should not be passed if the source image(s) "
+                    "has/have no FrameOfReferenceUID."
+                )
+            if pixel_array.frame_of_reference_uid is not None:
+                if (
+                    pixel_array.frame_of_reference_uid !=
+                    src_img.FrameOfReferenceUID
+                ):
+                    raise ValueError(
+                        "The volume passed as the pixel array has a "
+                        "different frame of reference from the source "
+                        "image."
+                    )
+            if pixel_measures is not None:
+                raise TypeError(
+                    "Argument 'pixel_measures' should not be provided if "
+                    "'pixel_array' is a highdicom.Volume."
+                )
+            if plane_orientation is not None:
+                raise TypeError(
+                    "Argument 'plane_orientation' should not be provided if "
+                    "'pixel_array' is a highdicom.Volume."
+                )
+            if plane_positions is not None:
+                raise TypeError(
+                    "Argument 'plane_positions' should not be provided if "
+                    "'pixel_array' is a highdicom.Volume."
+                )
+            plane_positions = pixel_array.get_plane_positions()
+            plane_orientation = pixel_array.get_plane_orientation()
+            pixel_measures = pixel_array.get_pixel_measures()
+            pixel_array = pixel_array.array
+
+        if pixel_array.ndim == 2:
+            pixel_array = pixel_array[np.newaxis, ...]
+        if pixel_array.ndim not in [3, 4]:
+            raise ValueError('Pixel array must be a 2D, 3D, or 4D array.')
+
+        is_tiled = hasattr(src_img, 'TotalPixelMatrixRows')
+        if tile_pixel_array and not is_tiled:
+            raise ValueError(
+                'When argument "tile_pixel_array" is True, the source image '
+                'must be a tiled image.'
+            )
+        if tile_pixel_array and pixel_array.shape[0] != 1:
+            raise ValueError(
+                'When argument "tile_pixel_array" is True, the input pixel '
+                'array must contain only one "frame" representing the '
+                'entire pixel matrix.'
+ ) + # Remember whether these values were provided by the user, or inferred # from the source image. If inferred, we can skip some checks user_provided_orientation = plane_orientation is not None @@ -1675,39 +1728,49 @@ def __init__( rows=self.Rows, columns=self.Columns, ) - if self._coordinate_system == CoordinateSystemNames.PATIENT: - spacing = get_regular_slice_spacing( - image_positions=np.array( - plane_position_values[plane_sort_index, 0, :] - ), - image_orientation=np.array( - plane_orientation[0].ImageOrientationPatient - ), - sort=False, - enforce_right_handed=True, - ) - if spacing is not None and spacing > 0.0: - # The image is a regular volume, so we should record this + if self._coordinate_system == CoordinateSystemNames.PATIENT: + if from_volume: + # Skip checks as this is 3D by construction + # TODO check handedness + # TODO what about omitted frames dimension_organization_type = ( DimensionOrganizationTypeValues.THREE_DIMENSIONAL ) - # Also add the slice spacing to the pixel measures - ( - self.SharedFunctionalGroupsSequence[0] - .PixelMeasuresSequence[0] - .SpacingBetweenSlices - ) = spacing else: - if ( - dimension_organization_type == - DimensionOrganizationTypeValues.THREE_DIMENSIONAL - ): - raise ValueError( - 'Dimension organization "3D" has been specified, ' - 'but the source image is not a regularly-spaced 3D ' - 'volume.' + spacing = get_regular_slice_spacing( + image_positions=np.array( + plane_position_values[plane_sort_index, 0, :] + ), + image_orientation=np.array( + plane_orientation[0].ImageOrientationPatient + ), + sort=False, + enforce_right_handed=True, + ) + + if spacing is not None and spacing > 0.0: + # The image is a regular volume, so we should record this + dimension_organization_type = ( + DimensionOrganizationTypeValues.THREE_DIMENSIONAL ) + # Also add the slice spacing to the pixel measures + ( + self.SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .SpacingBetweenSlices + ) = spacing + else: + if ( + dimension_organization_type == + DimensionOrganizationTypeValues.THREE_DIMENSIONAL + ): + raise ValueError( + 'Dimension organization "3D" has been specified, ' + 'but the source image is not a regularly-spaced 3D ' + 'volume.' + ) + if dimension_organization_type is not None: self.DimensionOrganizationType = dimension_organization_type.value diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index e2d13e21..9ed72959 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -22,7 +22,11 @@ get_series_slice_spacing, sort_datasets, ) -from highdicom.content import PlanePositionSequence +from highdicom.content import ( + PixelMeasuresSequence, + PlaneOrientationSequence, + PlanePositionSequence, +) from pydicom import Dataset @@ -387,7 +391,7 @@ def from_attributes( Returns ------- - highdicom.volume.Volume: + highdicom.Volume: New Volume using the given array and DICOM attributes. """ @@ -735,6 +739,9 @@ def get_plane_position(self, plane_number: int) -> PlanePositionSequence: def get_plane_positions(self) -> List[PlanePositionSequence]: """Get plane positions of all planes in the volume. + This assumes that the volume is encoded in a DICOM file with frames + down axis 0, rows stacked down axis 1, and columns stacked down axis 2. + Returns ------- List[highdicom.content.PlanePositionSequence]: @@ -757,6 +764,41 @@ def get_plane_positions(self) -> List[PlanePositionSequence]: for pos in positions ] + def get_plane_orientation(self) -> PlaneOrientationSequence: + """Get plane orientation sequence for the volume. 
+ + This assumes that the volume is encoded in a DICOM file with frames + down axis 0, rows stacked down axis 1, and columns stacked down axis 2. + + Returns + ------- + highdicom.PlaneOrientationSequence: + Plane orientation sequence + + """ + return PlaneOrientationSequence( + CoordinateSystemNames.PATIENT, + self.direction_cosines, + ) + + def get_pixel_measures(self) -> PixelMeasuresSequence: + """Get pixel measures sequence for the volume. + + This assumes that the volume is encoded in a DICOM file with frames + down axis 0, rows stacked down axis 1, and columns stacked down axis 2. + + Returns + ------- + highdicom.PixelMeasuresSequence: + Pixel measures sequence for the volume. + + """ + return PixelMeasuresSequence( + pixel_spacing=self.pixel_spacing, + slice_thickness=None, + spacing_between_slices=self.spacing_between_slices, + ) + @property def frame_of_reference_uid(self) -> Optional[str]: """Union[str, None]: Frame of reference UID.""" @@ -964,9 +1006,35 @@ def get_closest_patient_orientation(self) -> Tuple[ Tuple[highdicom.enum.PatientOrientationValuesBiped, highdicom.enum.PatientOrientationValuesBiped, highdicom.enum.PatientOrientationValuesBiped]: Tuple giving the closest patient orientation. - """ + """ # noqa: E501 return get_closest_patient_orientation(self._affine) + def astype(self, dtype: type) -> 'Volume': + """Get new volume with a new datatype. + + Parameters + ---------- + dtype: type + A numpy datatype for the new volume. + + Returns + ------- + highdicom.Volume: + New volume with given datatype, and metadata copied from this + volume. + + """ + new_array = self._array.astype(dtype) + + return self.__class__( + array=new_array, + affine=self.affine, + frame_of_reference_uid=self.frame_of_reference_uid, + source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), + source_frame_numbers=deepcopy(self.source_frame_numbers), + source_frame_dimension=self.source_frame_dimension or 0, + ) + def with_array(self, array: np.ndarray) -> 'Volume': """Get a new volume using a different array. @@ -982,7 +1050,7 @@ def with_array(self, array: np.ndarray) -> 'Volume': Returns ------- - highdicom.volume.Volume: + highdicom.Volume: New volume using the given array and the metadata of this volume. """ @@ -1014,7 +1082,7 @@ def __getitem__( Returns ------- - highdicom.volume.Volume: + highdicom.Volume: """ if isinstance(index, int): @@ -1127,7 +1195,7 @@ def permute(self, indices: Sequence[int]) -> 'Volume': Returns ------- - highdicom.volume.Volume: + highdicom.Volume: New volume with spatial axes permuted in the provided order. """ @@ -1159,8 +1227,8 @@ def permute(self, indices: Sequence[int]) -> 'Volume': array=new_array, affine=new_affine, frame_of_reference_uid=self.frame_of_reference_uid, - source_sop_instance_uids=self.source_sop_instance_uids, - source_frame_numbers=self.source_frame_numbers, + source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), + source_frame_numbers=deepcopy(self.source_frame_numbers), source_frame_dimension=new_source_frame_dimension, ) @@ -1178,7 +1246,7 @@ def flip(self, axis: Union[int, Sequence[int]]) -> 'Volume': Returns ------- - highdicom.volume.Volume: + highdicom.Volume: New volume with spatial axes flipped as requested. """ @@ -1254,14 +1322,14 @@ def concat_channels(volumes: Sequence[Volume]) -> Volume: Parameters ---------- - volumes: Sequence[highdicom.volume.Volume] + volumes: Sequence[highdicom.Volume] Sequence of one or more volumes to concatenate. 
Volumes must share the same spatial shape and affine matrix, but may differ by number and presence of channels. Returns ------- - highdicom.volume.Volume: + highdicom.Volume: New volume formed by concatenating the input volumes. """ diff --git a/tests/test_seg.py b/tests/test_seg.py index 81432ace..3e2cb5b1 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -46,6 +46,7 @@ from highdicom.seg.utils import iter_segments from highdicom.sr.coding import CodedConcept from highdicom.uid import UID +from highdicom.volume import Volume from .utils import write_and_read_dataset @@ -639,6 +640,21 @@ def setUp(self): ) self._ct_pixel_array[1:5, 10:15] = True + self._ct_volume_position = [4.3, 1.4, 8.7] + self._ct_volume_orientation = [1., 0., 0, 0., -1., 0.] + self._ct_volume_pixel_spacing = [1., 1.5] + self._ct_volume_slice_spacing = 3.0 + self._ct_volume_array = np.zeros((4, 12, 12)) + self._ct_volume_array[0, 1:4, 8:9] = True + self._ct_volume_array[1, 5:7, 1:4] = True + self._ct_seg_volume = Volume.from_attributes( + array=self._ct_volume_array, + image_position=self._ct_volume_position, + image_orientation=self._ct_volume_orientation, + pixel_spacing=self._ct_volume_pixel_spacing, + spacing_between_slices=self._ct_volume_slice_spacing, + frame_of_reference_uid=self._ct_image.FrameOfReferenceUID, + ) # A single CR image self._cr_image = dcmread( get_testdata_file('dicomdirtests/77654033/CR1/6154') @@ -1445,6 +1461,48 @@ def test_construction_7(self): SegmentsOverlapValues.NO assert not hasattr(instance, 'DimensionOrganizationType') + def test_construction_volume(self): + # Segmentation instance from a series of single-frame CT images + # with empty frames kept in + instance = Segmentation( + [self._ct_image], + self._ct_seg_volume, + SegmentationTypeValues.BINARY.value, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + omit_empty_frames=False + ) + assert np.array_equal( + instance.pixel_array, + self._ct_seg_volume.array, + ) + + assert instance.DimensionOrganizationType == '3D' + shared_item = instance.SharedFunctionalGroupsSequence[0] + assert len(shared_item.PixelMeasuresSequence) == 1 + pm_item = shared_item.PixelMeasuresSequence[0] + assert pm_item.PixelSpacing == self._ct_volume_pixel_spacing + assert not hasattr(pm_item, 'SliceThickness') + assert len(shared_item.PlaneOrientationSequence) == 1 + po_item = shared_item.PlaneOrientationSequence[0] + assert po_item.ImageOrientationPatient == \ + self._ct_volume_orientation + for plane_item, pp in zip( + instance.PerFrameFunctionalGroupsSequence, + self._ct_seg_volume.get_plane_positions(), + ): + assert ( + plane_item.PlanePositionSequence[0].ImagePositionPatient == + pp[0].ImagePositionPatient + ) + def test_construction_3d_multiframe(self): # The CT multiframe image is already a volume, but the frames are # ordered the wrong way From c82c8bfeefd6fc4b0b6ce5f8725dc85c5adaafe9 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 16 Jul 2024 15:21:24 -0400 Subject: [PATCH 33/93] Change set_array to setter --- src/highdicom/volume.py | 20 ++++++++++---------- tests/test_volume.py | 23 +++++++++++++++++++++++ 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 9ed72959..1bfb29dd 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -869,8 +869,13 @@ def 
source_frame_dimension(self) -> Optional[int]: """ return self._source_frame_dimension - def set_array(self, array: np.ndarray) -> None: - # TODO make this a proper setter and getter + @property + def array(self) -> np.ndarray: + """numpy.ndarray: Volume array.""" + return self._array + + @array.setter + def array(self, value: np.ndarray) -> None: """Change the voxel array without changing the affine. Parameters ---------- array: np.ndarray @@ -881,20 +886,15 @@ def set_array(self, array: np.ndarray) -> None: voxel datatype may differ. """ - if array.ndim not in (3, 4): + if value.ndim not in (3, 4): raise ValueError( "Argument 'array' must be a three or four dimensional array." ) - if array.shape[:3] != self.spatial_shape: + if value.shape[:3] != self.spatial_shape: raise ValueError( "Array must match the spatial shape of the existing array." ) - self._array = array - - @property - def array(self) -> np.ndarray: - """numpy.ndarray: Volume array (copied).""" - return self._array.copy() + self._array = value @property def source_sop_instance_uids(self) -> Union[List[str], None]: diff --git a/tests/test_volume.py b/tests/test_volume.py index eeb1394b..80d54a58 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -368,6 +368,29 @@ def test_indexing_source_dimension_2(): assert np.array_equal(subvolume.array, array[12:14, :, 12:6:-2]) +def test_array_setter(): + array = np.random.randint(0, 100, (50, 50, 25)) + affine = np.array([ + [ 0.0, 0.0, 1.0, 0.0], + [ 0.0, 1.0, 0.0, 0.0], + [10.0, 0.0, 0.0, 30.0], + [ 0.0, 0.0, 0.0, 1.0], + ]) + + volume = Volume( + array=array, + affine=affine, + ) + + new_array = np.random.randint(0, 100, (50, 50, 25)) + volume.array = new_array + assert np.array_equal(volume.array, new_array) + + new_array = np.random.randint(0, 100, (25, 50, 50)) + with pytest.raises(ValueError): + volume.array = new_array + + @pytest.mark.parametrize( 'desired', [ From 508b53781584d9cb42cfb61c94c8f125aa4ecc9a Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 16 Jul 2024 15:27:34 -0400 Subject: [PATCH 34/93] Add TODOs --- src/highdicom/volume.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 1bfb29dd..81d20694 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -30,6 +30,12 @@ from pydicom import Dataset +# TODO add segmentation get_volume +# TODO add basic arithmetic operations +# TODO add normalization +# TODO add padding +# TODO add pixel value transformations + class Volume: From 7e1ff71acf72624568de4d6ea28054a2f20d37aa Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 19 Jul 2024 00:03:08 -0400 Subject: [PATCH 35/93] Add volread --- src/highdicom/__init__.py | 8 ++++- src/highdicom/spatial.py | 4 +-- src/highdicom/volume.py | 62 +++++++++++++++++++++++++++++++++++---- tests/test_volume.py | 38 ++++++++++++++++++++++-- 4 files changed, 101 insertions(+), 11 deletions(-) diff --git a/src/highdicom/__init__.py b/src/highdicom/__init__.py index 223b545b..33066bfb 100644 --- a/src/highdicom/__init__.py +++ b/src/highdicom/__init__.py @@ -56,7 +56,11 @@ from highdicom.uid import UID from highdicom import utils from highdicom.version import __version__ -from highdicom.volume import Volume +from highdicom.volume import ( + Volume, + volread, + concat_channels, +) __all__ = [ 'LUT', @@ -103,6 +107,7 @@ '__version__', 'ann', 'color', + 'concat_channels', 'frame', 'io', 'ko', @@ -114,4 +119,5 @@ 'spatial', 'sr', 'utils', + 'volread', ] diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index
4d259e3b..844695fb 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -2833,7 +2833,7 @@ def get_series_slice_spacing( slice spacing. If the series does not represent a volume, returns None. Note that we stipulate that a single image is a 3D volume for the purposes - of this function. In this case the returned slice spacing will be 0.0. + of this function. In this case the returned slice spacing will be 1.0. Parameters ---------- @@ -2854,7 +2854,7 @@ raise ValueError("List must not be empty.") # We stipulate that a single image does represent a volume with spacing 1.0 if len(datasets) == 1: - return 0.0 + return 1.0 for ds in datasets: if is_multiframe_image(ds): raise ValueError( diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 81d20694..3d2ba920 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -1,4 +1,6 @@ from copy import deepcopy +from os import PathLike +from pathlib import Path from typing import List, Optional, Sequence, Union, Tuple import numpy as np @@ -28,7 +30,7 @@ PlanePositionSequence, ) -from pydicom import Dataset +from pydicom import Dataset, dcmread # TODO add segmentation get_volume # TODO add basic arithmetic operations # TODO add normalization # TODO add padding # TODO add pixel value transformations @@ -210,6 +212,13 @@ def from_image_series( Volume created from the series. """ + series_instance_uid = series_datasets[0].SeriesInstanceUID + if not all( + ds.SeriesInstanceUID == series_instance_uid + for ds in series_datasets + ): + raise ValueError('Images do not belong to the same series.') + coordinate_system = get_image_coordinate_system(series_datasets[0]) if ( coordinate_system is None or @@ -219,6 +228,7 @@ "Dataset should exist in the patient " "coordinate_system." ) + frame_of_reference_uid = series_datasets[0].FrameOfReferenceUID if not all( ds.FrameOfReferenceUID == frame_of_reference_uid @@ -231,11 +241,15 @@ ds.SOPInstanceUID for ds in series_datasets ] - slice_spacing = get_series_slice_spacing(series_datasets) - if slice_spacing is None: - raise ValueError('Series is not a regularly spaced volume.') ds = series_datasets[0] + if len(series_datasets) == 1: + slice_spacing = ds.get('SpacingBetweenSlices', 1.0) + else: + slice_spacing = get_series_slice_spacing(series_datasets) + if slice_spacing is None: + raise ValueError('Series is not a regularly-spaced volume.') + affine = _create_affine_transformation_matrix( image_position=ds.ImagePositionPatient, image_orientation=ds.ImageOrientationPatient, @@ -1297,7 +1311,7 @@ def to_patient_orientation( highdicom.enum.PatientOrientationValuesBiped values, or a string such as ``"FPL"`` using the same characters. - """ + """ # noqa: E501 desired_orientation = _normalize_patient_orientation( patient_orientation ) @@ -1380,3 +1394,41 @@ def concat_channels(volumes: Sequence[Volume]) -> Volume: affine=affine, frame_of_reference_uid=frame_of_reference_uid, ) + + +def volread( + fp: Union[str, bytes, PathLike, List[Union[str, PathLike]]], + glob: str = '*.dcm', +) -> Volume: + """Read a volume from a file, directory, or list of files. + + Parameters + ---------- + fp: Union[str, bytes, os.PathLike, List[Union[str, os.PathLike]]] + Any file-like object, directory, or list of file-like objects + representing a DICOM file or set of files. + glob: str, optional + Glob pattern used to find files within the directory in the case that + ``fp`` is a string or path that represents a directory. Follows the + format of the standard library ``glob`` module.
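For illustration, a minimal sketch of calling ``volread`` as added in this patch (the paths here are hypothetical):

    import highdicom as hd

    # Read every file matching the glob pattern within a directory
    vol = hd.volread('ct_series/', glob='*.dcm')

    # Or read an explicit list of single-frame files from one series
    vol = hd.volread(['ct1.dcm', 'ct2.dcm', 'ct3.dcm'])

    print(vol.spatial_shape)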
+ + Returns + ------- + highdicom.Volume + Volume formed from the specified image file(s). + + """ + if isinstance(fp, (str, PathLike)): + fp = Path(fp) + if isinstance(fp, Path) and fp.is_dir(): + fp = list(fp.glob(glob)) + + if isinstance(fp, Sequence): + dcms = [dcmread(f) for f in fp] + else: + dcms = [dcmread(fp)] + + if len(dcms) == 1 and is_multiframe_image(dcms[0]): + return Volume.from_image(dcms[0]) + + return Volume.from_image_series(dcms) diff --git a/tests/test_volume.py b/tests/test_volume.py index 80d54a58..2754e28b 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -1,3 +1,4 @@ +from pathlib import Path import numpy as np import pydicom from pydicom.data import get_testdata_file @@ -5,7 +6,7 @@ from highdicom.spatial import _normalize_patient_orientation -from highdicom.volume import Volume, concat_channels +from highdicom.volume import Volume, concat_channels, volread from highdicom import UID @@ -420,11 +421,42 @@ def test_to_patient_orientation(desired): desired_tup = _normalize_patient_orientation(desired) flipped = volume.to_patient_orientation(desired) - print(volume.affine) - print(flipped.affine) assert isinstance(flipped, Volume) assert flipped.get_closest_patient_orientation() == desired_tup flipped = volume.to_patient_orientation(desired_tup) assert isinstance(flipped, Volume) assert flipped.get_closest_patient_orientation() == desired_tup + + +@pytest.mark.parametrize( + 'fp,glob', + [ + (Path(__file__).parent.parent.joinpath('data/test_files/ct_image.dcm'), None), + (str(Path(__file__).parent.parent.joinpath('data/test_files/ct_image.dcm')), None), + ([Path(__file__).parent.parent.joinpath('data/test_files/ct_image.dcm')], None), + (get_testdata_file('eCT_Supplemental.dcm'), None), + ([get_testdata_file('eCT_Supplemental.dcm')], None), + (Path(__file__).parent.parent.joinpath('data/test_files/'), 'ct_image.dcm'), + (str(Path(__file__).parent.parent.joinpath('data/test_files/')), 'ct_image.dcm'), + ( + [ + get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17196'), + get_testdata_file('dicomdirtests/77654033/CT2/17166'), + ], + None, + ), + ( + [ + Path(get_testdata_file('dicomdirtests/77654033/CT2/17136')), + Path(get_testdata_file('dicomdirtests/77654033/CT2/17196')), + Path(get_testdata_file('dicomdirtests/77654033/CT2/17166')), + ], + None, + ), + ] +) +def test_volread(fp, glob): + volume = volread(fp, glob=glob) + assert isinstance(volume, Volume) From 0c0946eb420bb9ab4e432b43608756582d84bc12 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 28 Jul 2024 15:48:35 -0400 Subject: [PATCH 36/93] Add ability to sort volumes with duplicate and missing positions --- src/highdicom/_multiframe.py | 33 +++-- src/highdicom/seg/sop.py | 4 +- src/highdicom/spatial.py | 244 +++++++++++++++++++++++++++-------- src/highdicom/volume.py | 8 +- tests/test_spatial.py | 121 ++++++++++++++++- 5 files changed, 331 insertions(+), 79 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index ebcaae00..da566951 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -27,7 +27,7 @@ from highdicom.spatial import ( _DEFAULT_SPACING_TOLERANCE, get_image_coordinate_system, - get_regular_slice_spacing, + get_volume_positions, ) from highdicom.uid import UID as hd_UID from highdicom.utils import ( @@ -35,18 +35,8 @@ ) -_NO_FRAME_REF_VALUE = -1 - - -logger = logging.getLogger(__name__) - - -class MultiFrameDBManager: - - """Database manager for frame information in 
a multiframe image.""" - - # Dictionary mapping DCM VRs to appropriate SQLite types - _DCM_SQL_TYPE_MAP = { +# Dictionary mapping DCM VRs to appropriate SQLite types +_DCM_SQL_TYPE_MAP = { 'CS': 'VARCHAR', 'DS': 'REAL', 'FD': 'REAL', @@ -66,6 +56,15 @@ class MultiFrameDBManager: 'US': 'INTEGER', 'UT': 'TEXT', } +_NO_FRAME_REF_VALUE = -1 + + +logger = logging.getLogger(__name__) + + +class MultiFrameDBManager: + + """Database manager for frame information in a multiframe image.""" def __init__( self, @@ -349,7 +348,7 @@ def __init__( except ValueError: continue try: - sql_type = self._DCM_SQL_TYPE_MAP[vr] + sql_type = _DCM_SQL_TYPE_MAP[vr] except KeyError: continue @@ -371,7 +370,7 @@ def __init__( # and a VR that we can map to a sqlite type # Otherwise, we just omit the data from the db vm = int(vm_str) - sql_type = self._DCM_SQL_TYPE_MAP[vr] + sql_type = _DCM_SQL_TYPE_MAP[vr] if vm > 1: for d in range(vm): @@ -825,7 +824,7 @@ def get_slice_spacing( image_positions = np.array( [r for r in cur.execute(query)] ) - spacing = get_regular_slice_spacing( + spacing, _ = get_volume_positions( image_positions=image_positions, image_orientation=np.array(self.shared_image_orientation), sort=True, @@ -885,7 +884,7 @@ def get_slice_spacing( # positions return None - spacing = get_regular_slice_spacing( + spacing, _ = get_volume_positions( image_positions=all_image_positions[0], image_orientation=np.array(self.shared_image_orientation), sort=True, diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 35070f2c..11486f6c 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -76,7 +76,7 @@ ImageToReferenceTransformer, compute_tile_positions_per_frame, get_image_coordinate_system, - get_regular_slice_spacing, + get_volume_positions, get_tile_array, is_tiled_image, ) @@ -1738,7 +1738,7 @@ def __init__( DimensionOrganizationTypeValues.THREE_DIMENSIONAL ) else: - spacing = get_regular_slice_spacing( + spacing, _ = get_volume_positions( image_positions=np.array( plane_position_values[plane_sort_index, 0, :] ), diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 844695fb..a341f4cc 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -2818,11 +2818,15 @@ def are_points_coplanar( return max_dev <= tol -def get_series_slice_spacing( +def get_series_volume_positions( datasets: Sequence[pydicom.Dataset], tol: float = _DEFAULT_SPACING_TOLERANCE, -) -> Optional[float]: - """Get slice spacing, if any, for a series of single frame images. + sort: bool = True, + enforce_right_handed: bool = False, + allow_missing: bool = False, + allow_duplicates: bool = False, +) -> Tuple[Optional[float], Optional[List[int]]]: + """Get volume positions and spacing for a series of single frame images. First determines whether the image series represents a 3D volume. A 3D volume consists of regularly spaced slices with orthogonal axes, i.e. @@ -2830,7 +2834,9 @@ def get_series_slice_spacing( in-plane image coordinates. If the series does represent a volume, returns the absolute value of the - slice spacing. If the series does not represent a volume, returns None. + slice spacing and the slice indices in the volume for each of the input + datasets. If the series does not represent a volume, returns None for both + outputs. Note that we stipulate that a single image is a 3D volume for the purposes of this function. In this case the returned slice spacing will be 1.0. 
@@ -2842,19 +2848,48 @@ tol: float Tolerance for determining spacing regularity. If slice spacings vary by less than this spacing, they are considered to be regular. + sort: bool, optional + Sort the image positions before finding the spacing. If True, this + makes the function tolerant of unsorted inputs. Set to False to check + whether the positions represent a 3D volume in the specific order in + which they are passed. + enforce_right_handed: bool, optional + If True and sort is False, require that the images are not only + regularly spaced but also that they are ordered correctly to give a + right-handed coordinate system, i.e. frames are ordered along the + direction of the increasing normal vector, as opposed to being ordered + regularly along the direction of the decreasing normal vector. If sort + is True, this has no effect since positions will be sorted in the + right-handed direction before finding the spacing. + allow_missing: bool, optional + Allow for slices missing from the volume. If True, the smallest + distance between two consecutive slices is found and returned as the + slice spacing, provided all other spacings are an integer multiple of + this value (within tolerance). Alternatively, if ``spacing_hint`` is + used, that value will be used instead of the minimum consecutive + spacing. If False, any gaps will result in failure. + allow_duplicates: bool, optional + Allow multiple slices to map to the same position within the volume. + If False, duplicated image positions will result in failure. Returns ------- - float: - Absolute value of the regular slice spacing if the series of images - meets the definition of a 3D volume, above. None otherwise. + Union[float, None]: + If the image positions are regularly spaced, the (absolute value of) + the slice spacing. If the image positions do not represent a + regularly-spaced volume, returns None. + Union[List[int], None]: + List with the same length as the number of image positions. Each + element gives the zero-based index of the corresponding input position + in the volume. If the image positions do not represent a volume, + returns None. """ if len(datasets) == 0: raise ValueError("List must not be empty.") # We stipulate that a single image does represent a volume with spacing 1.0 if len(datasets) == 1: - return 1.0 + return 1.0, [0] for ds in datasets: if is_multiframe_image(ds): raise ValueError( @@ -2865,34 +2900,48 @@ image_orientation = datasets[0].ImageOrientationPatient for ds in datasets[1:]: if ds.ImageOrientationPatient != image_orientation: - return None + return None, None - positions = np.array( - [ds.ImagePositionPatient for ds in datasets] - ) + positions = [ds.ImagePositionPatient for ds in datasets] - return get_regular_slice_spacing( + spacing_hint = datasets[0].get('SpacingBetweenSlices') + + return get_volume_positions( image_positions=positions, - image_orientation=np.array(image_orientation), + image_orientation=image_orientation, tol=tol, + enforce_right_handed=enforce_right_handed, + sort=sort, + allow_duplicates=allow_duplicates, + allow_missing=allow_missing, + spacing_hint=spacing_hint, ) -def get_regular_slice_spacing( +def get_volume_positions( image_positions: Sequence[Sequence[float]], image_orientation: Sequence[float], tol: float = _DEFAULT_SPACING_TOLERANCE, sort: bool = True, enforce_right_handed: bool = False, -) -> Optional[float]: - """Get the regular spacing between set of image positions, if any.
- - A 3D volume consists of regularly spaced slices with orthogonal axes, i.e. - the slices are spaced equally along the direction orthogonal to the - in-plane image coordinates. + allow_missing: bool = False, + allow_duplicates: bool = False, + spacing_hint: Optional[float] = None, +) -> Tuple[Optional[float], Optional[List[int]]]: + """Get the spacing and positions of images within a 3D volume. + + First determines whether the image positions and orientation represent a 3D + volume. A 3D volume consists of regularly spaced slices with orthogonal + axes, i.e. the slices are spaced equally along the direction orthogonal to + the in-plane image coordinates. + + If the positions represent a volume, returns the absolute value of the + slice spacing and the slice indices in the volume for each of the input + positions. If the positions do not represent a volume, returns None for both + outputs. Note that we stipulate that a single image is a 3D volume for the purposes - of this function. In this case the returned slice spacing will be 0.0. + of this function. In this case the returned slice spacing will be 1.0. Parameters ---------- @@ -2904,15 +2953,15 @@ Image orientation as direction cosine values taken directly from the ImageOrientationPatient attribute. 1D array of length 6. Either a numpy array or anything convertible to it may be passed. - tol: float + tol: float, optional Tolerance for determining spacing regularity. If slice spacings vary by less than this spacing, they are considered to be regular. - sort: bool + sort: bool, optional Sort the image positions before finding the spacing. If True, this makes the function tolerant of unsorted inputs. Set to False to check whether the positions represent a 3D volume in the specific order in which they are passed. - enforce_positive: bool + enforce_right_handed: bool, optional If True and sort is False, require that the images are not only regularly spaced but also that they are ordered correctly to give a right-handed coordinate system, i.e. frames are ordered along the @@ -2920,67 +2969,156 @@ regularly along the direction of the decreasing normal vector. If sort is True, this has no effect since positions will be sorted in the right-handed direction before finding the spacing. + allow_missing: bool, optional + Allow for slices missing from the volume. If True, the smallest + distance between two consecutive slices is found and returned as the + slice spacing, provided all other spacings are an integer multiple of + this value (within tolerance). Alternatively, if ``spacing_hint`` is + used, that value will be used instead of the minimum consecutive + spacing. If False, any gaps will result in failure. + allow_duplicates: bool, optional + Allow multiple slices to map to the same position within the volume. + If False, duplicated image positions will result in failure. + spacing_hint: Union[float, None], optional + Expected spacing between slices. If the calculated value is not equal + to this, within tolerance, the outputs will be None. The primary use of + this option is in combination with ``allow_missing``. If + ``allow_missing`` is ``True`` and a ``spacing_hint`` is given, the hint + is used to calculate the index positions instead of the smallest + consecutive spacing. Returns ------- - Union[float, None] - If the image positions are regularly spaced, the (absolute value of) the - slice spacing. If the image positions are not regularly spaced, returns - None.
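A small worked example of the return convention described above, using synthetic positions with one duplicated slice:

    from highdicom.spatial import get_volume_positions

    # Slices spaced 2.5 mm apart along z, with the z = 5.0 position duplicated
    positions = [
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 5.0],
        [0.0, 0.0, 2.5],
        [0.0, 0.0, 5.0],
    ]
    orientation = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    spacing, vol_positions = get_volume_positions(
        positions, orientation, allow_duplicates=True
    )
    # spacing == 2.5 and vol_positions == [0, 2, 1, 2]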
+ Union[float, None]: + If the image positions are regularly spaced, the (absolute value of) + the slice spacing. If the image positions do not represent a + regularly-spaced volume, returns None. + Union[List[int], None]: + List with the same length as the number of image positions. Each + element gives the zero-based index of the corresponding input position + in the volume. If the image positions do not represent a volume, + returns None. """ + if not sort: + if allow_duplicates: + raise ValueError( + "Argument 'allow_duplicates' requires 'sort'." + ) + if allow_missing: + raise ValueError( + "Argument 'allow_missing' requires 'sort'." + ) + + if spacing_hint is not None and spacing_hint <= 0.0: + raise ValueError( + "Argument 'spacing_hint' should be a positive value." + ) + image_positions_arr = np.array(image_positions) - if image_positions.ndim != 2 or image_positions.shape[1] != 3: + if image_positions_arr.ndim != 2 or image_positions_arr.shape[1] != 3: raise ValueError( "Argument 'image_positions' should be an (N, 3) array." ) - n = image_positions.shape[0] + n = image_positions_arr.shape[0] if n == 0: raise ValueError( "Argument 'image_positions' should contain at least 1 position." ) elif n == 1: # Special case, we stipulate that this has spacing 1.0 - return 0.0 + return 1.0, [0] normal_vector = get_normal_vector(image_orientation) + if allow_duplicates: + # Unique index specifies, for each position in the input positions + # array, the position in the unique_positions array of the + # de-duplicated position + unique_positions, unique_index = np.unique( + image_positions_arr, + axis=0, + return_inverse=True, + ) + else: + unique_positions = image_positions_arr + unique_index = np.arange(image_positions_arr.shape[0]) + # Calculate distance of each slice from coordinate system origin along the # normal vector - origin_distances = _get_slice_distances(image_positions, normal_vector) + origin_distances = _get_slice_distances(unique_positions, normal_vector) if sort: + # sort_index gives, for each position in the sorted unique + # positions, the initial index of the corresponding unique position sort_index = np.argsort(origin_distances) - origin_distances = origin_distances[sort_index] + origin_distances_sorted = origin_distances[sort_index] + inverse_sort_index = np.argsort(sort_index) else: - sort_index = np.arange(image_positions.shape[0]) + sort_index = np.arange(unique_positions.shape[0]) + origin_distances_sorted = origin_distances + inverse_sort_index = sort_index - spacings = np.diff(origin_distances) - avg_spacing = spacings.mean() + if allow_missing: + if spacing_hint is not None: + spacing = spacing_hint + else: + spacings = np.diff(origin_distances_sorted) + spacing = spacings.min() + # Check here to prevent divide by zero errors.
Positions should + # have been de-duplicated already, if this is allowed, so there + # should only be zero spacings if some positions are related by + # in-plane translations + if np.isclose(spacing, 0.0, atol=tol): + return None, None + + origin_distance_multiples = origin_distances / spacing + + is_regular = np.allclose( + origin_distance_multiples, + origin_distance_multiples.round(), + atol=tol + ) + + inverse_sort_index = origin_distance_multiples.round().astype(np.int64) + + else: + spacings = np.diff(origin_distances_sorted) + spacing = spacings.mean() + + if spacing_hint is not None: + if not np.isclose(spacing, spacing_hint): + raise RuntimeError( + "Inferred spacing does not match the given 'spacing_hint'." + ) + + is_regular = np.isclose( + spacing, + spacings, + atol=tol + ).all() - is_regular = np.isclose( - avg_spacing, - spacings, - atol=tol - ).all() if is_regular and enforce_right_handed: - if avg_spacing < 0.0: - return None + if spacing < 0.0: + return None, None # Additionally check that the vector from the first to the last plane lies # approximately along the normal vector - pos1 = image_positions[sort_index[0], :] - pos2 = image_positions[sort_index[-1], :] + pos1 = unique_positions[sort_index[0], :] + pos2 = unique_positions[sort_index[-1], :] span = (pos2 - pos1) span /= np.linalg.norm(span) is_perpendicular = abs(normal_vector.T @ span - 1.0) < tol if is_regular and is_perpendicular: - return abs(avg_spacing) + vol_positions = [ + inverse_sort_index[unique_index[i]].item() + for i in range(len(image_positions_arr)) + ] + return abs(spacing), vol_positions else: - return None + return None, None def get_normal_vector( @@ -3002,16 +3140,16 @@ Unit normal vector as a NumPy array with shape (3, ). """ - image_orientation = np.array(image_orientation) - if image_orientation.ndim != 1 or image_orientation.shape[0] != 6: + image_orientation_arr = np.array(image_orientation, dtype=np.float64) + if image_orientation_arr.ndim != 1 or image_orientation_arr.shape[0] != 6: raise ValueError( "Argument 'image_orientation' should be an array of " "length 6."
) # Find normal vector to the imaging plane - v1 = image_orientation[:3] - v2 = image_orientation[3:] + v1 = image_orientation_arr[:3] + v2 = image_orientation_arr[3:] v3 = np.cross(v1, v2) return v3 diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 3d2ba920..8862c4aa 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -20,8 +20,8 @@ get_closest_patient_orientation, get_image_coordinate_system, get_plane_sort_index, - get_regular_slice_spacing, - get_series_slice_spacing, + get_volume_positions, + get_series_volume_positions, sort_datasets, ) from highdicom.content import ( @@ -246,7 +246,7 @@ def from_image_series( if len(series_datasets) == 1: slice_spacing = ds.get('SpacingBetweenSlices', 1.0) else: - slice_spacing = get_series_slice_spacing(series_datasets) + slice_spacing, _ = get_series_volume_positions(series_datasets) if slice_spacing is None: raise ValueError('Series is not a regularly-spaced volume.') @@ -323,7 +323,7 @@ def from_image( raise ValueError('Frames do not share pixel measures.') pixel_spacing = sfgs.PixelMeasuresSequence[0].PixelSpacing - slice_spacing = get_regular_slice_spacing( + slice_spacing, _ = get_volume_positions( image_positions=image_positions, image_orientation=image_orientation, ) diff --git a/tests/test_spatial.py b/tests/test_spatial.py index 24293882..0095c465 100644 --- a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -16,7 +16,8 @@ _transform_affine_matrix, create_rotation_matrix, get_closest_patient_orientation, - get_series_slice_spacing, + get_volume_positions, + get_series_volume_positions, is_tiled_image, ) @@ -899,7 +900,7 @@ def test_get_series_slice_spacing_irregular(): pydicom.dcmread(f) for f in get_testdata_files('dicomdirtests/77654033/CT2/*') ] - spacing = get_series_slice_spacing(ct_series) + spacing, _ = get_series_volume_positions(ct_series) assert spacing is None @@ -911,10 +912,124 @@ def test_get_series_slice_spacing_regular(): get_testdata_file('dicomdirtests/77654033/CT2/17166'), ] ct_series = [pydicom.dcmread(f) for f in ct_files] - spacing = get_series_slice_spacing(ct_series) + spacing, _ = get_series_volume_positions(ct_series) assert spacing == 1.25 +def test_get_spacing_duplicates(): + # Test ability to determine spacing and volume positions with duplicate + # positions + position_indices = np.array( + [0, 1, 2, 3, 4, 5, 2, 5, 5, 3, 1, 1, 2, 4, 1, 2, 0] + ) + expected_spacing = 0.2 + positions = [ + [0.0, 0.0, i * expected_spacing] for i in position_indices + ] + orientation = [1, 0, 0, 0, 1, 0] + + spacing, volume_positions = get_volume_positions( + positions, + orientation, + allow_duplicates=False, + ) + assert spacing is None + assert volume_positions is None + + spacing, volume_positions = get_volume_positions( + positions, + orientation, + allow_duplicates=True, + ) + assert np.isclose(spacing, expected_spacing) + assert volume_positions == position_indices.tolist() + + +def test_get_spacing_missing(): + # Test ability to determine spacing and volume positions with missing + # slices + position_indices = np.array( + [1, 3, 0, 9], # an incomplete list of indices from 0 to 9 + ) + expected_spacing = 0.125 + positions = [ + [0.0, 0.0, i * expected_spacing] for i in position_indices + ] + orientation = [1, 0, 0, 0, 1, 0] + + spacing, volume_positions = get_volume_positions( + positions, + orientation, + allow_missing=True + ) + + assert np.isclose(spacing, expected_spacing) + assert volume_positions == position_indices.tolist() + + +def test_get_spacing_missing_duplicates(): + # 
Test ability to determine spacing and volume positions with missing + # slices and duplicate positions + position_indices = np.array( + [1, 3, 0, 9, 3], + ) + expected_spacing = 0.125 + positions = [ + [0.0, 0.0, i * expected_spacing] for i in position_indices + ] + orientation = [1, 0, 0, 0, 1, 0] + + spacing, volume_positions = get_volume_positions( + positions, + orientation, + allow_missing=True, + ) + assert spacing is None + assert volume_positions is None + + spacing, volume_positions = get_volume_positions( + positions, + orientation, + allow_missing=True, + allow_duplicates=True, + ) + assert np.isclose(spacing, expected_spacing) + assert volume_positions == position_indices.tolist() + + +def test_get_spacing_missing_duplicates_non_consecutive(): + # Test ability to determine spacing and volume positions with missing + # slices and duplicate positions, with no two positions from consecutive + # slices + position_indices = np.array([7, 3, 0, 9, 3]) + expected_spacing = 0.125 + positions = [ + [0.0, 0.0, i * expected_spacing] for i in position_indices + ] + orientation = [1, 0, 0, 0, 1, 0] + + # Without the spacing_hint, the positions do not appear to be a volume + spacing, volume_positions = get_volume_positions( + positions, + orientation, + allow_missing=True, + allow_duplicates=True, + ) + assert spacing is None + assert volume_positions is None + + # With the hint, the positions should be correctly calculated + spacing, volume_positions = get_volume_positions( + positions, + orientation, + allow_missing=True, + allow_duplicates=True, + spacing_hint=expected_spacing, + ) + assert np.isclose(spacing, expected_spacing) + assert volume_positions == position_indices.tolist() + + def test_transform_affine_matrix(): affine = np.array( [ From 3cd033ca3309e1ebfe2c428962e25f7b3ad08ef5 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 28 Jul 2024 18:39:24 -0400 Subject: [PATCH 37/93] Add basic reading as volume. Lots of tidy up and docs to do --- src/highdicom/_multiframe.py | 124 ++++++++++++++++++++-------- src/highdicom/seg/sop.py | 126 +++++++++++++++++++++++++++++ src/highdicom/spatial.py | 4 +- 3 files changed, 202 insertions(+), 52 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index da566951..2d0dd2f9 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -104,6 +104,7 @@ def __init__( # indices extra_collection_pointers = [] extra_collection_func_pointers = {} + spacing_hint = None if self._coordinate_system == CoordinateSystemNames.PATIENT: image_position_tag = tag_for_keyword('ImagePositionPatient') plane_pos_seq_tag = tag_for_keyword('PlanePositionSequence') # Include the image position if it is not an index if image_position_tag not in self._dim_ind_pointers: extra_collection_pointers.append(image_position_tag) extra_collection_func_pointers[ image_position_tag ] = plane_pos_seq_tag + if hasattr(dataset, 'SharedFunctionalGroupsSequence'): + sfgs = dataset.SharedFunctionalGroupsSequence[0] + if hasattr(sfgs, 'PixelMeasuresSequence'): + spacing_hint = ( + sfgs + .PixelMeasuresSequence[0] + .get('SpacingBetweenSlices') + ) + if spacing_hint is None: + # Fall back to the pixel measures of the first frame if the + # spacing is not found in the shared functional groups.
+ if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'): + pfg1 = dataset.PerFrameFunctionalGroupsSequence[0] + if hasattr(pfg1, 'PixelMeasuresSequence'): + spacing_hint = pfg1.PixelMeasuresSequence[0].get( + 'SpacingBetweenSlices' + ) + dim_ind_positions = { dim_ind.DimensionIndexPointer: i for i, dim_ind in enumerate(dataset.DimensionIndexSequence) @@ -129,9 +148,27 @@ def __init__( ptr: [] for ptr in extra_collection_pointers } - self.shared_image_orientation = self._get_shared_image_orientation( - dataset - ) + # Get the shared orientation + self.shared_image_orientation = None + if hasattr(dataset, 'ImageOrientationSlide'): + self.shared_image_orientation = dataset.ImageOrientationSlide + elif hasattr(dataset, 'SharedFunctionalGroupsSequence'): + sfgs = dataset.SharedFunctionalGroupsSequence[0] + if hasattr(sfgs, 'PlaneOrientationSequence'): + self.shared_image_orientation = ( + sfgs.PlaneOrientationSequence[0].ImageOrientationPatient + ) + if self.shared_image_orientation is None: + # Get the orientation of the first frame, and in the later loop + # check whether it is shared. + if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'): + pfg1 = dataset.PerFrameFunctionalGroupsSequence[0] + if hasattr(pfg1, 'PlaneOrientationSequence'): + self.shared_image_orientation = ( + pfg1 + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) self._single_source_frame_per_frame = True @@ -284,6 +321,17 @@ def __init__( referenced_instances.append(ref_instance_uid) referenced_frames.append(frame_source_frames[0]) + # Check that this doesn't have a conflicting orientation + if self.shared_image_orientation is not None: + if hasattr(frame_item, 'PlaneOrientationSequence'): + iop = ( + frame_item + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + if iop != self.shared_image_orientation: + self.shared_image_orientation = None + # Summarise if any( isinstance(v, SpatialLocationsPreservedValues) and @@ -382,6 +430,27 @@ def __init__( col_defs.append(f'{kw} {sql_type} NOT NULL') col_data.append(dim_values[t]) + # Volume related information + self.number_of_volume_positions: Optional[int] = None + self.volume_spacing: Optional[float] = None + if ( + self._coordinate_system == CoordinateSystemNames.PATIENT + and self.shared_image_orientation is not None + ): + if self.shared_image_orientation is not None: + volume_spacing, volume_positions = get_volume_positions( + image_positions=dim_values[image_position_tag], + image_orientation=self.shared_image_orientation, + allow_missing=True, + allow_duplicates=True, + spacing_hint=spacing_hint, + ) + if volume_positions is not None: + self.number_of_volume_positions = max(volume_positions) + 1 + self.volume_spacing = volume_spacing + col_defs.append('VolumePosition INTEGER NOT NULL') + col_data.append(volume_positions) + # Columns related to source frames, if they are usable for indexing if (referenced_frames is None) != (referenced_instances is None): raise TypeError( @@ -570,53 +639,6 @@ def _create_ref_instance_table( referenced_uids, ) - def _get_shared_image_orientation( - self, - dataset: Dataset - ) -> Optional[List[float]]: - """Get image orientation if it is shared between frames. - - Parameters - ---------- - dataset: pydicom.Dataset - Dataset for which to get the image orientation. - - Returns - ------- - List[float]: - Image orientation attribute (list of 6 floats containing direction - cosines) if this is shared between frames in the image. Otherwise - returns None. 
- - """ - if hasattr(dataset, 'ImageOrientationSlide'): - return dataset.ImageOrientationSlide - - if hasattr(dataset, 'SharedFunctionalGroupsSequence'): - sfgs = dataset.SharedFunctionalGroupsSequence[0] - if hasattr(sfgs, 'PlaneOrientationSequence'): - return sfgs.PlaneOrientationSequence[0].ImageOrientationPatient - - if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'): - pfg1 = dataset.PerFrameFunctionalGroupsSequence[0] - if hasattr(pfg1, 'PlaneOrientationSequence'): - iop = pfg1.PlaneOrientationSequence[0].ImageOrientationPatient - - if len(dataset.PerFrameFunctionalGroupsSequence) == 1: - return iop - else: - for pfg in dataset.PerFrameFunctionalGroupsSequence[1:]: - frame_iop = ( - pfg.PlaneOrientationSequence[0]. - ImageOrientationPatient - ) - if frame_iop != iop: - break - else: - return iop - - return None - def are_dimension_indices_unique( self, dimension_index_pointers: Sequence[Union[int, BaseTag]], @@ -886,7 +908,7 @@ def get_slice_spacing( spacing, _ = get_volume_positions( image_positions=all_image_positions[0], - image_orientation=np.array(self.shared_image_orientation), + image_orientation=self.shared_image_orientation, sort=True, tol=tol, ) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 11486f6c..d57d4c67 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -513,6 +513,96 @@ def iterate_indices_by_source_frame( for (fo, fi, seg_no) in self._db_con.execute(query) ) + @contextmanager + def iterate_indices_by_volume( + self, + segment_numbers: Sequence[int], + combine_segments: bool = False, + relabel: bool = False, + ) -> Generator[ + Iterator[ + Tuple[ + Tuple[Union[slice, int], ...], + Tuple[Union[slice, int], ...], + int + ] + ], + None, + None, + ]: + """Iterate over frame indices sorted by volume. + + This yields an iterator to the underlying database result that iterates + over information on the steps required to construct the requested + segmentation mask from the stored frames of the segmentation image. + + This method is intended to be used as a context manager that yields the + requested iterator. The iterator is only valid while the context + manager is active. + + Parameters + ---------- + segment_numbers: Sequence[int] + Sequence containing segment numbers to include. + combine_segments: bool, optional + If True, produce indices to combine the different segments into a + single label map in which the value of a pixel represents its + segment. If False (the default), segments are binary and stacked + down the last dimension of the output array. + relabel: bool, optional + If True and ``combine_segments`` is ``True``, the output segment + numbers are relabelled into the range ``0`` to + ``len(segment_numbers)`` (inclusive) according to the position of + the original segment numbers in ``segment_numbers`` parameter. If + ``combine_segments`` is ``False``, this has no effect. + + Yields + ------ + Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: + Indices required to construct the requested mask. Each + triplet denotes the (output indexer, segmentation indexer, + output segment number) representing a list of "instructions" to + create the requested output array by copying frames from the + segmentation dataset and inserting them into the output array with + a given segment value. Output indexer and segmentation indexer are + tuples that can be used to index the output and segmentations + numpy arrays directly. 
+ + """ # noqa: E501 + if self.volume_spacing is None: + raise RuntimeError( + 'This segmentation does not represent a regularly-spaced ' + 'volume.' + ) + + # Construct the query The ORDER BY is not logically necessary + # but seems to improve performance of the downstream numpy + # operations, presumably as it is more cache efficient + query = ( + 'SELECT ' + ' L.VolumePosition,' + ' L.FrameNumber - 1,' + ' S.OutputSegmentNumber ' + 'FROM FrameLUT L ' + 'INNER JOIN TemporarySegmentNumbers S' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' + 'ORDER BY L.VolumePosition' + ) + + with self._generate_temp_segment_table( + segment_numbers=segment_numbers, + combine_segments=combine_segments, + relabel=relabel + ): + yield ( + ( + (fo, slice(None), slice(None)), + (fi, slice(None), slice(None)), + seg_no + ) + for (fo, fi, seg_no) in self._db_con.execute(query) + ) + @contextmanager def iterate_indices_by_dimension_index_values( self, @@ -4288,6 +4378,42 @@ def get_pixels_by_source_frame( dtype=dtype, ) + def get_pixels_as_volume( + self, + segment_numbers: Optional[Sequence[int]] = None, + combine_segments: bool = False, + relabel: bool = False, + ignore_spatial_locations: bool = False, + assert_missing_frames_are_empty: bool = False, + rescale_fractional: bool = True, + skip_overlap_checks: bool = False, + dtype: Union[type, str, np.dtype, None] = None, + ): + # Checks on validity of the inputs + if segment_numbers is None: + segment_numbers = list(self.segment_numbers) + if len(segment_numbers) == 0: + raise ValueError( + 'Segment numbers may not be empty.' + ) + + with self._db_man.iterate_indices_by_volume( + segment_numbers=segment_numbers, + combine_segments=combine_segments, + relabel=relabel, + ) as indices: + + return self._get_pixels_by_seg_frame( + output_shape=self._db_man.number_of_volume_positions, + indices_iterator=indices, + segment_numbers=np.array(segment_numbers), + combine_segments=combine_segments, + relabel=relabel, + rescale_fractional=rescale_fractional, + skip_overlap_checks=skip_overlap_checks, + dtype=dtype, + ) + def get_pixels_by_dimension_index_values( self, dimension_index_values: Sequence[Sequence[int]], diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index a341f4cc..aec87cfb 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -3072,7 +3072,9 @@ def get_volume_positions( if np.isclose(spacing, 0.0, atol=tol): return None, None - origin_distance_multiples = origin_distances / spacing + origin_distance_multiples = ( + (origin_distances - origin_distances[0]) / spacing + ) is_regular = np.allclose( origin_distance_multiples, From 5c247a3a36c2831f36b77e0c8325fcdf2973041b Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 28 Jul 2024 22:01:11 -0400 Subject: [PATCH 38/93] Fixes --- src/highdicom/_multiframe.py | 10 ++++++++-- src/highdicom/spatial.py | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 2d0dd2f9..200b5b95 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -105,8 +105,8 @@ def __init__( extra_collection_pointers = [] extra_collection_func_pointers = {} spacing_hint = None + image_position_tag = tag_for_keyword('ImagePositionPatient') if self._coordinate_system == CoordinateSystemNames.PATIENT: - image_position_tag = tag_for_keyword('ImagePositionPatient') plane_pos_seq_tag = tag_for_keyword('PlanePositionSequence') # Include the image position if it is not an index if image_position_tag not in 
self._dim_ind_pointers: @@ -438,8 +438,14 @@ def __init__( and self.shared_image_orientation is not None ): if self.shared_image_orientation is not None: + if image_position_tag in self._dim_ind_pointers: + image_positions = dim_values[image_position_tag] + else: + image_positions = extra_collection_values[ + image_position_tag + ] volume_spacing, volume_positions = get_volume_positions( - image_positions=dim_values[image_position_tag], + image_positions=image_positions, image_orientation=self.shared_image_orientation, allow_missing=True, allow_duplicates=True, diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index aec87cfb..4ffea235 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -3073,7 +3073,7 @@ def get_volume_positions( return None, None origin_distance_multiples = ( - (origin_distances - origin_distances[0]) / spacing + (origin_distances - origin_distances.min()) / spacing ) is_regular = np.allclose( From d6a534f7ce7d7d7a3b738a2859d537322a8d6e9e Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 29 Jul 2024 11:12:25 -0400 Subject: [PATCH 39/93] Add affine to Segmentation.get_volume --- src/highdicom/_multiframe.py | 72 ++++++++++--- src/highdicom/seg/sop.py | 204 ++++++++++++++++++++++++++++------- 2 files changed, 224 insertions(+), 52 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 200b5b95..7b47c075 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -26,6 +26,7 @@ from highdicom.seg.enum import SpatialLocationsPreservedValues from highdicom.spatial import ( _DEFAULT_SPACING_TOLERANCE, + _create_affine_transformation_matrix, get_image_coordinate_system, get_volume_positions, ) @@ -104,8 +105,9 @@ def __init__( # indices extra_collection_pointers = [] extra_collection_func_pointers = {} - spacing_hint = None + slice_spacing_hint = None image_position_tag = tag_for_keyword('ImagePositionPatient') + self.shared_pixel_spacing = None if self._coordinate_system == CoordinateSystemNames.PATIENT: plane_pos_seq_tag = tag_for_keyword('PlanePositionSequence') # Include the image position if it is not an index @@ -118,20 +120,19 @@ if hasattr(dataset, 'SharedFunctionalGroupsSequence'): sfgs = dataset.SharedFunctionalGroupsSequence[0] if hasattr(sfgs, 'PixelMeasuresSequence'): - spacing_hint = ( - sfgs - .PixelMeasuresSequence[0] - .get('SpacingBetweenSlices') - ) - if spacing_hint is None: + measures = sfgs.PixelMeasuresSequence[0] + slice_spacing_hint = measures.get('SpacingBetweenSlices') + self.shared_pixel_spacing = measures.get('PixelSpacing') + if slice_spacing_hint is None or self.shared_pixel_spacing is None: # Fall back to the pixel measures of the first frame if the # spacing is not found in the shared functional groups.
+ if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'): pfg1 = dataset.PerFrameFunctionalGroupsSequence[0] if hasattr(pfg1, 'PixelMeasuresSequence'): - spacing_hint = pfg1.PixelMeasuresSequence[0].get( + slice_spacing_hint = pfg1.PixelMeasuresSequence[0].get( 'SpacingBetweenSlices' ) + self.shared_pixel_spacing = ( + pfg1.PixelMeasuresSequence[0].get('PixelSpacing') + ) dim_ind_positions = { dim_ind.DimensionIndexPointer: i for i, dim_ind in enumerate(dataset.DimensionIndexSequence) @@ -152,7 +153,7 @@ self.shared_image_orientation = None if hasattr(dataset, 'ImageOrientationSlide'): self.shared_image_orientation = dataset.ImageOrientationSlide - elif hasattr(dataset, 'SharedFunctionalGroupsSequence'): + if hasattr(dataset, 'SharedFunctionalGroupsSequence'): sfgs = dataset.SharedFunctionalGroupsSequence[0] if hasattr(sfgs, 'PlaneOrientationSequence'): self.shared_image_orientation = ( sfgs.PlaneOrientationSequence[0].ImageOrientationPatient ) @@ -432,7 +433,7 @@ # Volume related information self.number_of_volume_positions: Optional[int] = None - self.volume_spacing: Optional[float] = None + self.spacing_between_slices: Optional[float] = None if ( self._coordinate_system == CoordinateSystemNames.PATIENT and self.shared_image_orientation is not None ): @@ -449,11 +450,11 @@ image_orientation=self.shared_image_orientation, allow_missing=True, allow_duplicates=True, - spacing_hint=spacing_hint, + spacing_hint=slice_spacing_hint, ) if volume_positions is not None: self.number_of_volume_positions = max(volume_positions) + 1 - self.volume_spacing = volume_spacing + self.spacing_between_slices = volume_spacing col_defs.append('VolumePosition INTEGER NOT NULL') col_data.append(volume_positions) @@ -921,6 +922,53 @@ + def get_image_position_at_volume_position( + self, + volume_position: int, + ) -> List[float]: + """Get the ImagePositionPatient of a frame at a given volume position.""" + if self.number_of_volume_positions is None: + raise RuntimeError( + "This image does not represent a regularly-spaced 3D volume." + ) + + if volume_position < 0: + raise ValueError( + "Argument 'volume_position' should be non-negative." + ) + elif volume_position >= self.number_of_volume_positions: + raise ValueError( + f"Value of {volume_position} for argument 'volume_position' " + "is not valid for an image with " + f"{self.number_of_volume_positions} volume positions." + ) + + cur = self._db_con.cursor() + + query = ( + 'SELECT ' + 'ImagePositionPatient_0, ' + 'ImagePositionPatient_1, ' + 'ImagePositionPatient_2 ' + 'FROM FrameLUT ' + f'WHERE VolumePosition={volume_position} ' + 'LIMIT 1;' + ) + + image_position = list(list(cur.execute(query))[0]) + return image_position + + def get_volume_affine(self, slice_start: int = 0) -> np.ndarray: + """Get the 4x4 affine matrix of the volume, starting at the given slice.""" + image_position = self.get_image_position_at_volume_position(slice_start) + + affine = _create_affine_transformation_matrix( + image_position=image_position, + image_orientation=self.shared_image_orientation, + spacing_between_slices=self.spacing_between_slices, + pixel_spacing=self.shared_pixel_spacing, + ) + + return affine @contextmanager def _generate_temp_table( diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index d57d4c67..ce15e232 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -514,8 +514,10 @@ ) @contextmanager - def iterate_indices_by_volume( + def iterate_indices_for_volume( self, + slice_start: int, + slice_end: int, segment_numbers: Sequence[int], combine_segments: bool = False, relabel: bool = False, @@ -542,6 +544,18 @@ Parameters ---------- + slice_start: int + Zero-based index of the "volume position" of the first slice of the + returned volume.
The "volume position" refers to the position of + slices after sorting spatially, and may correspond to any frame in + the segmentation file, depending on its construction. Must be a + non-negative integer. + slice_end: Union[int, None], optional + Zero-based index of the "volume position" one beyond the last slice + of the returned volume. The "volume position" refers to the + position of slices after sorting spatially, and may correspond to + any frame in the segmentation file, depending on its construction. + Must be a positive integer. segment_numbers: Sequence[int] Sequence containing segment numbers to include. combine_segments: bool, optional @@ -569,7 +583,7 @@ def iterate_indices_by_volume( numpy arrays directly. """ # noqa: E501 - if self.volume_spacing is None: + if self.number_of_volume_positions is None: raise RuntimeError( 'This segmentation does not represent a regularly-spaced ' 'volume.' @@ -580,12 +594,15 @@ def iterate_indices_by_volume( # operations, presumably as it is more cache efficient query = ( 'SELECT ' - ' L.VolumePosition,' + f' L.VolumePosition - {slice_start},' ' L.FrameNumber - 1,' ' S.OutputSegmentNumber ' 'FROM FrameLUT L ' 'INNER JOIN TemporarySegmentNumbers S' ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' + 'WHERE ' + f' L.VolumePosition >= {slice_start} AND ' + f' L.VolumePosition < {slice_end} ' 'ORDER BY L.VolumePosition' ) @@ -3538,40 +3555,22 @@ def segmented_property_types(self) -> List[CodedConcept]: return types - def is_3d_volume( - self, - split_dimensions: Optional[Sequence[str]] = None, - ): - """Determine whether this segmentation is a 3D volume. - - For this purpose, a 3D volume is a set of regularly slices in 3D space - distributed at regular spacings along a vector perpendicular to the - normal vector to each image. - - Parameters - ---------- - + @property + def number_of_volume_positions(self) -> Optional[int]: + """Union[int, None]: Number of volume positions, if the segmentation + represents a regularly-spaced 3D volume. ``None`` otherwise. """ - if split_dimensions is not None: - split_dimensions = list(split_dimensions) - if len(split_dimensions) == 0: - raise ValueError( - 'Argument "split_dimensions" must not be empty.' - ) - if 'ReferencedSegmentNumber' in split_dimensions: - raise ValueError( - 'The value "ReferencedSegmentNumber" should not be ' - 'included in the spplit dimensions.' - ) - else: - split_dimensions = [] + return self._db_man.number_of_volume_positions - split_dimensions.append('ReferencedSegmentNumber') - - spacing = self._db_man.get_slice_spacing(split_dimensions) + @property + def spacing_between_slices(self) -> Optional[float]: + """Union[float, None]: Spacing between slices in the frame of reference + coordinate system if the segmentation represents a regularly-spaced 3D + volume. ``None`` otherwise. 
- return spacing is not None + """ + return self._db_man.spacing_between_slices def _get_pixels_by_seg_frame( self, @@ -4378,17 +4377,100 @@ def get_pixels_by_source_frame( dtype=dtype, ) - def get_pixels_as_volume( + def get_volume( self, + slice_start: int = 0, + slice_end: Optional[int] = None, segment_numbers: Optional[Sequence[int]] = None, combine_segments: bool = False, relabel: bool = False, - ignore_spatial_locations: bool = False, - assert_missing_frames_are_empty: bool = False, + allow_missing_frames: bool = True, # TODO rescale_fractional: bool = True, skip_overlap_checks: bool = False, dtype: Union[type, str, np.dtype, None] = None, - ): + ) -> Volume: + """Create a :class:`highdicom.Volume` from the segmentation. + + This is only possible if the segmentation represents a regularly-spaced + 3D volume. + + Parameters + ---------- + slice_start: int, optional + Zero-based index of the "volume position" of the first slice of the + returned volume. The "volume position" refers to the position of + slices after sorting spatially, and may correspond to any frame in + the segmentation file, depending on its construction. May be + negative, in which case standard Python indexing behavior is + followed (-1 corresponds to the last volume position, etc). + slice_end: Union[int, None], optional + Zero-based index of the "volume position" one beyond the last slice + of the returned volume. The "volume position" refers to the + position of slices after sorting spatially, and may correspond to + any frame in the segmentation file, depending on its construction. + May be negative, in which case standard Python indexing behavior is + followed (-1 corresponds to the last volume position, etc). If + None, the last volume position is included as the last output + slice. + segment_numbers: Optional[Sequence[int]], optional + Sequence containing segment numbers to include. If unspecified, + all segments are included. + combine_segments: bool, optional + If True, combine the different segments into a single label + map in which the value of a pixel represents its segment. + If False (the default), segments are binary and stacked down the + last dimension of the output array. + relabel: bool, optional + If True and ``combine_segments`` is ``True``, the pixel values in + the output array are relabelled into the range ``0`` to + ``len(segment_numbers)`` (inclusive) according to the position of + the original segment numbers in ``segment_numbers`` parameter. If + ``combine_segments`` is ``False``, this has no effect. + ignore_spatial_locations: bool, optional + Ignore whether or not spatial locations were preserved in the + derivation of the segmentation frames from the source frames. In + some segmentation images, the pixel locations in the segmentation + frames may not correspond to pixel locations in the frames of the + source image from which they were derived. The segmentation image + may or may not specify whether or not spatial locations are + preserved in this way through use of the optional (0028,135A) + SpatialLocationsPreserved attribute. If this attribute specifies + that spatial locations are not preserved, or is absent from the + segmentation image, highdicom's default behavior is to disallow + indexing by source frames. To override this behavior and retrieve + segmentation pixels regardless of the presence or value of the + spatial locations preserved attribute, set this parameter to True. 
+ assert_missing_frames_are_empty: bool, optional + Assert that requested source frame numbers that are not referenced + by the segmentation image contain no segments. If a source frame + number is not referenced by the segmentation image and is larger + than the frame number of the highest referenced frame, highdicom is + unable to check that the frame number is valid in the source image. + By default, highdicom will raise an error in this situation. To + override this behavior and return a segmentation frame of all zeros + for such frames, set this parameter to True. + rescale_fractional: bool + If this is a FRACTIONAL segmentation and ``rescale_fractional`` is + True, the raw integer-valued array stored in the segmentation image + output will be rescaled by the MaximumFractionalValue such that + each pixel lies in the range 0.0 to 1.0. If False, the raw integer + values are returned. If the segmentation has BINARY type, this + parameter has no effect. + skip_overlap_checks: bool + If True, skip checks for overlap between different segments. By + default, checks are performed to ensure that the segments do not + overlap. However, this reduces performance. If checks are skipped + and multiple segments do overlap, the segment with the highest + segment number (after relabelling, if applicable) will be placed + into the output array. + dtype: Union[type, str, numpy.dtype, None] + Data type of the returned array. If None, an appropriate type will + be chosen automatically. If the returned values are rescaled + fractional values, this will be numpy.float32. Otherwise, the + smallest unsigned integer type that accommodates all of the output + values will be chosen. + + """ # Checks on validity of the inputs if segment_numbers is None: segment_numbers = list(self.segment_numbers) @@ -4397,14 +4479,48 @@ def get_pixels_as_volume( 'Segment numbers may not be empty.' ) - with self._db_man.iterate_indices_by_volume( + if self.number_of_volume_positions is None: + raise RuntimeError( + "This segmentation is not a regularly-spaced 3D volume." + ) + n_vol_positions = self.number_of_volume_positions + + if slice_start < 0: + slice_start = n_vol_positions + slice_start + + if slice_end is None: + slice_end = n_vol_positions + 1 + elif slice_end > n_vol_positions: + raise IndexError( + f"Value of {slice_end} is not valid for segmentation with " + f"{n_vol_positions} volume positions." + ) + elif slice_end < 0: + if slice_end < (- n_vol_positions): + raise IndexError( + f"Value of {slice_end} is not valid for segmentation with " + f"{n_vol_positions} volume positions." + ) + slice_end = n_vol_positions + slice_end + + number_of_slices = cast(int, slice_end) - slice_start + + if number_of_slices < 1: + raise ValueError( + "The combination of 'slice_start' and 'slice_end' gives an " + "empty volume." 
+ ) + + with self._db_man.iterate_indices_for_volume( + slice_start=slice_start, + slice_end=cast(int, slice_end), segment_numbers=segment_numbers, combine_segments=combine_segments, relabel=relabel, ) as indices: - return self._get_pixels_by_seg_frame( - output_shape=self._db_man.number_of_volume_positions, + array = self._get_pixels_by_seg_frame( + output_shape=number_of_slices, indices_iterator=indices, segment_numbers=np.array(segment_numbers), combine_segments=combine_segments, @@ -4414,6 +4530,14 @@ def get_pixels_as_volume( dtype=dtype, ) + affine = self._db_man.get_volume_affine(slice_start) + + return Volume( + array=array, + affine=affine, + frame_of_reference_uid=self.FrameOfReferenceUID, + ) + def get_pixels_by_dimension_index_values( self, dimension_index_values: Sequence[Sequence[int]], From f2721237694ae99b3d0a91d970d1278ad409bcc1 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 29 Jul 2024 14:30:38 -0400 Subject: [PATCH 40/93] Fix seg.get_volume affine indexing --- src/highdicom/_multiframe.py | 16 ++++++++++++++-- src/highdicom/spatial.py | 8 ++++++++ src/highdicom/volume.py | 15 ++++----------- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 7b47c075..2718aeac 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -22,7 +22,10 @@ from pydicom.datadict import get_entry, tag_for_keyword from pydicom.multival import MultiValue -from highdicom.enum import CoordinateSystemNames +from highdicom.enum import ( + CoordinateSystemNames, + PixelIndexDirections, +) from highdicom.seg.enum import SpatialLocationsPreservedValues from highdicom.spatial import ( _DEFAULT_SPACING_TOLERANCE, @@ -957,7 +960,15 @@ def get_image_position_at_volume_position( image_position = list(list(cur.execute(query))[0]) return image_position - def get_volume_affine(self, slice_start: int = 0) -> np.ndarray: + def get_volume_affine( + self, + slice_start: int = 0, + index_convention: Optional[Sequence[PixelIndexDirections]] = ( + PixelIndexDirections.I, + PixelIndexDirections.D, + PixelIndexDirections.R, + ), + ) -> np.ndarray: image_position = self.get_image_position_at_volume_position(slice_start) @@ -966,6 +977,7 @@ def get_volume_affine(self, slice_start: int = 0) -> np.ndarray: image_orientation=self.shared_image_orientation, spacing_between_slices=self.spacing_between_slices, pixel_spacing=self.shared_pixel_spacing, + index_convention=index_convention, ) return affine diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 4ffea235..e9d27d29 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -40,6 +40,14 @@ """Mapping of each patient orientation value to its opposite.""" +VOLUME_INDEX_CONVENTION = ( + PixelIndexDirections.I, + PixelIndexDirections.D, + PixelIndexDirections.R, +) +"""The indexing convention used for affine matrices within volumes.""" + + def is_tiled_image(dataset: Dataset) -> bool: """Determine whether a dataset represents a tiled image. 
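
A minimal usage sketch of the `get_volume` API added by this series. The file
path 'seg.dcm' is a hypothetical example, and `segread` is highdicom's
existing reader for Segmentation instances; the indices assume a segmentation
that forms a regularly-spaced volume with at least 20 volume positions:

    import highdicom as hd

    seg = hd.seg.segread('seg.dcm')

    # Volume positions 10 (inclusive) through 20 (exclusive), with all
    # segments combined into a single labelmap array
    vol = seg.get_volume(
        slice_start=10,
        slice_end=20,
        combine_segments=True,
    )

    print(vol.spatial_shape)  # e.g. (10, 512, 512)
    print(vol.affine)         # 4 x 4 index-to-patient affine matrix
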
diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 8862c4aa..165f4bd2 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -9,7 +9,6 @@ from highdicom.enum import ( CoordinateSystemNames, PatientOrientationValuesBiped, - PixelIndexDirections, ) from highdicom.spatial import ( _create_affine_transformation_matrix, @@ -17,6 +16,7 @@ _normalize_patient_orientation, _transform_affine_matrix, PATIENT_ORIENTATION_OPPOSITES, + VOLUME_INDEX_CONVENTION, get_closest_patient_orientation, get_image_coordinate_system, get_plane_sort_index, @@ -77,13 +77,6 @@ class Volume: classes in the ``highdicom.spatial`` module. """ - # The indexing convention used for all internal representations of the - # affine matrix. - _INTERNAL_INDEX_CONVENTION = ( - PixelIndexDirections.I, - PixelIndexDirections.D, - PixelIndexDirections.R, - ) def __init__( self, @@ -255,7 +248,7 @@ def from_image_series( image_orientation=ds.ImageOrientationPatient, pixel_spacing=ds.PixelSpacing, spacing_between_slices=slice_spacing, - index_convention=cls._INTERNAL_INDEX_CONVENTION, + index_convention=VOLUME_INDEX_CONVENTION, ) # TODO apply color, modality and VOI lookup @@ -337,7 +330,7 @@ def from_image( image_orientation=image_orientation, pixel_spacing=pixel_spacing, spacing_between_slices=slice_spacing, - index_convention=cls._INTERNAL_INDEX_CONVENTION, + index_convention=VOLUME_INDEX_CONVENTION, ) # TODO apply VOI color modality LUT etc @@ -420,7 +413,7 @@ def from_attributes( image_orientation=image_orientation, pixel_spacing=pixel_spacing, spacing_between_slices=spacing_between_slices, - index_convention=cls._INTERNAL_INDEX_CONVENTION, + index_convention=VOLUME_INDEX_CONVENTION, ) return cls( affine=affine, From 23d91c3e598318ee2b5d9549e703ef604f707103 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 29 Jul 2024 18:35:06 -0400 Subject: [PATCH 41/93] Add tests, some failing --- src/highdicom/_multiframe.py | 178 ++++-------------- src/highdicom/seg/sop.py | 2 +- tests/test_multiframe.py | 6 +- tests/test_seg.py | 338 +++++++++++++++++++++++++++++++++++ 4 files changed, 377 insertions(+), 147 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 2718aeac..65c4c84d 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -28,7 +28,7 @@ ) from highdicom.seg.enum import SpatialLocationsPreservedValues from highdicom.spatial import ( - _DEFAULT_SPACING_TOLERANCE, + VOLUME_INDEX_CONVENTION, _create_affine_transformation_matrix, get_image_coordinate_system, get_volume_positions, @@ -782,153 +782,30 @@ def get_unique_dim_index_values( ) } - def get_slice_spacing( + def get_image_position_at_volume_position( self, - split_dimensions: Optional[Sequence[str]] = None, - tol: float = _DEFAULT_SPACING_TOLERANCE, - ) -> Optional[float]: - """Get slice spacing, if any, for the image. - - First determines whether the multiframe image represents a 3D volume. - A 3D volume consists of regularly spaced slices with orthogonal axes, i.e. - the slices are spaced equally along the direction orthogonal to the - in-plane image orientation cosines. - - If the image does represent a volume, returns the absolute value of the - slice spacing. If the series does not represent a volume, returns None. - - Note that we stipulate that an image with a single frame in the patient - coordinate system is a 3D volume for the purposes of this function. In this - case the returned slice spacing will be 0.0 if it cannot be deduced from - the metadata. 
- - Note also that this function checks the image position and image - orientation metadata found in the file and ignores any SpacingBetweenSlices - or DimensionOrganizationType found in the dataset. Therefore it does not - rely upon the creator having populated these attributes, or that their - values are correct. + volume_position: int, + ) -> List[float]: + """Get the image position at a location in the implied volume. + + This requires that the image represents a regularly-spaced 3D volume. Parameters ---------- - tol: float, optional - Tolerance for determining spacing regularity. If slice spacings vary by - less that this spacing, they are considered to be regular. - split_dimensions: Union[Sequence[pydicom.tag.BaseTag], None], optional - Split on these dimension indices and determine whether there is 3D - volume for each value of this dimension index, the same 3D volumes of - frames exist. For example, if time were included as a split dimension, - this function will check whether a 3D volume exists at each timepoint - (and that the volume is the same at each time point). Each dimension - index should be provided as the keyword representing the relevant - DICOM attribute. + volume_position: int + Zero-based index into the slice positions within the implied + volume. Must be an integer between >= 0 and < + ``number_of_volume_positions``. Returns ------- - float: - Absolute value of the regular slice spacing if the series of images - meets the definition of a 3D volume, above. None otherwise. + List[float]: + Image position (x, y, z) in the frame of reference coordinate + system of the center of the top-left pixel. This definition matches + the standard DICOM definition used in the ImagePositionPatient + attribute. """ - if self._coordinate_system is None: - return None - if self._coordinate_system != CoordinateSystemNames.PATIENT: - return None - - if self.shared_image_orientation is None: - return None - - if self._number_of_frames == 1: - # Stipulate that this does represent a volume - return 0.0 - - cur = self._db_con.cursor() - - if split_dimensions is None: - - query = ( - 'SELECT ' - 'ImagePositionPatient_0, ' - 'ImagePositionPatient_1, ' - 'ImagePositionPatient_2 ' - 'FROM FrameLUT;' - ) - - image_positions = np.array( - [r for r in cur.execute(query)] - ) - spacing, _ = get_volume_positions( - image_positions=image_positions, - image_orientation=np.array(self.shared_image_orientation), - sort=True, - tol=tol, - ) - else: - dim_values = [] - - # Get lists of all unique values for the specified dimensions - for kw in split_dimensions: - # Find unique values of this attribute - query = f""" - SELECT DISTINCT {kw} FROM FrameLUT; - """ - - dim_values.append( - [ - v[0] for v in cur.execute(query) - ] - ) - - # Check that each combination of the split dimension has the same - # list of image positions - all_image_positions = [] - for vals in itertools.product(*dim_values): - filter_str = 'AND '.join( - f'{kw} = {val}' for kw, val in zip(split_dimensions, vals) - ) - query = ( - 'SELECT ' - 'ImagePositionPatient_0, ' - 'ImagePositionPatient_1, ' - 'ImagePositionPatient_2 ' - 'FROM FrameLUT ' - 'WHERE ' - f'{filter_str} ' - 'ORDER BY ' - 'ImagePositionPatient_0, ' - 'ImagePositionPatient_1, ' - 'ImagePositionPatient_2 ' - ';' - ) - - image_positions = np.array( - [r for r in cur.execute(query)] - ) - all_image_positions.append(image_positions) - - if len(all_image_positions) > 1: - for image_positions in all_image_positions: - if not np.array_equal( - image_positions, - 
all_image_positions[0] - ): - # The volumes described by each combination of the - # split dimensions have different sets of image - # positions - return None - - spacing, _ = get_volume_positions( - image_positions=all_image_positions[0], - image_orientation=self.shared_image_orientation, - sort=True, - tol=tol, - ) - - return spacing - - def get_image_position_at_volume_position( - self, - volume_position: int, - ) -> List[float]: if self.number_of_volume_positions is None: raise RuntimeError( @@ -963,13 +840,26 @@ def get_image_position_at_volume_position( def get_volume_affine( self, slice_start: int = 0, - index_convention: Optional[Sequence[PixelIndexDirections]] = ( - PixelIndexDirections.I, - PixelIndexDirections.D, - PixelIndexDirections.R, - ), + index_convention: Sequence[PixelIndexDirections] = VOLUME_INDEX_CONVENTION, ) -> np.ndarray: + """Get the affine matrix for the implied volume. + + This requires that the image represents a regularly-spaced 3D volume. + + Parameters + ---------- + slice_start: int, optional + Zero-based index into the slice positions within the implied + volume marking the beginning of the relevant region. + index_convention: Sequence[highdicom.PixelIndexDirections], optional + Index convention to use to construct the affine matrix. + Returns + ------- + numpy.ndarray: + 4 x 4 affine matrix. + + """ image_position = self.get_image_position_at_volume_position(slice_start) affine = _create_affine_transformation_matrix( diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index ce15e232..cfeb4414 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -4503,7 +4503,7 @@ def get_volume( ) slice_end = n_vol_positions + slice_end - number_of_slices = cast(int, slice_end) - slice_start + number_of_slices = cast(int, slice_end) - slice_start - 1 if number_of_slices < 1: raise ValueError( diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py index 7274023b..fd2d8879 100644 --- a/tests/test_multiframe.py +++ b/tests/test_multiframe.py @@ -11,7 +11,8 @@ def test_slice_spacing(): ) db = MultiFrameDBManager(ct_multiframe) - assert db.get_slice_spacing() == 10.0 + assert db.number_of_volume_positions == 2 + assert db.spacing_between_slices == 10.0 def test_slice_spacing_irregular(): @@ -25,4 +26,5 @@ def test_slice_spacing_irregular(): db = MultiFrameDBManager(ct_multiframe) - assert db.get_slice_spacing() is None + assert db.number_of_volume_positions is None + assert db.spacing_between_slices is None diff --git a/tests/test_seg.py b/tests/test_seg.py index 3e2cb5b1..fed66c1e 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -31,6 +31,8 @@ from highdicom.enum import ( CoordinateSystemNames, DimensionOrganizationTypeValues, + PatientOrientationValuesBiped, + PixelIndexDirections, ) from highdicom.seg import ( create_segmentation_pyramid, @@ -3794,6 +3796,342 @@ def test_get_pixels_by_source_instances_overlap_no_checks(self): ) assert np.array_equal(expected_array, out) + def test_get_volume_binary(self): + vol = self._ct_binary_seg.get_volume() + assert isinstance(vol, Volume) + assert vol.spatial_shape == (3, 16, 16) + assert vol.shape == (3, 16, 16, 1) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + 
.ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_multisegments(self): + vol = self._ct_binary_overlap_seg.get_volume() + assert isinstance(vol, Volume) + # Number this segmentation has a large number of missing slices + assert vol.spatial_shape == (165, 16, 16) + assert vol.shape == (165, 16, 16, 2) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_multisegment2(self): + vol = self._ct_binary_overlap_seg.get_volume(segment_numbers=[2]) + assert isinstance(vol, Volume) + # Number this segmentation has a large number of missing slices + assert vol.spatial_shape == (165, 16, 16) + assert vol.shape == (165, 16, 16, 1) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_multisegment_combine(self): + vol = self._ct_binary_overlap_seg.get_volume( + combine_segments=True, + skip_overlap_checks=True, + ) + assert isinstance(vol, Volume) + # Number this segmentation has a large number of missing slices + assert vol.spatial_shape == (165, 16, 16) + assert vol.shape == (165, 16, 16) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_multisegment_slice_start(self): + vol = self._ct_binary_overlap_seg.get_volume( + slice_start=160, + ) + assert isinstance(vol, Volume) + # Number this segmentation has a large number of missing slices + assert vol.spatial_shape == (6, 16, 16) + assert vol.shape == (6, 16, 16) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + 
PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_multisegment_slice_start_negative(self): + vol = self._ct_binary_overlap_seg.get_volume( + slice_start=-6, + ) + assert isinstance(vol, Volume) + # Number this segmentation has a large number of missing slices + assert vol.spatial_shape == (6, 16, 16) + assert vol.shape == (6, 16, 16) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_multisegment_slice_end(self): + vol = self._ct_binary_overlap_seg.get_volume( + slice_end=17, + ) + assert isinstance(vol, Volume) + # Number this segmentation has a large number of missing slices + assert vol.spatial_shape == (17, 16, 16) + assert vol.shape == (17, 16, 16) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_multisegment_slice_end_negative(self): + vol = self._ct_binary_overlap_seg.get_volume( + slice_end=-10, + ) + assert isinstance(vol, Volume) + # Number this segmentation has a large number of missing slices + assert vol.spatial_shape == (155, 16, 16) + assert vol.shape == (155, 16, 16) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_multisegment_center(self): + vol = self._ct_binary_overlap_seg.get_volume( + slice_start=50, + slice_end=57, + ) + assert isinstance(vol, Volume) + # Number this segmentation has a large number of missing slices + assert vol.spatial_shape == (6, 16, 16) + assert vol.shape == (6, 16, 16) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + 
PatientOrientationValuesBiped.L, + ) + + def test_get_volume_binary_combine(self): + vol = self._ct_binary_seg.get_volume(combine_segments=True) + assert isinstance(vol, Volume) + assert vol.spatial_shape == (3, 16, 16) + assert vol.shape == (3, 16, 16) + assert vol.pixel_spacing == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_binary_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_binary_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + + def test_get_volume_fractional(self): + vol = self._ct_true_fractional_seg.get_volume() + assert isinstance(vol, Volume) + assert vol.spatial_shape == (3, 16, 16) + assert vol.shape == (3, 16, 16, 1) + assert vol.pixel_spacing == ( + self._ct_true_fractional_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_true_fractional_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_true_fractional_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + assert vol.dtype == np.float32 + + def test_get_volume_fractional_noscale(self): + vol = self._ct_true_fractional_seg.get_volume(rescale_fractional=False) + assert isinstance(vol, Volume) + assert vol.spatial_shape == (3, 16, 16) + assert vol.shape == (3, 16, 16, 1) + assert vol.pixel_spacing == ( + self._ct_true_fractional_seg + .SharedFunctionalGroupsSequence[0] + .PixelMeasuresSequence[0] + .PixelSpacing + ) + assert vol.spacing_between_slices == ( + self._ct_true_fractional_seg._db_man.spacing_between_slices + ) + assert vol.direction_cosines == ( + self._ct_true_fractional_seg + .SharedFunctionalGroupsSequence[0] + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + assert vol.get_closest_patient_orientation() == ( + PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.P, + PatientOrientationValuesBiped.L, + ) + assert vol.dtype == np.uint8 + class TestSegUtilities(unittest.TestCase): From 2c04427c580335feebce5d75294aa441960d8642 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 29 Jul 2024 22:17:29 -0400 Subject: [PATCH 42/93] Fix indexing issues --- src/highdicom/_multiframe.py | 41 ++++++++++++----- src/highdicom/seg/sop.py | 10 +++-- src/highdicom/spatial.py | 33 ++++++++++++++ tests/test_seg.py | 86 ++++++++++++++++++------------------ 4 files changed, 112 insertions(+), 58 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 65c4c84d..976fde95 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -30,6 +30,7 @@ from highdicom.spatial import ( VOLUME_INDEX_CONVENTION, _create_affine_transformation_matrix, + _translate_affine_matrix, get_image_coordinate_system, get_volume_positions, ) @@ -436,7 +437,7 @@ def __init__( # Volume related information self.number_of_volume_positions: Optional[int] = None - self.spacing_between_slices: Optional[float] = None + self.affine: Optional[np.ndarray] = None if ( self._coordinate_system 
== CoordinateSystemNames.PATIENT
             and self.shared_image_orientation is not None
@@ -456,8 +457,15 @@ def __init__(
                 spacing_hint=slice_spacing_hint,
             )
             if volume_positions is not None:
+                origin_slice_index = volume_positions.index(0)
                 self.number_of_volume_positions = max(volume_positions) + 1
-                self.spacing_between_slices = volume_spacing
+                self.affine = _create_affine_transformation_matrix(
+                    image_position=image_positions[origin_slice_index],
+                    image_orientation=self.shared_image_orientation,
+                    pixel_spacing=self.shared_pixel_spacing,
+                    spacing_between_slices=volume_spacing,
+                    index_convention=VOLUME_INDEX_CONVENTION,
+                )
                 col_defs.append('VolumePosition INTEGER NOT NULL')
                 col_data.append(volume_positions)
@@ -860,17 +868,26 @@ def get_volume_affine(
             4 x 4 affine matrix.
 
         """
-        image_position = self.get_image_position_at_volume_position(slice_start)
-
-        affine = _create_affine_transformation_matrix(
-            image_position=image_position,
-            image_orientation=self.shared_image_orientation,
-            spacing_between_slices=self.spacing_between_slices,
-            pixel_spacing=self.shared_pixel_spacing,
-            index_convention=index_convention,
-        )
+        if self.number_of_volume_positions is None:
+            raise RuntimeError(
+                "This image does not represent a regularly-spaced 3D volume."
+            )
 
-        return affine
+        if slice_start < 0:
+            raise ValueError(
+                "Argument 'slice_start' should be non-negative."
+            )
+        elif slice_start >= self.number_of_volume_positions:
+            raise ValueError(
+                f"Value of {slice_start} for argument 'slice_start' "
+                'is not valid for image with '
+                f'{self.number_of_volume_positions} volume positions.'
+            )
+
+        if slice_start == 0:
+            return self.affine
+        else:
+            return _translate_affine_matrix(self.affine, [slice_start, 0, 0])
 
     @contextmanager
     def _generate_temp_table(
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index cfeb4414..c61f8693 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -3570,7 +3570,11 @@ def spacing_between_slices(self) -> Optional[float]:
         volume. ``None`` otherwise.
 
         """
-        return self._db_man.spacing_between_slices
+        if self._db_man.affine is None:
+            return None
+        slice_vec = self._db_man.affine[:3, 0]
+        spacing = np.sqrt((slice_vec ** 2).sum()).item()
+        return spacing
 
     def _get_pixels_by_seg_frame(
         self,
@@ -4489,7 +4493,7 @@ def get_volume(
             slice_start = n_vol_positions + slice_start
 
         if slice_end is None:
-            slice_end = n_vol_positions + 1
+            slice_end = n_vol_positions
         elif slice_end > n_vol_positions:
             raise IndexError(
                 f"Value of {slice_end} is not valid for segmentation with "
@@ -4503,7 +4507,7 @@
             )
             slice_end = n_vol_positions + slice_end
 
-        number_of_slices = cast(int, slice_end) - slice_start - 1
+        number_of_slices = cast(int, slice_end) - slice_start
 
         if number_of_slices < 1:
             raise ValueError(
diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index e9d27d29..014383e4 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -1200,6 +1200,39 @@
     return transformed
 
 
+def _translate_affine_matrix(
+    affine: np.ndarray,
+    pixel_offset: Sequence[int],
+) -> np.ndarray:
+    """Translate the origin of an affine matrix.
+
+    Parameters
+    ----------
+    affine: numpy.ndarray
+        Original affine matrix (4 x 4).
+    pixel_offset: Sequence[int]
+        Offset, in pixel units.
+
+    Returns
+    -------
+    numpy.ndarray:
+        Translated affine matrix.
+
+    """
+    if len(pixel_offset) != 3:
+        raise ValueError(
+            "Argument 'pixel_offset' must have three elements."
+ ) + offset_arr = np.array(pixel_offset) + origin = affine[:3, 3] + direction = affine[:3, :3] + reference_offset = direction @ offset_arr + new_origin = origin + reference_offset + result = affine.copy() + result[:3, 3] = new_origin + return result + + def _transform_affine_to_convention( affine: np.ndarray, shape: Sequence[int], diff --git a/tests/test_seg.py b/tests/test_seg.py index fed66c1e..49967b9f 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -3808,7 +3808,7 @@ def test_get_volume_binary(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_seg.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_seg @@ -3825,20 +3825,20 @@ def test_get_volume_binary(self): def test_get_volume_binary_multisegments(self): vol = self._ct_binary_overlap_seg.get_volume() assert isinstance(vol, Volume) - # Number this segmentation has a large number of missing slices + # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (165, 16, 16) assert vol.shape == (165, 16, 16, 2) assert vol.pixel_spacing == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_overlap_seg.spacing_between_slices ) assert vol.direction_cosines == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -3852,20 +3852,20 @@ def test_get_volume_binary_multisegments(self): def test_get_volume_binary_multisegment2(self): vol = self._ct_binary_overlap_seg.get_volume(segment_numbers=[2]) assert isinstance(vol, Volume) - # Number this segmentation has a large number of missing slices + # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (165, 16, 16) assert vol.shape == (165, 16, 16, 1) assert vol.pixel_spacing == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_overlap_seg.spacing_between_slices ) assert vol.direction_cosines == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -3882,20 +3882,20 @@ def test_get_volume_binary_multisegment_combine(self): skip_overlap_checks=True, ) assert isinstance(vol, Volume) - # Number this segmentation has a large number of missing slices + # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (165, 16, 16) assert vol.shape == (165, 16, 16) assert vol.pixel_spacing == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_overlap_seg.spacing_between_slices ) assert vol.direction_cosines == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -3911,20 +3911,20 @@ def test_get_volume_binary_multisegment_slice_start(self): slice_start=160, ) assert isinstance(vol, Volume) - # Number this segmentation has a large number of missing slices - 
assert vol.spatial_shape == (6, 16, 16) - assert vol.shape == (6, 16, 16) + # Note that this segmentation has a large number of missing slices + assert vol.spatial_shape == (5, 16, 16) + assert vol.shape == (5, 16, 16, 2) assert vol.pixel_spacing == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_overlap_seg.spacing_between_slices ) assert vol.direction_cosines == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -3940,20 +3940,20 @@ def test_get_volume_binary_multisegment_slice_start_negative(self): slice_start=-6, ) assert isinstance(vol, Volume) - # Number this segmentation has a large number of missing slices + # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (6, 16, 16) - assert vol.shape == (6, 16, 16) + assert vol.shape == (6, 16, 16, 2) assert vol.pixel_spacing == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_overlap_seg.spacing_between_slices ) assert vol.direction_cosines == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -3969,20 +3969,20 @@ def test_get_volume_binary_multisegment_slice_end(self): slice_end=17, ) assert isinstance(vol, Volume) - # Number this segmentation has a large number of missing slices + # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (17, 16, 16) - assert vol.shape == (17, 16, 16) + assert vol.shape == (17, 16, 16, 2) assert vol.pixel_spacing == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_overlap_seg.spacing_between_slices ) assert vol.direction_cosines == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -3998,20 +3998,20 @@ def test_get_volume_binary_multisegment_slice_end_negative(self): slice_end=-10, ) assert isinstance(vol, Volume) - # Number this segmentation has a large number of missing slices + # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (155, 16, 16) - assert vol.shape == (155, 16, 16) + assert vol.shape == (155, 16, 16, 2) assert vol.pixel_spacing == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_overlap_seg.spacing_between_slices ) assert vol.direction_cosines == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -4028,20 +4028,20 @@ def test_get_volume_binary_multisegment_center(self): slice_end=57, ) assert isinstance(vol, Volume) - # Number this segmentation has a large number of missing slices - assert vol.spatial_shape == (6, 16, 16) - assert vol.shape == 
(6, 16, 16) + # Note that this segmentation has a large number of missing slices + assert vol.spatial_shape == (7, 16, 16) + assert vol.shape == (7, 16, 16, 2) assert vol.pixel_spacing == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_seg.spacing_between_slices ) assert vol.direction_cosines == ( - self._ct_binary_seg + self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -4064,7 +4064,7 @@ def test_get_volume_binary_combine(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg._db_man.spacing_between_slices + self._ct_binary_seg.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_seg @@ -4090,7 +4090,7 @@ def test_get_volume_fractional(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_true_fractional_seg._db_man.spacing_between_slices + self._ct_true_fractional_seg.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_true_fractional_seg @@ -4117,7 +4117,7 @@ def test_get_volume_fractional_noscale(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_true_fractional_seg._db_man.spacing_between_slices + self._ct_true_fractional_seg.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_true_fractional_seg From 960fba37ab2c9d09897e1464835ef4a74a02683d Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 30 Jul 2024 21:51:30 -0400 Subject: [PATCH 43/93] Fix multiframe tests --- src/highdicom/_multiframe.py | 2 -- tests/test_multiframe.py | 14 ++++++++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 976fde95..160729bf 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -24,7 +24,6 @@ from highdicom.enum import ( CoordinateSystemNames, - PixelIndexDirections, ) from highdicom.seg.enum import SpatialLocationsPreservedValues from highdicom.spatial import ( @@ -848,7 +847,6 @@ def get_image_position_at_volume_position( def get_volume_affine( self, slice_start: int = 0, - index_convention: Sequence[PixelIndexDirections] = VOLUME_INDEX_CONVENTION, ) -> np.ndarray: """Get the affine matrix for the implied volume. 
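
A worked sketch of the translation that `get_volume_affine` delegates to
`_translate_affine_matrix`: shifting the origin by a whole number of slices
moves it along the spacing-scaled slice direction held in the first column of
the affine matrix (per VOLUME_INDEX_CONVENTION). The matrix values below are
illustrative only:

    import numpy as np

    affine = np.array(
        [
            [0.0, 0.0, 1.0, -50.0],
            [0.0, 1.0, 0.0, -200.0],
            [10.0, 0.0, 0.0, -25.0],  # first column: 10.0 mm spacing along z
            [0.0, 0.0, 0.0, 1.0],
        ]
    )

    # Equivalent to _translate_affine_matrix(affine, [3, 0, 0])
    translated = affine.copy()
    translated[:3, 3] = affine[:3, 3] + affine[:3, :3] @ [3, 0, 0]

    # The origin moves 3 slices (30.0 mm) along the z axis
    assert np.array_equal(translated[:3, 3], [-50.0, -200.0, 5.0])
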
diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py index fd2d8879..64d54a1f 100644 --- a/tests/test_multiframe.py +++ b/tests/test_multiframe.py @@ -1,4 +1,5 @@ """Tests for the highdicom._multiframe module.""" +import numpy as np from pydicom import dcmread from pydicom.data import get_testdata_file, get_testdata_files @@ -11,8 +12,17 @@ def test_slice_spacing(): ) db = MultiFrameDBManager(ct_multiframe) + expected_affine = np.array( + [ + [0.0, 0.0, -0.388672, 99.5], + [0.0, 0.388672, 0.0, -301.5], + [-10.0, 0.0, 0.0, -149], + [0.0, 0.0, 0.0, 1.0], + ] + ) + print(db.affine) assert db.number_of_volume_positions == 2 - assert db.spacing_between_slices == 10.0 + assert np.array_equal(db.affine, expected_affine) def test_slice_spacing_irregular(): @@ -27,4 +37,4 @@ def test_slice_spacing_irregular(): db = MultiFrameDBManager(ct_multiframe) assert db.number_of_volume_positions is None - assert db.spacing_between_slices is None + assert db.affine is None From 341a149ecdb7fbde246f8520b4ec0b0387bb9c4e Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 30 Jul 2024 23:03:43 -0400 Subject: [PATCH 44/93] Add intensity operations to Volume --- src/highdicom/volume.py | 191 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 181 insertions(+), 10 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 165f4bd2..1837505e 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -32,9 +32,7 @@ from pydicom import Dataset, dcmread -# TODO add segmentation get_volume # TODO add basic arithmetric operations -# TODO add normalization # TODO add padding # TODO add pixel value transformations @@ -1039,14 +1037,7 @@ def astype(self, dtype: type) -> 'Volume': """ new_array = self._array.astype(dtype) - return self.__class__( - array=new_array, - affine=self.affine, - frame_of_reference_uid=self.frame_of_reference_uid, - source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), - source_frame_numbers=deepcopy(self.source_frame_numbers), - source_frame_dimension=self.source_frame_dimension or 0, - ) + return self.with_array(new_array) def with_array(self, array: np.ndarray) -> 'Volume': """Get a new volume using a different array. @@ -1081,6 +1072,7 @@ def with_array(self, array: np.ndarray) -> 'Volume': frame_of_reference_uid=self.frame_of_reference_uid, source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), source_frame_numbers=deepcopy(self.source_frame_numbers), + source_frame_dimension=self.source_frame_dimension or 0, ) def __getitem__( @@ -1329,6 +1321,185 @@ def to_patient_orientation( return result.permute(permute_indices) + def normalize_intensity( + self, + per_channel: bool = True, + ) -> 'Volume': + """Normalize the intensities using the mean and variance. + + The resulting volume has zero mean and unit variance. + + Parameters + ---------- + per_channel: bool, optional + If True (the default), each channel is normalized by its own mean + and variance. If False, all channels are normalized together using + the overall mean and variance. + + Returns + ------- + highdicom.Volume: + Volume with normalized intensities. Note that the dtype will + be promoted to floating point. 
+
+        """
+        if (
+            per_channel and
+            self.number_of_channels is not None and
+            self.number_of_channels > 1
+        ):
+            new_array = self.array.astype(np.float64)
+            for c in range(self.number_of_channels):
+                channel = new_array[:, :, :, c]
+                new_array[:, :, :, c] = (
+                    (channel - channel.mean()) /
+                    channel.std()
+                )
+        else:
+            new_array = (self.array - self.array.mean()) / self.array.std()
+
+        return self.with_array(new_array)
+
+    def normalize_intensity_minmax(
+        self,
+        output_min: float = 0.0,
+        output_max: float = 1.0,
+        per_channel: bool = False,
+    ) -> 'Volume':
+        """Normalize by mapping its full intensity range to a fixed range.
+
+        Other pixel values are scaled linearly within this range.
+
+        Parameters
+        ----------
+        output_min: float, optional
+            The value to which the minimum intensity is mapped.
+        output_max: float, optional
+            The value to which the maximum intensity is mapped.
+        per_channel: bool, optional
+            If True, each channel is normalized using its own minimum and
+            maximum. If False (the default), all channels are normalized
+            together using the overall minimum and maximum.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume with normalized intensities. Note that the dtype will
+            be promoted to floating point.
+
+        """
+        output_range = output_max - output_min
+
+        if (
+            per_channel and
+            self.number_of_channels is not None and
+            self.number_of_channels > 1
+        ):
+            new_array = self.array.astype(np.float64)
+            for c in range(self.number_of_channels):
+                channel = new_array[:, :, :, c]
+                imin = channel.min()
+                imax = channel.max()
+                scale_factor = output_range / (imax - imin)
+                new_array[:, :, :, c] = (
+                    (channel - imin) * scale_factor + output_min
+                )
+        else:
+            imin = self.array.min()
+            imax = self.array.max()
+            scale_factor = output_range / (imax - imin)
+            new_array = (self.array - imin) * scale_factor + output_min
+
+        return self.with_array(new_array)
+
+    def clip_intensities(
+        self,
+        a_min: Optional[float],
+        a_max: Optional[float],
+    ) -> 'Volume':
+        """Clip voxel intensities.
+
+        Parameters
+        ----------
+        a_min: Union[float, None]
+            Lower value to clip. May be None if no lower clipping is to be
+            applied.
+        a_max: Union[float, None]
+            Upper value to clip. May be None if no upper clipping is to be
+            applied.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume with clipped intensities.
+
+        """
+        new_array = np.clip(self.array, a_min, a_max)
+
+        return self.with_array(new_array)
+
+    def apply_window(
+        self,
+        *,
+        window_min: Optional[float] = None,
+        window_max: Optional[float] = None,
+        window_center: Optional[float] = None,
+        window_width: Optional[float] = None,
+        output_min: float = 0.0,
+        output_max: float = 1.0,
+        clip: bool = True,
+    ) -> 'Volume':
+        """Apply a window (similar to VOI transform) to the volume.
+
+        Parameters
+        ----------
+        window_min: Union[float, None], optional
+            Minimum value of window (mapped to ``output_min``).
+        window_max: Union[float, None], optional
+            Maximum value of window (mapped to ``output_max``).
+        window_center: Union[float, None], optional
+            Center value of the window.
+        window_width: Union[float, None], optional
+            Width of the window.
+        output_min: float, optional
+            Value to which the lower edge of the window is mapped.
+        output_max: float, optional
+            Value to which the upper edge of the window is mapped.
+        clip: bool, optional
+            Whether to clip the values to lie within the output range.
+
+        Note
+        ----
+        Either ``window_min`` and ``window_max`` or ``window_center`` and
+        ``window_width`` should be specified. Other combinations are not valid.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume with windowed intensities.
+
+        """
+        if (window_min is None) != (window_max is None):
+            raise TypeError("Invalid combination of inputs specified.")
+        if (window_center is None) != (window_width is None):
+            raise TypeError("Invalid combination of inputs specified.")
+        if (window_center is None) == (window_min is None):
+            raise TypeError("Invalid combination of inputs specified.")
+
+        if window_min is None:
+            window_min = window_center - (window_width / 2)
+        if window_width is None:
+            window_width = window_max - window_min
+        output_range = output_max - output_min
+        scale_factor = output_range / window_width
+
+        new_array = (self.array - window_min) * scale_factor + output_min
+
+        if clip:
+            new_array = np.clip(new_array, output_min, output_max)
+
+        return self.with_array(new_array)
+
 
 def concat_channels(volumes: Sequence[Volume]) -> Volume:
     """Form a new volume by concatenating channels of existing volumes.

From 262c712c388fcd7a721146d6de7c4f8b4736c688 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Fri, 2 Aug 2024 03:24:19 -0400
Subject: [PATCH 45/93] Add cropping and padding to volumes

---
 src/highdicom/__init__.py |   2 +
 src/highdicom/enum.py     |  23 +++
 src/highdicom/volume.py   | 331 +++++++++++++++++++++++++++++++++++++-
 3 files changed, 351 insertions(+), 5 deletions(-)

diff --git a/src/highdicom/__init__.py b/src/highdicom/__init__.py
index 33066bfb..a9a27d5c 100644
--- a/src/highdicom/__init__.py
+++ b/src/highdicom/__init__.py
@@ -39,6 +39,7 @@
     ContentQualificationValues,
     DimensionOrganizationTypeValues,
     LateralityValues,
+    PadModes,
     PatientSexValues,
     PhotometricInterpretationValues,
     PixelRepresentationValues,
@@ -76,6 +77,7 @@
     'LateralityValues',
     'ModalityLUT',
     'ModalityLUTTransformation',
+    'PadModes',
     'PaletteColorLUT',
     'PaletteColorLUTTransformation',
     'PatientOrientationValuesBiped',
diff --git a/src/highdicom/enum.py b/src/highdicom/enum.py
index 9f5eef12..66645c38 100644
--- a/src/highdicom/enum.py
+++ b/src/highdicom/enum.py
@@ -376,3 +376,26 @@ class UniversalEntityIDTypeValues(Enum):
 
     X500 = 'X500'
     """An X.500 directory name."""
+
+
+class PadModes(Enum):
+
+    """Enumerated values of modes to pad an array."""
+
+    CONSTANT = 'CONSTANT'
+    """Pad with a specified constant value."""
+
+    EDGE = 'EDGE'
+    """Pad with the edge value."""
+
+    MINIMUM = 'MINIMUM'
+    """Pad with the minimum value."""
+
+    MAXIMUM = 'MAXIMUM'
+    """Pad with the maximum value."""
+
+    MEAN = 'MEAN'
+    """Pad with the mean value."""
+
+    MEDIAN = 'MEDIAN'
+    """Pad with the median value."""
diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index 1837505e..474797cb 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -8,6 +8,7 @@
 from highdicom._module_utils import is_multiframe_image
 from highdicom.enum import (
     CoordinateSystemNames,
+    PadModes,
     PatientOrientationValuesBiped,
 )
 from highdicom.spatial import (
@@ -15,6 +16,7 @@
     _is_matrix_orthogonal,
     _normalize_patient_orientation,
     _transform_affine_matrix,
+    _translate_affine_matrix,
     PATIENT_ORIENTATION_OPPOSITES,
     VOLUME_INDEX_CONVENTION,
     get_closest_patient_orientation,
@@ -33,8 +35,11 @@
 from pydicom import Dataset, dcmread
 
 # TODO add basic arithmetric operations
 # TODO add pixel value transformations
+# TODO handedness checks/constraints
+# TODO should methods copy arrays?
+# TODO crop_or_pad +# TODO random crop class Volume: @@ -47,7 +52,8 @@ class Volume: be extracted from DICOM image, and/or encoded within a DICOM object, potentially following any number of processing steps. - All such geometries exist within DICOM's patient coordinate system. + All such volumes have a geometry that exists within DICOM's patient + coordinate system. Internally this class uses the following conventions to represent the geometry, however this can be constructed from or transformed to other @@ -1039,6 +1045,24 @@ def astype(self, dtype: type) -> 'Volume': return self.with_array(new_array) + def copy(self) -> 'Volume': + """Get an unaltered copy of the volume. + + Returns + ------- + highdicom.Volume: + Copy of the original volume. + + """ + return self.__class__( + array=self.array, # TODO should this copy? + affine=self._affine.copy(), + frame_of_reference_uid=self.frame_of_reference_uid, + source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), + source_frame_numbers=deepcopy(self.source_frame_numbers), + source_frame_dimension=self.source_frame_dimension or 0, + ) + def with_array(self, array: np.ndarray) -> 'Volume': """Get a new volume using a different array. @@ -1084,10 +1108,13 @@ def __getitem__( Parameters ---------- index: Union[int, slice, Tuple[Union[int, slice]]] + Index values. All possibilities supported by numpy arrays are + supported, including negative indices and different step sizes. Returns ------- highdicom.Volume: + New volume representing a sub-volume of the original volume. """ if isinstance(index, int): @@ -1364,7 +1391,7 @@ def normalize_intensity_minmax( self, output_min: float = 0.0, output_max: float = 1.0, - per_channel: bool = True, + per_channel: bool = False, ) -> 'Volume': """Normalize by mapping its full intensity range to a fixed range. @@ -1377,8 +1404,8 @@ def normalize_intensity_minmax( output_max: float, optional The value to which the maximum intensity is mapped. per_channel: bool, optional - If True (the default), each channel is normalized by its own mean - and variance. If False, all channels are normalized together using + If True, each channel is normalized by its own mean and variance. + If False (the default), all channels are normalized together using the overall mean and variance. Returns @@ -1500,6 +1527,300 @@ def apply_window( return self.with_array(new_array) + def squeeze_channel(self) -> 'Volume': + """Remove a singleton channel axis. + + If the volume has no channels, returns an unaltered copy. + + Returns + ------- + highdicom.Volume: + Volume with channel axis removed. + + """ + if self.number_of_channels is None: + return self.copy() + if self.number_of_channels == 1: + return self.with_array(self.array.squeeze(3)) + else: + raise RuntimeError( + 'Volume with multiple channels cannot be squeezed.' + ) + + def ensure_channel(self) -> 'Volume': + """Add a singleton channel axis, if needed. + + If the volume has channels already, returns an unaltered copy. + + Returns + ------- + highdicom.Volume: + Volume with added channel axis (if required). + + """ + if self.number_of_channels is None: + return self.with_array(self.array[:, :, :, None]) + return self.copy() + + def pad( + self, + pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]], + mode: PadModes = PadModes.CONSTANT, + constant_value: float = 0.0, + per_channel: bool = False, + ) -> 'Volume': + """Pad volume along the three spatial dimensions. 
+
+        Parameters
+        ----------
+        pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]]
+            Values to pad the array. Takes the same form as ``numpy.pad()``.
+            May be:
+
+            * A single integer value, which results in that many voxels being
+              added to the beginning and end of all three spatial dimensions.
+            * A sequence of two values in the form ``[before, after]``, which
+              results in 'before' voxels being added to the beginning of each
+              of the three spatial dimensions, and 'after' voxels being added
+              to the end of each of the three spatial dimensions,
+            * A nested sequence of integers of the form ``[[pad1], [pad2],
+              [pad3]]``, in which separate padding values are supplied for each
+              of the three spatial axes and used to pad before and after along
+              those axes, or
+            * A nested sequence of integers in the form ``[[before1, after1],
+              [before2, after2], [before3, after3]]``, in which separate values
+              are supplied for the before and after padding of each of the
+              three spatial dimensions.
+        mode: highdicom.PadModes, optional
+            Mode to use to pad the array. See :class:`highdicom.PadModes` for
+            options.
+        constant_value: Union[float, Sequence[float]], optional
+            Value used to pad when mode is ``"CONSTANT"``. If ``per_channel``
+            is True, a sequence whose length is equal to the number of channels
+            may be passed, and each value will be used for the corresponding
+            channel. With other pad modes, this argument is ignored.
+        per_channel: bool, optional
+            For padding modes that involve calculation of image statistics to
+            determine the padding value (i.e. ``MINIMUM``, ``MAXIMUM``,
+            ``MEAN``, ``MEDIAN``), pad each channel separately using the value
+            calculated using that channel alone (rather than the statistics of
+            the entire array). For other padding modes, this argument makes no
+            difference. This should not be True if the image does not have a
+            channel dimension.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume with padding applied.
+
+        """
+        if isinstance(mode, str):
+            mode = mode.upper()
+        mode = PadModes(mode)
+
+        if per_channel and self.number_of_channels is None:
+            raise ValueError(
+                "Argument 'per_channel' may not be True if the image has no "
+                "channels."
+            )
+
+        if mode in (
+            PadModes.MINIMUM,
+            PadModes.MAXIMUM,
+            PadModes.MEAN,
+            PadModes.MEDIAN,
+        ):
+            used_mode = PadModes.CONSTANT
+        elif (
+            mode == PadModes.CONSTANT and
+            isinstance(constant_value, Sequence)
+        ):
+            used_mode = mode
+            if not per_channel:
+                raise TypeError(
+                    "Argument 'constant_value' should be a single value if "
+                    "'per_channel' is False."
+                )
+            if len(constant_value) != self.number_of_channels:
+                raise ValueError(
+                    "Argument 'constant_value' must have length equal to the "
+                    'number of channels in the volume.'
+ ) + else: + used_mode = mode + # per_channel result is same as default result, so just ignore it + per_channel = False + + if ( + self.number_of_channels is None or + self.number_of_channels == 1 + ): + # Only one channel, so can ignore the per_channel logic + per_channel = False + + padding_with_channels = ( + self.number_of_channels is not None and not per_channel + ) + if isinstance(pad_width, int): + origin_offset = [-pad_width] * 3 + if padding_with_channels: + pad_width = [*([[pad_width]] * 3), [0]] # no channel padding + elif isinstance(pad_width, Sequence): + if isinstance(pad_width[0], int): + origin_offset = [-pad_width[0]] * 3 + if padding_with_channels: + pad_width = [*([pad_width] * 3), [0, 0]] # no channel padding + elif isinstance(pad_width[0], Sequence): + if len(pad_width[0]) == 1: + origin_offset = [-p[0] for p in pad_width] + if padding_with_channels: + pad_width = pad_width.copy() + pad_width.append([0]) # no channel padding + elif len(pad_width[0]) == 2: + origin_offset = [-p[0] for p in pad_width] + if padding_with_channels: + pad_width = pad_width.copy() + pad_width.append([0, 0]) # no channel padding + else: + raise ValueError("Invalid arrangement in 'pad_width'.") + else: + raise TypeError("Invalid format for 'pad_width'.") + + def pad_array(array: np.ndarray, cval: float) -> float: + if used_mode == PadModes.CONSTANT: + if mode == PadModes.MINIMUM: + v = array.min() + elif mode == PadModes.MAXIMUM: + v = array.max() + elif mode == PadModes.MEAN: + v = array.mean() + elif mode == PadModes.MEDIAN: + v = np.median(array) + elif mode == PadModes.CONSTANT: + v = cval + pad_kwargs = {'constant_values': v} + else: + pad_kwargs = {} + + return np.pad( + array, + pad_width=pad_width, + mode=used_mode.value.lower(), + **pad_kwargs, + ) + + if per_channel: + if not isinstance(constant_value, Sequence): + constant_value = [constant_value] * self.number_of_channels + padded_channels = [] + for c, v in enumerate(constant_value): + padded_channels.append(pad_array(self.array[:, :, :, c], v)) + new_array = np.stack(padded_channels, axis=-1) + else: + new_array = pad_array(self.array, constant_value) + + new_affine = _translate_affine_matrix(self.affine, origin_offset) + + return self.__class__( + array=new_array, + affine=new_affine, + frame_of_reference_uid=self.frame_of_reference_uid, + source_frame_dimension=self.source_frame_dimension or 0, + ) + + def pad_to_shape( + self, + shape: Sequence[int], + mode: PadModes = PadModes.CONSTANT, + constant_value: float = 0.0, + per_channel: bool = False, + ) -> 'Volume': + """Pad volume to given spatial shape. + + The volume is padded symmetrically, placing the original array at the + center of the output array, to achieve the given shape. If this + requires an odd number of elements to be added along a certain + dimension, one more element is placed at the end of the array than at + the start. + + Parameters + ---------- + shape: Sequence[int] + Sequence of three integers specifying the spatial shape to pad to. + This shape must be no smaller than the existing shape along any of + the three spatial dimensions. + mode: highdicom.PadModes, optional + Mode to use to pad the array. See :class:`highdicom.PadModes` for + options. + constant_value: Union[float, Sequence[float]], optional + Value used to pad when mode is ``"CONSTANT"``. If ``per_channel`` + if True, a sequence whose length is equal to the number of channels + may be passed, and each value will be used for the corresponding + channel. 
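The branch above folds the statistic-based modes into a constant pad whose
fill value is computed from the array first. A standalone sketch of that
reduction (assuming the same mode names; not the library's code):

    import numpy as np

    def statistic_pad(array, pad_width, mode):
        # MINIMUM/MAXIMUM/MEAN/MEDIAN all become a constant pad once the
        # statistic has been computed over the input array.
        value = {
            'MINIMUM': array.min,
            'MAXIMUM': array.max,
            'MEAN': array.mean,
            'MEDIAN': lambda: np.median(array),
        }[mode]()
        return np.pad(array, pad_width, mode='constant', constant_values=value)

    padded = statistic_pad(np.array([[1.0, 2.0], [3.0, 4.0]]), 1, 'MEAN')
    # every padded element equals 2.5, the mean of the original array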
With other pad modes, this argument is ignored. + per_channel: bool, optional + For padding modes that involve calculation of image statistics to + determine the padding value (i.e. ``MINIMUM``, ``MAXIMUM``, + ``MEAN``, ``MEDIAN``), pad each channel separately using the value + calculated using that channel alone (rather than the statistics of + the entire array). For other padding modes, this argument makes no + difference. This should not the True if the image does not have a + channel dimension. + + Returns + ------- + highdicom.Volume: + Volume with padding applied. + + """ + if len(shape) != 3: + raise ValueError( + "Argument 'shape' must have length 3." + ) + + pad_width = [] + for insize, outsize in zip(self.spatial_shape, shape): + to_pad = outsize - insize + if to_pad < 0: + raise ValueError( + 'Shape is smaller than existing shape along at least ' + 'one axis.' + ) + pad_front = to_pad // 2 + pad_back = to_pad - pad_front + pad_width.append((pad_front, pad_back)) + + return self.pad( + pad_width=pad_width, + mode=mode, + constant_value=constant_value, + per_channel=per_channel, + ) + + def crop_to_shape(self, shape: Sequence[int]) -> 'Volume': + + if len(shape) != 3: + raise ValueError( + "Argument 'shape' must have length 3." + ) + + crop_vals = [] + for insize, outsize in zip(self.spatial_shape, shape): + to_crop = insize - outsize + if to_crop < 0: + raise ValueError( + 'Shape is larger than existing shape along at least ' + 'one axis.' + ) + crop_front = to_crop // 2 + crop_back = to_crop - crop_front + crop_vals.append((crop_front, insize - crop_back)) + + return self[ + crop_vals[0][0]:crop_vals[0][1], + crop_vals[1][0]:crop_vals[1][1], + crop_vals[2][0]:crop_vals[2][1], + ] + def concat_channels(volumes: Sequence[Volume]) -> Volume: """Form a new volume by concatenating channels of existing volumes. From 7b4e6aa1e2a9d485f8cc41b30255dc9c5d84ab1f Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 2 Aug 2024 15:05:06 -0400 Subject: [PATCH 46/93] Fix intensity normalization methods --- src/highdicom/volume.py | 55 +++++++++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 13 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 474797cb..83c5665f 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -39,7 +39,9 @@ # TODO handedness checks/constraints # TODO should methods copy arrays? # TODO crop_or_pad -# TODO random crop +# TODO random crop, random flip +# TODO match geometry +# TODO trim non-zero class Volume: @@ -1348,9 +1350,11 @@ def to_patient_orientation( return result.permute(permute_indices) - def normalize_intensity( + def normalize_mean_std( self, per_channel: bool = True, + output_mean: float = 0.0, + output_std: float = 1.0, ) -> 'Volume': """Normalize the intensities using the mean and variance. @@ -1362,6 +1366,11 @@ def normalize_intensity( If True (the default), each channel is normalized by its own mean and variance. If False, all channels are normalized together using the overall mean and variance. + output_mean: float, optional + The mean value of the output array (or channel), after scaling. + output_std: float, optional + The standard deviation of the output array (or channel), + after scaling. 
Returns ------- @@ -1375,19 +1384,23 @@ def normalize_intensity( self.number_of_channels is not None and self.number_of_channels > 1 ): - new_array = self.array.copy() + new_array = self.array.astype(np.float64) for c in range(self.number_of_channels): - channel = new_array[:,:, :, c] + channel = new_array[:, :, :, c] new_array[:, :, :, c] = ( (channel - channel.mean()) / - channel.std() - ) + (channel.std() / output_std) + ) + output_mean else: - new_array = (self.array - self.array.mean()) / self.array.std() + new_array = ( + (self.array - self.array.mean()) / + (self.array.std() / output_std) + + output_mean + ) return self.with_array(new_array) - def normalize_intensity_minmax( + def normalize_min_max( self, output_min: float = 0.0, output_max: float = 1.0, @@ -1416,13 +1429,15 @@ def normalize_intensity_minmax( """ output_range = output_max - output_min + if output_range <= 0.0: + raise ValueError('Output min must be below output max.') if ( per_channel and self.number_of_channels is not None and self.number_of_channels > 1 ): - new_array = self.array.copy() + new_array = self.array.astype(np.float64) for c in range(self.number_of_channels): channel = new_array[:,:, :, c] imin = channel.min() @@ -1439,21 +1454,21 @@ def normalize_intensity_minmax( return self.with_array(new_array) - def clip_intensities( + def clip( self, a_min: Optional[float], a_max: Optional[float], ) -> 'Volume': - """Clip voxel intensities. + """Clip voxel intensities to lie within a given range. Parameters ---------- a_min: Union[float, None] Lower value to clip. May be None if no lower clipping is to be - applied. + applied. Voxel intensities below this value are set to this value. a_max: Union[float, None] Upper value to clip. May be None if no upper clipping is to be - applied. + applied. Voxel intensities above this value are set to this value. Returns ------- @@ -1797,7 +1812,21 @@ def pad_to_shape( ) def crop_to_shape(self, shape: Sequence[int]) -> 'Volume': + """Center-crop volume to a given spatial shape. + + Parameters + ---------- + shape: Sequence[int] + Sequence of three integers specifying the spatial shape to crop to. + This shape must be no larger than the existing shape along any of + the three spatial dimensions. + Returns + ------- + highdicom.Volume: + Volume with padding applied. + + """ if len(shape) != 3: raise ValueError( "Argument 'shape' must have length 3." 
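Patch 46's normalization formula can be checked in isolation: dividing by
``std / output_std`` and adding ``output_mean`` yields an array with exactly
the requested statistics. A small standalone sanity check (values are
arbitrary):

    import numpy as np

    x = np.array([2.0, 4.0, 6.0, 8.0])
    output_mean, output_std = 10.0, 2.0
    normalized = (x - x.mean()) / (x.std() / output_std) + output_mean
    assert np.isclose(normalized.mean(), output_mean)
    assert np.isclose(normalized.std(), output_std)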
From 7abd30c5c05e4e965edd38f56208ff96586ffc55 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 2 Aug 2024 20:23:56 -0400 Subject: [PATCH 47/93] add handed check/ensure --- src/highdicom/__init__.py | 2 + src/highdicom/enum.py | 31 +++++ src/highdicom/volume.py | 263 +++++++++++++++++++++++++++++++++++--- 3 files changed, 278 insertions(+), 18 deletions(-) diff --git a/src/highdicom/__init__.py b/src/highdicom/__init__.py index a9a27d5c..bf364d22 100644 --- a/src/highdicom/__init__.py +++ b/src/highdicom/__init__.py @@ -34,6 +34,7 @@ VOILUTTransformation, ) from highdicom.enum import ( + AxisHandedness, AnatomicalOrientationTypeValues, CoordinateSystemNames, ContentQualificationValues, @@ -69,6 +70,7 @@ 'VOILUT', 'AlgorithmIdentificationSequence', 'AnatomicalOrientationTypeValues', + 'AxisHandedness', 'ContentCreatorIdentificationCodeSequence', 'ContentQualificationValues', 'CoordinateSystemNames', diff --git a/src/highdicom/enum.py b/src/highdicom/enum.py index 66645c38..0d15632f 100644 --- a/src/highdicom/enum.py +++ b/src/highdicom/enum.py @@ -399,3 +399,34 @@ class PadModes(Enum): MEDIAN = 'MEDIAN' """Pad with the median value.""" + + +class AxisHandedness(Enum): + + """Enumerated values for axis handedness. + + Axis handedness refers to a property of a mapping between voxel indices and + their corresponding coordinates in the frame-of-reference coordinate + system, as represented by the affine matrix. + + """ + + LEFT_HANDED = "LEFT_HANDED" + """ + + The unit vectors of the first, second and third axes form a left hand when + drawn in the frame-of-reference coordinate system with the thumb + representing the first vector, the index finger representing the second + vector, and the middle finger representing the third vector. + + """ + + RIGHT_HANDED = "RIGHT_HANDED" + """ + + The unit vectors of the first, second and third axes form a right hand when + drawn in the frame-of-reference coordinate system with the thumb + representing the first vector, the index finger representing the second + vector, and the middle finger representing the third vector. + + """ diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 83c5665f..b4a2df3c 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -7,6 +7,7 @@ from highdicom._module_utils import is_multiframe_image from highdicom.enum import ( + AxisHandedness, CoordinateSystemNames, PadModes, PatientOrientationValuesBiped, @@ -36,12 +37,16 @@ # TODO add basic arithmetric operations # TODO add pixel value transformations -# TODO handedness checks/constraints # TODO should methods copy arrays? -# TODO crop_or_pad -# TODO random crop, random flip +# TODO random crop, random flip, random permute # TODO match geometry # TODO trim non-zero +# TODO physical extent, physical volume, pixel area, voxel volume +# TODO support slide coordinate system +# TODO volume to volume transformer +# TODO split out a separate geometry only class +# TODO volread and metadata +# TODO make RIGHT handed the default class Volume: @@ -987,17 +992,31 @@ def spacing(self) -> List[float]: norms = np.sqrt((dir_mat ** 2).sum(axis=0)) return norms.tolist() + @property + def voxel_volume(self) -> float: + """float: The volume of a single voxel in cubic millimeters.""" + return np.product(self.spacing).item() + @property def position(self) -> List[float]: """List[float]: - Pixel spacing in millimeter units for the three spatial directions. - Three values (spacing between slices, spacing spacing between rows, - spacing between columns). 
+ Position in the frame of reference space of the center of voxel at + indices (0, 0, [). """ return self._affine[:3, 3].tolist() + @property + def physical_extent(self) -> List[float]: + """List[float]: Side lengths of the volume in millimeters.""" + return [(n + 1) * d for n, d in zip(self.shape, self.spacing)] + + @property + def physical_volume(self) -> float: + """float: Total volume in cubic millimeter.""" + return self.voxel_volume * self.array.size + @property def direction(self) -> np.ndarray: """numpy.ndarray: @@ -1013,6 +1032,33 @@ def direction(self) -> np.ndarray: norms = np.sqrt((dir_mat ** 2).sum(axis=0)) return dir_mat / norms + def direction_vectors(self) -> List[np.ndarray]: + """Get the vectors along the three array dimensions. + + Note that these vectors are not normalized, they have length equal to + the spacing along the relevant dimension. + + Returns + ------- + List[np.ndarray]: + List of three vectors for the three axes of the volume array. Each + vector is a 1D numpy array. + + """ + return list(self.affine[:3, :3].T) + + def unit_vectors(self) -> List[np.ndarray]: + """Get the normalized vectors along the three array dimensions. + + Returns + ------- + List[np.ndarray]: + List of three vectors for the three axes of the volume array. Each + vector is a 1D numpy array and has unit length. + + """ + return list(self.direction.T) + def get_closest_patient_orientation(self) -> Tuple[ PatientOrientationValuesBiped, PatientOrientationValuesBiped, @@ -1216,7 +1262,7 @@ def __getitem__( source_frame_dimension=self.source_frame_dimension or 0, ) - def permute(self, indices: Sequence[int]) -> 'Volume': + def permute_axes(self, indices: Sequence[int]) -> 'Volume': # TODO add tests for this """Create a new volume by permuting the spatial axes. @@ -1266,6 +1312,39 @@ def permute(self, indices: Sequence[int]) -> 'Volume': source_frame_dimension=new_source_frame_dimension, ) + def swap_axes(self, axis_1: int, axis_2: int) -> 'Volume': + """Swap the spatial axes of the array. + + Parameters + ---------- + axis_1: int + Spatial axis index (0, 1 or 2) to swap with ``axis_2``. + axis_2: int + Spatial axis index (0, 1 or 2) to swap with ``axis_1``. + + Returns + ------- + highdicom.Volume: + New volume with spatial axes swapped as requested. + + """ + for a in [axis_1, axis_2]: + if a not in {0, 1, 2}: + raise ValueError( + 'Axis values must be one of 0, 1 or 2.' + ) + + if axis_1 == axis_2: + raise ValueError( + "Arguments 'axis_1' and 'axis_2' must be different." + ) + + permutation = [0, 1, 2] + permutation[axis_1] = axis_2 + permutation[axis_2] = axis_1 + + return self.permute_axes(permutation) + def flip(self, axis: Union[int, Sequence[int]]) -> 'Volume': """Flip the spatial axes of the array. @@ -1275,8 +1354,8 @@ def flip(self, axis: Union[int, Sequence[int]]) -> 'Volume': Parameters ---------- axis: Union[int, Sequence[int]] - Axis or list of axes that should be flipped. These should include - only the spatial axes (0, 1, and/or 2). + Axis or list of axis indices that should be flipped. These should + include only the spatial axes (0, 1, and/or 2). Returns ------- @@ -1325,6 +1404,11 @@ def to_patient_orientation( highdicom.enum.PatientOrientationValuesBiped values, or a string such as ``"FPL"`` using the same characters. + Returns + ------- + highdicom.Volume: + New volume with the requested patient orientation. 
+ """ # noqa: E501 desired_orientation = _normalize_patient_orientation( patient_orientation @@ -1348,7 +1432,69 @@ def to_patient_orientation( else: result = self - return result.permute(permute_indices) + return result.permute_axes(permute_indices) + + @property + def handedness(self) -> AxisHandedness: + """highdicom.AxisHandedness: Axis handedness of the volume.""" + v1, v2, v3 = self.direction_vectors() + if np.cross(v1, v2) @ v3 < 0.0: + return AxisHandedness.LEFT_HANDED + return AxisHandedness.RIGHT_HANDED + + def ensure_handedness( + self, + handedness: Union[AxisHandedness, str], + flip_axis: Optional[int] = None, + swap_axes: Optional[Sequence[int]] = None, + ) -> 'Volume': + """Manipulate the volume if necessary to ensure a given handedness. + + If the volume already has the specified handedness, it is returned + unaltered. + + If the volume does not meet the requirement, the volume is manipulated + using a user specified operation to meet the requirement. The two + options are reversing the direction of a single axis ("flipping") or + swapping the position of two axes. + + Parameters + ---------- + handedness: highdicom.AxisHandedness + Handedness to ensure. + flip_axis: Union[int, None], optional + Specification of a spatial axis index (0, 1, or 2) to flip if + required to meet the given handedness requirement. + swap_axes: Union[int, None], optional + Specification of a sequence of two spatial axis indices (each being + 0, 1, or 2) to swap if required to meet the given handedness + requirement. + + Note + ---- + Either ``flip_axis`` or ``swap_axes`` must be provided (and not both) + to specify the operation to perform to correct the handedness (if + required). + + """ + if (flip_axis is None) == (swap_axes is None): + raise TypeError( + "Exactly one of either 'flip_axis' or 'swap_axes' " + "must be specified." + ) + handedness = AxisHandedness(handedness) + if handedness == self.handedness: + return self + + if flip_axis is not None: + return self.flip(flip_axis) + + if len(swap_axes) != 2: + raise ValueError( + "Argument 'swap_axes' must have length 2." + ) + + return self.swap_axes(swap_axes[0], swap_axes[1]) def normalize_mean_std( self, @@ -1745,7 +1891,7 @@ def pad_array(array: np.ndarray, cval: float) -> float: def pad_to_shape( self, - shape: Sequence[int], + spatial_shape: Sequence[int], mode: PadModes = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, @@ -1760,7 +1906,7 @@ def pad_to_shape( Parameters ---------- - shape: Sequence[int] + spatial_shape: Sequence[int] Sequence of three integers specifying the spatial shape to pad to. This shape must be no smaller than the existing shape along any of the three spatial dimensions. @@ -1787,13 +1933,13 @@ def pad_to_shape( Volume with padding applied. """ - if len(shape) != 3: + if len(spatial_shape) != 3: raise ValueError( "Argument 'shape' must have length 3." ) pad_width = [] - for insize, outsize in zip(self.spatial_shape, shape): + for insize, outsize in zip(self.spatial_shape, spatial_shape): to_pad = outsize - insize if to_pad < 0: raise ValueError( @@ -1811,12 +1957,12 @@ def pad_to_shape( per_channel=per_channel, ) - def crop_to_shape(self, shape: Sequence[int]) -> 'Volume': + def crop_to_shape(self, spatial_shape: Sequence[int]) -> 'Volume': """Center-crop volume to a given spatial shape. Parameters ---------- - shape: Sequence[int] + spatial_shape: Sequence[int] Sequence of three integers specifying the spatial shape to crop to. 
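The ``handedness`` property added above relies on the sign of the scalar
triple product of the three axis vectors. A quick standalone illustration
(using an identity affine for simplicity):

    import numpy as np

    v1, v2, v3 = np.eye(3)             # axis vectors of an identity affine
    assert np.cross(v1, v2) @ v3 > 0   # right-handed
    # negating any single axis flips the handedness
    assert np.cross(v1, v2) @ -v3 < 0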
This shape must be no larger than the existing shape along any of the three spatial dimensions. @@ -1827,13 +1973,13 @@ def crop_to_shape(self, shape: Sequence[int]) -> 'Volume': Volume with padding applied. """ - if len(shape) != 3: + if len(spatial_shape) != 3: raise ValueError( "Argument 'shape' must have length 3." ) crop_vals = [] - for insize, outsize in zip(self.spatial_shape, shape): + for insize, outsize in zip(self.spatial_shape, spatial_shape): to_crop = insize - outsize if to_crop < 0: raise ValueError( @@ -1850,6 +1996,87 @@ def crop_to_shape(self, shape: Sequence[int]) -> 'Volume': crop_vals[2][0]:crop_vals[2][1], ] + def pad_or_crop_to_shape( + self, + spatial_shape: Sequence[int], + mode: PadModes = PadModes.CONSTANT, + constant_value: float = 0.0, + per_channel: bool = False, + ) -> 'Volume': + """Pad and/or crop volume to given spatial shape. + + For each dimension where padding is required, the volume is padded + symmetrically, placing the original array at the center of the output + array, to achieve the given shape. If this requires an odd number of + elements to be added along a certain dimension, one more element is + placed at the end of the array than at the start. + + For each dimension where cropping is required, center cropping is used. + + Parameters + ---------- + spatial_shape: Sequence[int] + Sequence of three integers specifying the spatial shape to pad or + crop to. + mode: highdicom.PadModes, optional + Mode to use to pad the array, if padding is required. See + :class:`highdicom.PadModes` for options. + constant_value: Union[float, Sequence[float]], optional + Value used to pad when mode is ``"CONSTANT"``. If ``per_channel`` + if True, a sequence whose length is equal to the number of channels + may be passed, and each value will be used for the corresponding + channel. With other pad modes, this argument is ignored. + per_channel: bool, optional + For padding modes that involve calculation of image statistics to + determine the padding value (i.e. ``MINIMUM``, ``MAXIMUM``, + ``MEAN``, ``MEDIAN``), pad each channel separately using the value + calculated using that channel alone (rather than the statistics of + the entire array). For other padding modes, this argument makes no + difference. This should not the True if the image does not have a + channel dimension. + + Returns + ------- + highdicom.Volume: + Volume with padding and/or cropping applied. + + """ + if len(spatial_shape) != 3: + raise ValueError( + "Argument 'shape' must have length 3." + ) + + pad_width = [] + crop_vals = [] + for insize, outsize in zip(self.spatial_shape, spatial_shape): + diff = outsize - insize + if diff > 0: + pad_front = diff // 2 + pad_back = diff - pad_front + pad_width.append((pad_front, pad_back)) + crop_vals.append((0, outsize)) + elif diff < 0: + crop_front = (-diff) // 2 + crop_back = (-diff) - crop_front + crop_vals.append((crop_front, insize - crop_back)) + pad_width.append((0, 0)) + else: + pad_width.append((0, 0)) + crop_vals.append((0, outsize)) + + cropped = self[ + crop_vals[0][0]:crop_vals[0][1], + crop_vals[1][0]:crop_vals[1][1], + crop_vals[2][0]:crop_vals[2][1], + ] + padded = cropped.pad( + pad_width=pad_width, + mode=mode, + constant_value=constant_value, + per_channel=per_channel, + ) + return padded + def concat_channels(volumes: Sequence[Volume]) -> Volume: """Form a new volume by concatenating channels of existing volumes. 
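The symmetric pad/crop arithmetic in ``pad_or_crop_to_shape`` can be isolated
per axis; a sketch of the bookkeeping with a hypothetical helper (not in the
library):

    def pad_or_crop_amounts(insize, outsize):
        # Returns ((pad_before, pad_after), (crop_start, crop_stop)) for one
        # axis; the 'extra' element always goes onto / comes off the end.
        diff = outsize - insize
        if diff >= 0:
            return (diff // 2, diff - diff // 2), (0, outsize)
        crop = -diff
        return (0, 0), (crop // 2, insize - (crop - crop // 2))

    print(pad_or_crop_amounts(5, 8))  # ((1, 2), (0, 8))
    print(pad_or_crop_amounts(8, 5))  # ((0, 0), (1, 6))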
From a546466b9a9d679c92a8f34a00a7fe7c4d68c416 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sat, 3 Aug 2024 19:39:56 -0400 Subject: [PATCH 48/93] Add some tests for volumes --- src/highdicom/volume.py | 38 ++++++++++++++++--------- tests/test_volume.py | 63 ++++++++++++++++++++++++++++++++++------- 2 files changed, 77 insertions(+), 24 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index b4a2df3c..12f0af44 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -995,14 +995,14 @@ def spacing(self) -> List[float]: @property def voxel_volume(self) -> float: """float: The volume of a single voxel in cubic millimeters.""" - return np.product(self.spacing).item() + return np.prod(self.spacing).item() @property def position(self) -> List[float]: """List[float]: Position in the frame of reference space of the center of voxel at - indices (0, 0, [). + indices (0, 0, 0). """ return self._affine[:3, 3].tolist() @@ -1032,7 +1032,7 @@ def direction(self) -> np.ndarray: norms = np.sqrt((dir_mat ** 2).sum(axis=0)) return dir_mat / norms - def direction_vectors(self) -> List[np.ndarray]: + def spacing_vectors(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """Get the vectors along the three array dimensions. Note that these vectors are not normalized, they have length equal to @@ -1040,24 +1040,33 @@ def direction_vectors(self) -> List[np.ndarray]: Returns ------- - List[np.ndarray]: - List of three vectors for the three axes of the volume array. Each - vector is a 1D numpy array. + numpy.ndarray: + Vector between voxel centers along the increasing first axis. + 1D NumPy array. + numpy.ndarray: + Vector between voxel centers along the increasing second axis. + 1D NumPy array. + numpy.ndarray: + Vector between voxel centers along the increasing third axis. + 1D NumPy array. """ - return list(self.affine[:3, :3].T) + return tuple(self.affine[:3, :3].T) - def unit_vectors(self) -> List[np.ndarray]: + def unit_vectors(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """Get the normalized vectors along the three array dimensions. Returns ------- - List[np.ndarray]: - List of three vectors for the three axes of the volume array. Each - vector is a 1D numpy array and has unit length. + numpy.ndarray: + Unit vector along the increasing first axis. 1D NumPy array. + numpy.ndarray: + Unit vector along the increasing second axis. 1D NumPy array. + numpy.ndarray: + Unit vector along the increasing third axis. 1D NumPy array. """ - return list(self.direction.T) + return tuple(self.direction.T) def get_closest_patient_orientation(self) -> Tuple[ PatientOrientationValuesBiped, @@ -1156,8 +1165,9 @@ def __getitem__( Parameters ---------- index: Union[int, slice, Tuple[Union[int, slice]]] - Index values. All possibilities supported by numpy arrays are + Index values. MOst possibilities supported by numpy arrays are supported, including negative indices and different step sizes. + Indexing with lists is not supported. 
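The new ``spacing_vectors`` and ``unit_vectors`` accessors are thin views onto
the affine: the columns of its upper-left 3x3 block are the spacing vectors,
and their norms are the spacings. A standalone example (affine values are
arbitrary):

    import numpy as np

    affine = np.array([
        [0.0, 0.0, 1.0, -50.0],
        [0.0, 2.5, 0.0, -50.0],
        [10.0, 0.0, 0.0, 30.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    v1, v2, v3 = affine[:3, :3].T                  # spacing vectors
    print(np.linalg.norm(affine[:3, :3], axis=0))  # spacings: 10.0, 2.5, 1.0
    u1, u2, u3 = (v / np.linalg.norm(v) for v in (v1, v2, v3))  # unit vectors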
Returns ------- @@ -1437,7 +1447,7 @@ def to_patient_orientation( @property def handedness(self) -> AxisHandedness: """highdicom.AxisHandedness: Axis handedness of the volume.""" - v1, v2, v3 = self.direction_vectors() + v1, v2, v3 = self.spacing_vectors() if np.cross(v1, v2) @ v3 < 0.0: return AxisHandedness.LEFT_HANDED return AxisHandedness.RIGHT_HANDED diff --git a/tests/test_volume.py b/tests/test_volume.py index 2754e28b..2d4bae24 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -10,6 +10,21 @@ from highdicom import UID +def read_multiframe_ct_volume(): + dcm = pydicom.dcmread(get_testdata_file('eCT_Supplemental.dcm')) + return Volume.from_image(dcm), dcm + + +def read_ct_series_volume(): + ct_files = [ + get_testdata_file('dicomdirtests/77654033/CT2/17136'), + get_testdata_file('dicomdirtests/77654033/CT2/17196'), + get_testdata_file('dicomdirtests/77654033/CT2/17166'), + ] + ct_series = [pydicom.dcmread(f) for f in ct_files] + return Volume.from_image_series(ct_series), ct_series + + def test_transforms(): array = np.zeros((25, 50, 50)) volume = Volume.from_attributes( @@ -130,16 +145,10 @@ def test_with_array(): def test_volume_single_frame(): - ct_files = [ - get_testdata_file('dicomdirtests/77654033/CT2/17136'), - get_testdata_file('dicomdirtests/77654033/CT2/17196'), - get_testdata_file('dicomdirtests/77654033/CT2/17166'), - ] - ct_series = [pydicom.dcmread(f) for f in ct_files] - volume = Volume.from_image_series(ct_series) + volume, ct_series = read_ct_series_volume() assert isinstance(volume, Volume) rows, columns = ct_series[0].Rows, ct_series[0].Columns - assert volume.shape == (len(ct_files), rows, columns) + assert volume.shape == (len(ct_series), rows, columns) assert volume.spatial_shape == volume.shape assert volume.number_of_channels is None assert volume.source_frame_numbers is None @@ -167,11 +176,28 @@ def test_volume_single_frame(): assert volume.pixel_spacing == ct_series[0].PixelSpacing slice_spacing = 1.25 assert volume.spacing == [slice_spacing, *ct_series[0].PixelSpacing[::-1]] + pixel_spacing = ct_series[0].PixelSpacing + expected_voxel_volume = ( + pixel_spacing[0] * pixel_spacing[1] * slice_spacing + ) + expected_volume = expected_voxel_volume * np.prod(volume.spatial_shape) + assert np.allclose(volume.voxel_volume, expected_voxel_volume) + assert np.allclose(volume.physical_volume, expected_volume) + u1, u2, u3 = volume.unit_vectors() + for u in [u1, u2, u3]: + assert u.shape == (3, ) + assert np.linalg.norm(u) == 1.0 + assert np.allclose(u3, orientation[:3]) + assert np.allclose(u2, orientation[3:]) + + v1, v2, v3 = volume.spacing_vectors() + for v, spacing in zip([v1, v2, v3], volume.spacing): + assert v.shape == (3, ) + assert np.linalg.norm(v) == spacing def test_volume_multiframe(): - dcm = pydicom.dcmread(get_testdata_file('eCT_Supplemental.dcm')) - volume = Volume.from_image(dcm) + volume, dcm = read_multiframe_ct_volume() assert isinstance(volume, Volume) rows, columns = dcm.Rows, dcm.Columns assert volume.shape == (dcm.NumberOfFrames, rows, columns) @@ -215,6 +241,23 @@ def test_volume_multiframe(): slice_spacing = 10.0 assert volume.spacing == [slice_spacing, *pixel_spacing[::-1]] assert volume.number_of_channels is None + expected_voxel_volume = ( + pixel_spacing[0] * pixel_spacing[1] * slice_spacing + ) + expected_volume = expected_voxel_volume * np.prod(volume.spatial_shape) + assert np.allclose(volume.voxel_volume, expected_voxel_volume) + assert np.allclose(volume.physical_volume, expected_volume) + u1, u2, u3 = 
volume.unit_vectors() + for u in [u1, u2, u3]: + assert u.shape == (3, ) + assert np.linalg.norm(u) == 1.0 + assert np.allclose(u3, orientation[:3]) + assert np.allclose(u2, orientation[3:]) + + v1, v2, v3 = volume.spacing_vectors() + for v, spacing in zip([v1, v2, v3], volume.spacing): + assert v.shape == (3, ) + assert np.linalg.norm(v) == spacing def test_construction_mismatched_source_lists(): From 6efd2b8d92e30abc03fd751a2e346d540fc29801 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 4 Aug 2024 14:12:25 -0400 Subject: [PATCH 49/93] Remove source frame information from Volume --- src/highdicom/volume.py | 220 ---------------------------------------- tests/test_volume.py | 67 +----------- 2 files changed, 1 insertion(+), 286 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 12f0af44..27607a4d 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -94,9 +94,6 @@ def __init__( array: np.ndarray, affine: np.ndarray, frame_of_reference_uid: Optional[str] = None, - source_sop_instance_uids: Optional[Sequence[str]] = None, - source_frame_numbers: Optional[Sequence[int]] = None, - source_frame_dimension: int = 0, ): """ @@ -115,20 +112,6 @@ def __init__( component. The last row should have value [0, 0, 0, 1]. frame_of_reference_uid: Optional[str], optional Frame of reference UID for the frame of reference, if known. - source_sop_instance_uids: Optional[Sequence[str]], optional - SOP instance UIDs corresponding to each slice (stacked down - dimension 0) of the implied volume. This is relevant if and only if - the volume is formed from a series of single frame DICOM images. - source_frame_numbers: Optional[Sequence[int]], optional - Frame numbers of the source image (if any) corresponding to each - slice (stacked down dimension 0). This is relevant if and only if - the volume is formed from a set of frames of a single multiframe - DICOM image. - source_frame_dimension: int - Dimension (as a zero-based dimension index) down which source - frames were stacked to form the volume. Only applicable if - ``source_sop_instance_uids`` or ``source_frame_numbers`` is - provided, otherwise ignored. """ if array.ndim not in (3, 4): @@ -151,52 +134,6 @@ def __init__( self._affine = affine self._frame_of_reference_uid = frame_of_reference_uid - if source_frame_dimension not in (0, 1, 2): - raise ValueError( - f'Argument "source_frame_dimension" must have value 0, 1, or 2.' - ) - - if source_frame_numbers is not None: - if any(not isinstance(f, int) for f in source_frame_numbers): - raise TypeError( - "Argument 'source_frame_numbers' should be a sequence of ints." - ) - if any(f < 1 for f in source_frame_numbers): - raise ValueError( - "Argument 'source_frame_numbers' should contain only " - "(strictly) positive integers." - ) - if len(source_frame_numbers) != self._array.shape[source_frame_dimension]: - raise ValueError( - "Length of 'source_frame_numbers' should match size " - "of 'array' along the axis given by 'source_frame_dimension'." - ) - self._source_frame_numbers = list(source_frame_numbers) - else: - self._source_frame_numbers = None - if source_sop_instance_uids is not None: - if any(not isinstance(u, str) for u in source_sop_instance_uids): - raise TypeError( - "Argument 'source_sop_instance_uids' should be a sequence of " - "str." 
- ) - if ( - len(source_sop_instance_uids) != - self._array.shape[source_frame_dimension] - ): - raise ValueError( - "Length of 'source_sop_instance_uids' should match size " - "of 'array' along the axis given by 'source_frame_dimension'." - ) - self._source_sop_instance_uids = list(source_sop_instance_uids) - else: - self._source_sop_instance_uids = None - - if source_frame_numbers is not None or source_sop_instance_uids is not None: - self._source_frame_dimension = source_frame_dimension - else: - self._source_frame_dimension = None - @classmethod def from_image_series( cls, @@ -241,9 +178,6 @@ def from_image_series( raise ValueError('Images do not share a frame of reference.') series_datasets = sort_datasets(series_datasets) - sorted_source_sop_instance_uids = [ - ds.SOPInstanceUID for ds in series_datasets - ] ds = series_datasets[0] @@ -269,7 +203,6 @@ def from_image_series( affine=affine, array=array, frame_of_reference_uid=frame_of_reference_uid, - source_sop_instance_uids=sorted_source_sop_instance_uids, ) @classmethod @@ -321,7 +254,6 @@ def from_image( image_orientation, ) sorted_positions = [image_positions[i] for i in sort_index] - sorted_source_frame_numbers = [f + 1 for f in sort_index] if 'PixelMeasuresSequence' not in sfgs: raise ValueError('Frames do not share pixel measures.') @@ -354,7 +286,6 @@ def from_image( affine=affine, array=array, frame_of_reference_uid=dataset.FrameOfReferenceUID, - source_frame_numbers=sorted_source_frame_numbers, ) @classmethod @@ -366,8 +297,6 @@ def from_attributes( pixel_spacing: Sequence[float], spacing_between_slices: float, frame_of_reference_uid: Optional[str] = None, - source_sop_instance_uids: Optional[Sequence[str]] = None, - source_frame_numbers: Optional[Sequence[int]] = None, ) -> "Volume": """Create a volume from DICOM attributes. @@ -403,15 +332,6 @@ def from_attributes( frame_of_reference_uid: Union[str, None], optional Frame of reference UID, if known. Corresponds to DICOM attribute FrameOfReferenceUID. - source_sop_instance_uids: Union[Sequence[str], None], optional - Ordered SOP Instance UIDs of each frame, if known, in the situation - that the volume is formed from a sequence of individual DICOM - instances, stacked down the first axis (index 0).. - source_frame_numbers: Union[Sequence[int], None], optional - Ordered frame numbers of each frame of the source image, in the - situation that the volume is formed from a sequence of frames of - one multi-frame DICOM image, stacked down the first axis (index - 0). Returns ------- @@ -430,8 +350,6 @@ def from_attributes( affine=affine, array=array, frame_of_reference_uid=frame_of_reference_uid, - source_sop_instance_uids=source_sop_instance_uids, - source_frame_numbers=source_frame_numbers, ) @classmethod @@ -442,8 +360,6 @@ def from_components( direction: Sequence[float], spacing: Sequence[float], frame_of_reference_uid: Optional[str] = None, - source_sop_instance_uids: Optional[Sequence[str]] = None, - source_frame_numbers: Optional[Sequence[int]] = None, ) -> "Volume": """Construct a Volume from components. @@ -468,14 +384,6 @@ def from_components( Sequence of three integers giving the shape of the volume. frame_of_reference_uid: Union[str, None], optional Frame of reference UID for the frame of reference, if known. - source_sop_instance_uids: Union[Sequence[str], None], optional - Ordered SOP Instance UIDs of each frame, if known, in the situation - that the volume is formed from a sequence of individual DICOM - instances, stacked down the first axis (index 0). 
- source_frame_numbers: Union[Sequence[int], None], optional - Ordered frame numbers of each frame of the source image, in the - situation that the volume is formed from a sequence of frames of - one multi-frame DICOM image, stacked down the first axis (index 0). Returns ------- @@ -517,61 +425,8 @@ def from_components( array=array, affine=affine, frame_of_reference_uid=frame_of_reference_uid, - source_sop_instance_uids=source_sop_instance_uids, - source_frame_numbers=source_frame_numbers, ) - def get_index_for_frame_number( - self, - frame_number: int, - ) -> int: - """Get the slice index for a frame number. - - This is intended for volumes representing for multi-frame images. - - Parameters - ---------- - frame_number: int - 1-based frame number in the original image. - - Returns - ------- - 0-based index of this frame number down the - dimension of the volume given by ``source_frame_dimension``. - - """ - if self._source_frame_numbers is None: - raise RuntimeError( - "Frame information is not present." - ) - return self._source_frame_numbers.index(frame_number) - - def get_index_for_sop_instance_uid( - self, - sop_instance_uid: str, - ) -> int: - """Get the slice index for a SOP Instance UID. - - This is intended for volumes representing a series of single-frame - images. - - Parameters - ---------- - sop_instance_uid: str - SOP Instance of a particular image in the series. - - Returns - ------- - 0-based index of the image with the given SOP Instance UID down the - dimension of the volume given by ``source_frame_dimension``. - - """ - if self._source_sop_instance_uids is None: - raise RuntimeError( - "SOP Instance UID information is not present." - ) - return self._source_sop_instance_uids.index(sop_instance_uid) - def get_center_index(self, round_output: bool = False) -> np.ndarray: """Get array index of center of the volume. @@ -882,17 +737,6 @@ def number_of_channels(self) -> Optional[int]: return self._array.shape[3] return None - @property - def source_frame_dimension(self) -> Optional[int]: - """Optional[int]: Dimension along which source frames were stacked. - - Will return either 0, 1, or 2 when the volume was created from a source - image or image series. Will return ``None`` if the volume was not - created from a source image or image series. - - """ - return self._source_frame_dimension - @property def array(self) -> np.ndarray: """numpy.ndarray: Volume array.""" @@ -920,25 +764,6 @@ def array(self, value: np.ndarray) -> None: ) self._array = value - @property - def source_sop_instance_uids(self) -> Union[List[str], None]: - # TODO account for rotated arrays - """Union[List[str], None]: SOP Instance UID at each index.""" - if self._source_sop_instance_uids is not None: - return self._source_sop_instance_uids.copy() - - @property - def source_frame_numbers(self) -> Union[List[int], None]: - # TODO account for rotated arrays - """Union[List[int], None]: - - Frame number within the source image at each index down the first - dimension. - - """ - if self._source_frame_numbers is not None: - return self._source_frame_numbers.copy() - @property def direction_cosines(self) -> List[float]: """List[float]: @@ -1115,9 +940,6 @@ def copy(self) -> 'Volume': array=self.array, # TODO should this copy? 
affine=self._affine.copy(), frame_of_reference_uid=self.frame_of_reference_uid, - source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), - source_frame_numbers=deepcopy(self.source_frame_numbers), - source_frame_dimension=self.source_frame_dimension or 0, ) def with_array(self, array: np.ndarray) -> 'Volume': @@ -1151,9 +973,6 @@ def with_array(self, array: np.ndarray) -> 'Volume': array=array, affine=self._affine.copy(), frame_of_reference_uid=self.frame_of_reference_uid, - source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), - source_frame_numbers=deepcopy(self.source_frame_numbers), - source_frame_dimension=self.source_frame_dimension or 0, ) def __getitem__( @@ -1227,31 +1046,6 @@ def __getitem__( new_vectors.append(self._affine[:3, d] * step) origin_indices.append(first) - if self.source_frame_dimension is not None: - if d == self.source_frame_dimension: - if index_item is not None: - # Need to index the source frame lists along this - # dimension - if self._source_sop_instance_uids is not None: - new_sop_instance_uids = ( - self._source_sop_instance_uids[ - index_item - ] - ) - if self._source_frame_numbers is not None: - new_frame_numbers = self._source_frame_numbers[ - index_item - ] - else: - # Not indexing along this dimension so the lists are - # unchanged - new_sop_instance_uids = deepcopy( - self.source_sop_instance_uids - ) - new_frame_numbers = deepcopy( - self.source_frame_numbers - ) - origin_index_arr = np.array([origin_indices]) new_origin_arr = self.map_indices_to_reference(origin_index_arr).T @@ -1267,9 +1061,6 @@ def __getitem__( array=new_array, affine=new_affine, frame_of_reference_uid=self.frame_of_reference_uid, - source_sop_instance_uids=new_sop_instance_uids, - source_frame_numbers=new_frame_numbers, - source_frame_dimension=self.source_frame_dimension or 0, ) def permute_axes(self, indices: Sequence[int]) -> 'Volume': @@ -1306,20 +1097,10 @@ def permute_axes(self, indices: Sequence[int]) -> 'Volume': permute_indices=indices, ) - if self.source_frame_dimension is None: - new_source_frame_dimension = 0 - else: - new_source_frame_dimension = indices.index( - self.source_frame_dimension - ) - return self.__class__( array=new_array, affine=new_affine, frame_of_reference_uid=self.frame_of_reference_uid, - source_sop_instance_uids=deepcopy(self.source_sop_instance_uids), - source_frame_numbers=deepcopy(self.source_frame_numbers), - source_frame_dimension=new_source_frame_dimension, ) def swap_axes(self, axis_1: int, axis_2: int) -> 'Volume': @@ -1896,7 +1677,6 @@ def pad_array(array: np.ndarray, cval: float) -> float: array=new_array, affine=new_affine, frame_of_reference_uid=self.frame_of_reference_uid, - source_frame_dimension=self.source_frame_dimension or 0, ) def pad_to_shape( diff --git a/tests/test_volume.py b/tests/test_volume.py index 2d4bae24..68a21516 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -151,18 +151,6 @@ def test_volume_single_frame(): assert volume.shape == (len(ct_series), rows, columns) assert volume.spatial_shape == volume.shape assert volume.number_of_channels is None - assert volume.source_frame_numbers is None - source_sop_instance_uids = [ - ct_series[0].SOPInstanceUID, - ct_series[2].SOPInstanceUID, - ct_series[1].SOPInstanceUID, - ] - assert volume.source_sop_instance_uids == source_sop_instance_uids - assert volume.get_index_for_sop_instance_uid( - ct_series[2].SOPInstanceUID - ) == 1 - with pytest.raises(RuntimeError): - volume.get_index_for_frame_number(2) orientation = 
ct_series[0].ImageOrientationPatient assert volume.direction_cosines == orientation direction = volume.direction @@ -202,13 +190,6 @@ def test_volume_multiframe(): rows, columns = dcm.Rows, dcm.Columns assert volume.shape == (dcm.NumberOfFrames, rows, columns) assert volume.spatial_shape == volume.shape - assert volume.source_frame_numbers == [2, 1] - assert volume.source_sop_instance_uids is None - with pytest.raises(RuntimeError): - volume.get_index_for_sop_instance_uid( - dcm.SOPInstanceUID - ) - assert volume.get_index_for_frame_number(2) == 0 orientation = ( dcm .SharedFunctionalGroupsSequence[0] @@ -229,10 +210,9 @@ def test_volume_multiframe(): assert direction[:, 0] @ direction[:, 1] == 0.0 assert direction[:, 0] @ direction[:, 2] == 0.0 assert (direction[:, 0] ** 2).sum() == 1.0 - first_frame = volume.source_frame_numbers[0] first_frame_pos = ( dcm - .PerFrameFunctionalGroupsSequence[first_frame - 1] + .PerFrameFunctionalGroupsSequence[1] # due to ordering .PlanePositionSequence[0] .ImagePositionPatient ) @@ -260,32 +240,6 @@ def test_volume_multiframe(): assert np.linalg.norm(v) == spacing -def test_construction_mismatched_source_lists(): - array = np.random.randint(0, 100, (50, 50, 25)) - affine = np.array([ - [ 0.0, 0.0, 1.0, 0.0], - [ 0.0, 1.0, 0.0, 0.0], - [10.0, 0.0, 0.0, 30.0], - [ 0.0, 0.0, 0.0, 1.0], - ]) - sop_instance_uids = [UID() for _ in range(25)] - frame_numbers = list(range(25)) - with pytest.raises(ValueError): - Volume( - array=array, - affine=affine, - source_sop_instance_uids=sop_instance_uids, - source_frame_dimension=0, - ) - with pytest.raises(ValueError): - Volume( - array=array, - affine=affine, - source_frame_numbers=frame_numbers, - source_frame_dimension=0, - ) - - def test_indexing(): array = np.random.randint(0, 100, (25, 50, 50)) volume = Volume.from_attributes( @@ -294,7 +248,6 @@ def test_indexing(): image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0], pixel_spacing=[1.0, 1.0], spacing_between_slices=10.0, - source_frame_numbers=list(range(1, 26)), ) # Single integer index @@ -308,26 +261,22 @@ def test_indexing(): ]) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:4]) - assert subvolume.source_frame_numbers == [4] # With colons subvolume = volume[3, :] assert subvolume.shape == (1, 50, 50) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:4]) - assert subvolume.source_frame_numbers == [4] subvolume = volume[3, :, :] assert subvolume.shape == (1, 50, 50) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:4]) - assert subvolume.source_frame_numbers == [4] # Single slice index subvolume = volume[3:13] assert subvolume.shape == (10, 50, 50) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:13]) - assert subvolume.source_frame_numbers == list(range(4, 14)) # Multiple integer indices subvolume = volume[3, 7] @@ -340,19 +289,16 @@ def test_indexing(): ]) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:4, 7:8]) - assert subvolume.source_frame_numbers == [4] # Multiple integer indices in sequence (should be the same as above) subvolume = volume[:, 7][3, :] assert subvolume.shape == (1, 1, 50) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:4, 7:8]) - assert subvolume.source_frame_numbers == [4] subvolume = volume[3, :][:, 7] assert 
subvolume.shape == (1, 1, 50) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:4, 7:8]) - assert subvolume.source_frame_numbers == [4] # Negative index subvolume = volume[-4] @@ -365,14 +311,12 @@ def test_indexing(): ]) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[-4:-3]) - assert subvolume.source_frame_numbers == [22] # Negative index range subvolume = volume[-4:-2, :, :] assert subvolume.shape == (2, 50, 50) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[-4:-2]) - assert subvolume.source_frame_numbers == [22, 23] # Non-zero steps subvolume = volume[12:16:2, ::-1, :] @@ -385,7 +329,6 @@ def test_indexing(): ]) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[12:16:2, ::-1]) - assert subvolume.source_frame_numbers == [13, 15] def test_indexing_source_dimension_2(): @@ -396,19 +339,12 @@ def test_indexing_source_dimension_2(): [10.0, 0.0, 0.0, 30.0], [ 0.0, 0.0, 0.0, 1.0], ]) - sop_instance_uids = [UID() for _ in range(25)] volume = Volume( array=array, affine=affine, - source_sop_instance_uids=sop_instance_uids, - source_frame_dimension=2, ) subvolume = volume[12:14, :, 12:6:-2] - assert ( - subvolume.source_sop_instance_uids == - sop_instance_uids[12:6:-2] - ) assert np.array_equal(subvolume.array, array[12:14, :, 12:6:-2]) @@ -459,7 +395,6 @@ def test_to_patient_orientation(desired): image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0], pixel_spacing=[1.0, 1.0], spacing_between_slices=10.0, - source_frame_numbers=list(range(1, 26)), ) desired_tup = _normalize_patient_orientation(desired) From 4fde6945225405af29ea9c6d7b6ba691558c0d5f Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 4 Aug 2024 22:14:53 -0400 Subject: [PATCH 50/93] Add random crop method --- src/highdicom/volume.py | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 27607a4d..d28ad91f 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -1028,8 +1028,6 @@ def __getitem__( new_array = self._array[tuple_index] - new_sop_instance_uids = None - new_frame_numbers = None new_vectors = [] origin_indices = [] @@ -1867,6 +1865,34 @@ def pad_or_crop_to_shape( ) return padded + def random_crop(self, spatial_shape: Sequence[int]) -> 'Volume': + """Create a random crop of a certain shape from the volume. + + Parameters + ---------- + spatial_shape: Sequence[int] + Sequence of three integers specifying the spatial shape to pad or + crop to. + + Returns + ------- + highdicom.Volume: + New volume formed by cropping the volumes. + + """ + crop_slices = [] + for c, d in zip(spatial_shape, self.spatial_shape): + max_start = d - c + if max_start < 0: + raise ValueError( + 'Crop shape is larger than volume in at least one ' + 'dimension.' + ) + start = np.random.randint(0, max_start + 1) + crop_slices.append(slice(start, start + c)) + + return self[tuple(crop_slices)] + def concat_channels(volumes: Sequence[Volume]) -> Volume: """Form a new volume by concatenating channels of existing volumes. 
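The start-index logic in ``random_crop`` draws uniformly from every valid
position, including both extremes; per axis it looks like the following
standalone sketch:

    import numpy as np

    volume_size, crop_size = 25, 10
    max_start = volume_size - crop_size
    # randint's upper bound is exclusive, so +1 keeps the last valid
    # start position reachable
    start = np.random.randint(0, max_start + 1)
    crop_slice = slice(start, start + crop_size)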
From e0d6c8be158224f8d7480e3246c3b5cf9b2a1946 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Thu, 8 Aug 2024 23:16:16 -0400
Subject: [PATCH 51/93] WIP on pixel transformations

---
 src/highdicom/volume.py | 81 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 75 insertions(+), 6 deletions(-)

diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index d28ad91f..cfac2415 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -6,6 +6,7 @@
 import numpy as np

 from highdicom._module_utils import is_multiframe_image
+from highdicom.color import ColorManager
 from highdicom.enum import (
     AxisHandedness,
     CoordinateSystemNames,
@@ -34,6 +35,12 @@
 )

 from pydicom import Dataset, dcmread
+from pydicom.pixel_data_handlers.util import (
+    apply_modality_lut,
+    apply_color_lut,
+    apply_voi_lut,
+    convert_color_space,
+)

 # TODO add basic arithmetric operations
-# TODO add pixel value transformations
 # TODO should methods copy arrays?
 # TODO random crop, random flip, random permute
 # TODO match geometry
 # TODO trim non-zero
-# TODO physical extent, physical volume, pixel area, voxel volume
 # TODO support slide coordinate system
 # TODO volume to volume transformer
 # TODO split out a separate geometry only class
 # TODO volread and metadata
 # TODO make RIGHT handed the default


 class Volume:
     def from_image_series(
         cls,
         series_datasets: Sequence[Dataset],
+        apply_modality_transform: bool = True,
+        apply_voi_transform: bool = False,
+        voi_transform_index: int = 0,
+        apply_palette_color_lut: bool = True,
+        apply_icc_transform: bool = True,
+        standardize_color_space: bool = True,
     ) -> "Volume":
         """Create volume from a series of single frame images.

         Parameters
         ----------
         series_datasets: Sequence[pydicom.Dataset]
             Series of single frame datasets. There is no requirement on the
             sorting of the datasets.
+        apply_modality_transform: bool, optional
+            Whether to apply the modality transform (either a rescale intercept
+            and slope or modality LUT) to the pixel values, if present in the
+            datasets.
+        apply_voi_transform: bool, optional
+            Whether to apply the value of interest (VOI) transform (either a
+            windowing operation or VOI LUT) to the pixel values, if present in
+            the datasets.
+        voi_transform_index: int, optional
+            Index of the VOI transform to apply if multiple are included in the
+            datasets. Ignored if ``apply_voi_transform`` is ``False`` or no VOI
+            transform is included in the datasets.
+        apply_palette_color_lut: bool, optional
+            Whether to apply the palette color LUT if a dataset has photometric
+            interpretation ``'PALETTE_COLOR'``.
+        apply_icc_transform: bool, optional
+            Whether to apply an ICC color profile, if present in the datasets.
+        standardize_color_space: bool, optional
+            Whether to convert the color space to a standardized space. If
+            True, images with photometric interpretation ``MONOCHROME1`` are
+            inverted to mimic ``MONOCHROME2``, and images with photometric
+            interpretation ``YBR_FULL`` or ``YBR_FULL_422`` are converted to
+            ``RGB``.

         Returns
         -------
         highdicom.Volume:
             Volume created from the series.

         """
+        if apply_voi_transform and not apply_modality_transform:
+            raise ValueError(
+                "Argument 'apply_voi_transform' requires "
+                "'apply_modality_transform'."
+ ) series_instance_uid = series_datasets[0].SeriesInstanceUID if not all( ds.SeriesInstanceUID == series_instance_uid @@ -196,8 +235,38 @@ def from_image_series( index_convention=VOLUME_INDEX_CONVENTION, ) - # TODO apply color, modality and VOI lookup - array = np.stack([ds.pixel_array for ds in series_datasets]) + frames = [] + for ds in series_datasets: + frame = ds.pixel_array + max_value = 2 ** np.iinfo(ds.pixel_array.dtype).bits + if apply_modality_transform: + frame = apply_modality_lut(frame, ds) + if apply_voi_transform: + frame = apply_voi_lut(frame, ds, voi_transform_index) + if ( + apply_palette_color_lut and + ds.PhotometricInterpretation == 'PALETTE_COLOR' + ): + frame = apply_color_lut(frame, ds) + if apply_icc_transform and 'ICCProfile' in ds: + manager = ColorManager(ds.ICCProfile) + frame = manager.transform_frame(frame) + if standardize_color_space: + if ds.PhotometricInterpretation == 'MONOCHROME1': + # TODO what if a VOI_LUT has been applied + frame = max_value - frame + elif ds.PhotometricInterpretation in ( + 'YBR_FULL', 'YBR_FULL_422' + ): + frame = convert_color_space( + frame, + current=ds.PhotometricInterpretation, + desired='RGB' + ) + + frames.append(frame) + + array = np.stack(frames) return cls( affine=affine, @@ -1157,7 +1226,7 @@ def flip(self, axis: Union[int, Sequence[int]]) -> 'Volume': if len(axis) > 3 or len(set(axis) - {0, 1, 2}) > 0: raise ValueError( - 'Arugment "axis" must contain only values 0, 1, and/or 2.' + 'Argument "axis" must contain only values 0, 1, and/or 2.' ) # We will re-use the existing __getitem__ implementation, which has all @@ -1515,7 +1584,7 @@ def ensure_channel(self) -> 'Volume': def pad( self, pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]], - mode: PadModes = PadModes.CONSTANT, + mode: Union[PadModes, str] = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, ) -> 'Volume': @@ -1541,7 +1610,7 @@ def pad( [before2, after2], [before3, after3]]``, in which separate values are supplied for the before and after padding of each of the three spatial dimensions. - mode: highdicom.PadModes, optional + mode: Union[highdicom.PadModes, str], optional Mode to use to pad the array. See :class:`highdicom.PadModes` for options. 
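The per-frame loop in ``from_image_series`` applies the standard pydicom
helpers in a fixed order (modality transform, then VOI, then color). A
minimal sketch of that order on a single dataset (the file path is
illustrative only):

    from pydicom import dcmread
    from pydicom.pixel_data_handlers.util import (
        apply_modality_lut,
        apply_voi_lut,
    )

    ds = dcmread('ct_slice.dcm')  # hypothetical single-frame CT file
    frame = ds.pixel_array
    frame = apply_modality_lut(frame, ds)      # rescale slope/intercept or LUT
    frame = apply_voi_lut(frame, ds, index=0)  # first VOI window/LUT, if any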
From 934077be1118cba4852c276ad3e4969363cc7aae Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Sat, 10 Aug 2024 16:24:29 -0400
Subject: [PATCH 52/93] Create new VolumeGeometry class

---
 src/highdicom/_multiframe.py |    5 +-
 src/highdicom/enum.py        |   80 +-
 src/highdicom/seg/content.py |   59 +-
 src/highdicom/seg/sop.py     |    4 +-
 src/highdicom/spatial.py     |  744 ++++++----
 src/highdicom/volume.py      | 2459 ++++++++++++++++++++--------------
 tests/test_seg.py            |   29 +-
 tests/test_spatial.py        |    8 +-
 tests/test_volume.py         |   18 +-
 9 files changed, 1980 insertions(+), 1426 deletions(-)

diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py
index 160729bf..361ad88c 100644
--- a/src/highdicom/_multiframe.py
+++ b/src/highdicom/_multiframe.py
@@ -27,9 +27,9 @@
 )
 from highdicom.seg.enum import SpatialLocationsPreservedValues
 from highdicom.spatial import (
-    VOLUME_INDEX_CONVENTION,
     _create_affine_transformation_matrix,
     _translate_affine_matrix,
+    VOLUME_INDEX_CONVENTION,
     get_image_coordinate_system,
     get_volume_positions,
 )
@@ -464,6 +464,7 @@ def __init__(
             pixel_spacing=self.shared_pixel_spacing,
             spacing_between_slices=volume_spacing,
             index_convention=VOLUME_INDEX_CONVENTION,
+            slices_first=True,
         )
         col_defs.append('VolumePosition INTEGER NOT NULL')
         col_data.append(volume_positions)
@@ -857,8 +858,6 @@ def get_volume_affine(
         slice_start: int, optional
             Zero-based index into the slice positions within the implied
             volume marking the beginning of the relevant region.
-        index_convention: Sequence[highdicom.PixelIndexDirections], optional
-            Index convention to use to construct the affine matrix.
 
         Returns
         -------
diff --git a/src/highdicom/enum.py b/src/highdicom/enum.py
index 0d15632f..f815b639 100644
--- a/src/highdicom/enum.py
+++ b/src/highdicom/enum.py
@@ -21,100 +21,28 @@ class PixelIndexDirections(Enum):
 
     L = 'L'
     """
 
-    Left: Pixel index that increases moving across the image from right to left.
+    Left: Pixel index that increases moving across the rows from right to left.
 
     """
 
     R = 'R'
     """
 
-    Right: Pixel index that increases moving across the image from left to right.
+    Right: Pixel index that increases moving across the rows from left to right.
 
     """
 
     U = 'U'
     """
 
-    Up: Pixel index that increases moving up the image from bottom to top.
+    Up: Pixel index that increases moving up the columns from bottom to top.
 
     """
 
     D = 'D'
     """
 
-    Down: Pixel index that increases moving down the image from top to bottom.
-
-    """
-
-    I = 'I'
-    """
-
-    In: Pixel index that increases moving through the slices in the away from
-    the viewer.
-
-    """
-
-    O = 'O'
-    """
-
-    Out: Pixel index that increases moving through the slices in the towards
-    the viewer.
-
-    """
-
-
-class PatientFrameOfReferenceDirections(Enum):
-
-    """
-
-    Enumerated values used to describe directions in the patient frame of
-    reference coordinate space.
-
-    """
-
-    L = 'L'
-    """
-
-    Left: Direction that increases moving from the patient's right to left.
-
-    """
-
-    R = 'R'
-    """
-
-    Right: Direction that increases moving from the patient's left to right.
-
-    """
-
-    P = 'P'
-    """
-
-    Posterior: Direction that increases moving from the patient's anterior to
-    posterior.
-
-    """
-
-    A = 'A'
-    """
-
-    Anterior: Direction that increases moving from the patient's posterior to
-    anterior.
-
-    """
-
-    I = 'I'
-    """
-
-    Inferior: Direction that increases moving from the patient's superior to
-    inferior.
-
-    """
-
-    S = 'S'
-    """
-
-    Superior: Direction that increases moving from the patient's ingerior to
-    superior.
+    Down: Pixel index that increases moving down the columns from top to bottom.
 
     """
 
diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py
index 36873bd5..7029dd73 100644
--- a/src/highdicom/seg/content.py
+++ b/src/highdicom/seg/content.py
@@ -12,7 +12,11 @@
     AlgorithmIdentificationSequence,
     PlanePositionSequence,
 )
-from highdicom.enum import CoordinateSystemNames
+from highdicom.enum import (
+    AxisHandedness,
+    CoordinateSystemNames,
+    PixelIndexDirections,
+)
 from highdicom.seg.enum import SegmentAlgorithmTypeValues
 from highdicom.spatial import (
     _get_slice_distances,
@@ -611,6 +615,11 @@ def get_index_values(
         self,
         plane_positions: Sequence[PlanePositionSequence],
         image_orientation: Optional[Sequence[float]] = None,
+        index_convention: Union[str, Sequence[Union[PixelIndexDirections, str]]] = (
+            PixelIndexDirections.R,
+            PixelIndexDirections.D,
+        ),
+        handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED,
     ) -> Tuple[np.ndarray, np.ndarray]:
         """Get values of indexed attributes that specify position of planes.
 
@@ -618,7 +627,38 @@ def get_index_values(
         ----------
         plane_positions: Sequence[highdicom.PlanePositionSequence]
             Plane position of frames in a multi-frame image or in a series of
-            single-frame images
+            single-frame images.
+        image_orientation: Union[Sequence[float], None], optional
+            An image orientation to use to order frames within a 3D coordinate
+            system. By default (if ``image_orientation`` is ``None``), the
+            plane positions are ordered using their raw numerical values and
+            not along any particular spatial vector. If ``image_orientation``
+            is provided, planes are ordered along the positive direction of the
+            vector normal to the specified orientation. Should be a sequence
+            of 6 floats. This is only valid when plane position inputs contain
+            only the ImagePositionPatient.
+        index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+            Convention used to determine how to order frames if
+            ``image_orientation`` is specified. Should be a sequence of two
+            :class:`highdicom.enum.PixelIndexDirections` or their string
+            representations, giving in order, the indexing conventions used for
+            specifying pixel indices. For example ``('R', 'D')`` means that the
+            first pixel index indexes the columns from left to right, and the
+            second pixel index indexes the rows from top to bottom (this is the
+            convention typically used within DICOM). As another example ``('D',
+            'R')`` would switch the order of the indices to give the convention
+            typically used within NumPy.
+
+            Alternatively, a single shorthand string may be passed that combines
+            the string representations of the two directions. So for example,
+            passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+
+            This is used in combination with the ``handedness`` to determine
+            the positive direction used to order frames.
+        handedness: Union[highdicom.enum.AxisHandedness, str], optional
+            Choose the frame order such that the frame axis creates a
+            coordinate system with this handedness when combined with
+            the within-frame convention given by ``index_convention``.
Returns ------- @@ -631,15 +671,6 @@ def get_index_values( plane_indices: numpy.ndarray 1D array of planes indices for sorting frames according to their spatial position specified by the dimension index - image_orientation: Union[Sequence[float], None], optional - An image orientation to use to order frames within a 3D coordinate - system. By default (if ``image_orientation`` is ``None``), the - plane positions are ordered using their raw numerical values and - not along any particular spatial vector. If ``image_orientation`` - is provided, planes are ordered along the positive direction of the - vector normal to the specified. Should be a sequence of 6 floats. - This is only valid when plane position inputs contain only the - ImagePositionPatient. Note ---- @@ -679,7 +710,11 @@ def get_index_values( 'Provided "image_orientation" is only valid when ' 'plane_positions contain the ImagePositionPatient.' ) - normal_vector = get_normal_vector(image_orientation) + normal_vector = get_normal_vector( + image_orientation, + index_convention=index_convention, + handedness=handedness, + ) origin_distances = _get_slice_distances( plane_position_values[:, 0, :], normal_vector, diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index c61f8693..81f23d3a 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -1839,12 +1839,12 @@ def __init__( if self._coordinate_system == CoordinateSystemNames.PATIENT: if from_volume: # Skip checks as this is 3D by construction - # TODO check handedness # TODO what about omitted frames dimension_organization_type = ( DimensionOrganizationTypeValues.THREE_DIMENSIONAL ) else: + # TODO calculate spacing before omitting frames? spacing, _ = get_volume_positions( image_positions=np.array( plane_position_values[plane_sort_index, 0, :] @@ -1852,8 +1852,6 @@ def __init__( image_orientation=np.array( plane_orientation[0].ImageOrientationPatient ), - sort=False, - enforce_right_handed=True, ) if spacing is not None and spacing > 0.0: diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 014383e4..a67a9659 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -15,6 +15,7 @@ from highdicom._module_utils import is_multiframe_image from highdicom.enum import ( + AxisHandedness, CoordinateSystemNames, PixelIndexDirections, PatientOrientationValuesBiped, @@ -41,11 +42,10 @@ VOLUME_INDEX_CONVENTION = ( - PixelIndexDirections.I, PixelIndexDirections.D, PixelIndexDirections.R, ) -"""The indexing convention used for affine matrices within volumes.""" +"""Indexing convention used within volumes.""" def is_tiled_image(dataset: Dataset) -> bool: @@ -610,27 +610,6 @@ def _get_spatial_information( return position, orientation, pixel_spacing, spacing_between_slices -def _get_normal_vector(image_orientation: Sequence[float]) -> np.ndarray: - """Get normal vector given image cosines. - - Parameters - ---------- - image_orientation: Sequence[float] - Row and column cosines (6 element list) giving the orientation of the - image. - - Returns - ------- - np.ndarray - Array of shape (3, ) giving the normal vector to the image plane. - - """ - row_cosines = np.array(image_orientation[:3], dtype=float) - column_cosines = np.array(image_orientation[3:], dtype=float) - n = np.cross(row_cosines.T, column_cosines.T) - return n - - def _are_images_coplanar( image_position_a: Sequence[float], image_orientation_a: Sequence[float], @@ -641,9 +620,9 @@ def _are_images_coplanar( """Determine whether two images or image frames are coplanar. 
Two images are coplanar in the frame of reference coordinate system if and - only if their vectors have the same (or opposite direction) and the - shortest distance from the plane to the coordinate system origin is - the same for both planes. + only if their normal vectors have the same (or opposite direction) and the + shortest distance from the plane to the coordinate system origin is the + same for both planes. Parameters ---------- @@ -668,8 +647,8 @@ def _are_images_coplanar( True if the two images are coplanar. False otherwise. """ - n_a = _get_normal_vector(image_orientation_a) - n_b = _get_normal_vector(image_orientation_b) + n_a = get_normal_vector(image_orientation_a) + n_b = get_normal_vector(image_orientation_b) if 1.0 - np.abs(n_a @ n_b) > tol: return False @@ -682,24 +661,24 @@ def _are_images_coplanar( def _normalize_pixel_index_convention( c: Union[str, Sequence[Union[str, PixelIndexDirections]]], -) -> Tuple[PixelIndexDirections, PixelIndexDirections, PixelIndexDirections]: +) -> Tuple[PixelIndexDirections, PixelIndexDirections]: """Normalize and check a pixel index convention. Parameters ---------- c: Union[str, Sequence[Union[str, highdicom.enum.PixelIndexDirections]]] - Pixel index convention description consisting of three directions, - either L or R, either U or D, and either I or O, in any order. + Pixel index convention description consisting of two directions, + either L or R, and either U or D. Returns ------- - Tuple[highdicom.enum.PixelIndexDirections, highdicom.enum.PixelIndexDirections, highdicom.enum.PixelIndexDirections]: - Convention description in a canonical form as a tuple of three enum + Tuple[highdicom.enum.PixelIndexDirections, highdicom.enum.PixelIndexDirections]: + Convention description in a canonical form as a tuple of two enum instances. Furthermore this is guaranteed to be a valid description. """ # noqa: E501 - if len(c) != 3: - raise ValueError('Length of pixel index convention must be 3.') + if len(c) != 2: + raise ValueError('Length of pixel index convention must be 2.') c = tuple(PixelIndexDirections(d) for d in c) @@ -708,7 +687,6 @@ def _normalize_pixel_index_convention( criteria = [ ('L' in c_set) != ('R' in c_set), ('U' in c_set) != ('D' in c_set), - ('I' in c_set) != ('O' in c_set), ] if not all(criteria): c_str = [d.value for d in c] @@ -724,13 +702,13 @@ def _normalize_patient_orientation( PatientOrientationValuesBiped, PatientOrientationValuesBiped, ]: - """Normalize and check a frame of reference direction convention. + """Normalize and check a patient orientation. Parameters ---------- c: Union[str, Sequence[Union[str, highdicom.enum.PatientOrientationValuesBiped]]] - Frame of reference convention description consisting of three directions, - either L or R, either A or P, and either F or H, in any order. + Patient orientation consisting of three directions, either L or R, + either A or P, and either F or H, in any order. Returns ------- @@ -869,10 +847,100 @@ def _is_matrix_orthogonal( return np.allclose(m.T @ m, np.diag(norm_squared), atol=tol) +def get_normal_vector( + image_orientation: Sequence[float], + index_convention: Union[str, Sequence[Union[PixelIndexDirections, str]]] = ( + PixelIndexDirections.R, + PixelIndexDirections.D, + ), + handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED, +): + """Get a vector normal to an imaging plane. 
+
+    Parameters
+    ----------
+    image_orientation: Sequence[float]
+        Image orientation in the standard DICOM format used for the
+        ImageOrientationPatient and ImageOrientationSlide attributes,
+        consisting of 6 numbers representing the direction cosines along the
+        rows (first three elements) and columns (second three elements).
+    index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+        Convention used to index pixels. Should be a sequence of two
+        :class:`highdicom.enum.PixelIndexDirections` or their string
+        representations, giving in order, the indexing conventions used for
+        specifying pixel indices. For example ``('R', 'D')`` means that the
+        first pixel index indexes the columns from left to right, and the
+        second pixel index indexes the rows from top to bottom (this is the
+        convention typically used within DICOM). As another example ``('D',
+        'R')`` would switch the order of the indices to give the convention
+        typically used within NumPy.
+
+        Alternatively, a single shorthand string may be passed that combines
+        the string representations of the two directions. So for example,
+        passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+    handedness: Union[highdicom.enum.AxisHandedness, str], optional
+        Choose the positive direction of the resulting normal in order to give
+        this handedness in the resulting coordinate system. This assumes that
+        the normal vector will be used to define a coordinate system when
+        combined with the column cosines (unit vector pointing down the
+        columns) and row cosines (unit vector pointing along the rows) in that
+        order (for the sake of handedness, it does not matter whether the axis
+        defined by the normal vector is placed before or after the column and
+        row vectors because the two possibilities are cyclic permutations of
+        each other). If used to define a coordinate system with the row cosines
+        followed by the column cosines, the handedness of the resulting
+        coordinate system will be inverted.
+
+    Returns
+    -------
+    np.ndarray:
+        Unit normal vector as a NumPy array with shape (3, ).
+
+    """
+    image_orientation_arr = np.array(image_orientation, dtype=np.float64)
+    if image_orientation_arr.ndim != 1 or image_orientation_arr.shape[0] != 6:
+        raise ValueError(
+            "Argument 'image_orientation' should be an array of "
+            "length 6."
+        )
+    index_convention_ = _normalize_pixel_index_convention(index_convention)
+    handedness_ = AxisHandedness(handedness)
+
+    # Find normal vector to the imaging plane
+    row_cosines = image_orientation_arr[:3]
+    column_cosines = image_orientation_arr[3:]
+
+    rotation_columns = []
+    for d in index_convention_:
+        if d == PixelIndexDirections.R:
+            rotation_columns.append(row_cosines)
+        elif d == PixelIndexDirections.L:
+            rotation_columns.append(-row_cosines)
+        elif d == PixelIndexDirections.D:
+            rotation_columns.append(column_cosines)
+        elif d == PixelIndexDirections.U:
+            rotation_columns.append(-column_cosines)
+
+    if handedness_ == AxisHandedness.RIGHT_HANDED:
+        n = np.cross(rotation_columns[0], rotation_columns[1])
+    else:
+        n = np.cross(rotation_columns[1], rotation_columns[0])
+
+    return n
+
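
A small sketch of how the convention arguments affect the result, for an axial
orientation whose rows run along +x and columns along +y (values worked by
hand from the cross products above):

    import numpy as np

    iop = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]

    # Default ('R', 'D') with right-handed ordering gives +z.
    np.testing.assert_allclose(get_normal_vector(iop), [0.0, 0.0, 1.0])

    # Swapping the index order under the same handedness flips the normal.
    np.testing.assert_allclose(
        get_normal_vector(iop, index_convention='DR'), [0.0, 0.0, -1.0]
    )
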
 
 def create_rotation_matrix(
     image_orientation: Sequence[float],
+    index_convention: Union[str, Sequence[Union[PixelIndexDirections, str]]] = (
+        PixelIndexDirections.R,
+        PixelIndexDirections.D,
+    ),
+    slices_first: bool = False,
+    handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED,
+    pixel_spacing: Union[float, Sequence[float]] = 1.0,
+    spacing_between_slices: float = 1.0,
 ) -> np.ndarray:
-    """Builds a rotation matrix.
+    """Builds a rotation matrix (with or without scaling).
 
     Parameters
     ----------
@@ -882,34 +950,139 @@ def create_rotation_matrix(
         vertical, top to bottom, increasing row index) direction expressed in
         the three-dimensional patient or slide coordinate system defined by
         the frame of reference.
+    index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+        Convention used to index pixels. Should be a sequence of two
+        :class:`highdicom.enum.PixelIndexDirections` or their string
+        representations, giving in order, the indexing conventions used for
+        specifying pixel indices. For example ``('R', 'D')`` means that the
+        first pixel index indexes the columns from left to right, and the
+        second pixel index indexes the rows from top to bottom (this is the
+        convention typically used within DICOM). As another example ``('D',
+        'R')`` would switch the order of the indices to give the convention
+        typically used within NumPy.
+
+        Alternatively, a single shorthand string may be passed that combines
+        the string representations of the two directions. So for example,
+        passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+    slices_first: bool, optional
+        Whether the slice index dimension is placed before the rows and columns
+        (``True``) or after them.
+    handedness: Union[highdicom.enum.AxisHandedness, str], optional
+        Handedness to use to determine the positive direction of the slice
+        index. The resulting rotation matrix will have the given handedness.
+    pixel_spacing: Union[float, Sequence[float]], optional
+        Spacing between pixels in the in-frame dimensions. Either a single
+        value to apply in both the row and column dimensions, or a sequence of
+        length 2 giving ``[spacing_between_rows, spacing_between_columns]`` in
+        the same format as the DICOM "PixelSpacing" attribute.
+    spacing_between_slices: float, optional
+        Spacing between consecutive slices in the frame of reference
+        coordinate system in millimeter units.
 
     Returns
     -------
     numpy.ndarray
-        3 x 3 rotation matrix. Pre-multiplying a pixel index in format (column
+        3 x 3 rotation matrix. Pre-multiplying an image coordinate in the format (column
         index, row index, slice index) by this matrix gives the x, y, z
         position in the frame-of-reference coordinate system.
""" if len(image_orientation) != 6: raise ValueError('Argument "image_orientation" must have length 6.') + index_convention_ = _normalize_pixel_index_convention(index_convention) + handedness_ = AxisHandedness(handedness) + row_cosines = np.array(image_orientation[:3], dtype=float) column_cosines = np.array(image_orientation[3:], dtype=float) - n = np.cross(row_cosines.T, column_cosines.T) + if isinstance(pixel_spacing, Sequence): + if len(pixel_spacing) != 2: + raise Value.LEF( + "A sequence passed to argument 'pixel_spacing' must have " + "length 2." + ) + spacing_between_rows = float(pixel_spacing[0]) + spacing_between_columns = float(pixel_spacing[1]) + else: + spacing_between_rows = pixel_spacing + spacing_between_columns = pixel_spacing + + rotation_columns = [] + spacings = [] + for d in index_convention_: + if d == PixelIndexDirections.R: + rotation_columns.append(row_cosines) + spacings.append(spacing_between_columns) + elif d == PixelIndexDirections.L: + rotation_columns.append(-row_cosines) + spacings.append(spacing_between_columns) + elif d == PixelIndexDirections.D: + rotation_columns.append(column_cosines) + spacings.append(spacing_between_rows) + elif d == PixelIndexDirections.U: + rotation_columns.append(-column_cosines) + spacings.append(spacing_between_rows) + + if handedness_ == AxisHandedness.RIGHT_HANDED: + n = np.cross(rotation_columns[0], rotation_columns[1]) + else: + n = np.cross(rotation_columns[1], rotation_columns[0]) + + if slices_first: + rotation_columns.insert(0, n) + spacings.insert(0, spacing_between_slices) + else: + rotation_columns.append(n) + spacings.append(spacing_between_slices) + + rotation_columns = [c * s for c, s in zip(rotation_columns, spacings)] - return np.column_stack([ - row_cosines, - column_cosines, - n, - ]) + return np.column_stack(rotation_columns) + + +def _stack_affine_matrix( + rotation: np.ndarray, + translation: np.ndarray, +) -> np.ndarray: + """Create an affine matrix by stacking together. + + Parameters + ---------- + rotation: numpy.ndarray + Numpy array of shape ``(3, 3)`` representing a scaled rotation matrix. + position: numpy.ndarray + Numpy array with three elements representing a translation. + + Returns + ------- + numpy.ndarray: + Affine matrix of shape ``(4, 4)``. + + """ + if rotation.shape != (3, 3): + raise ValueError( + "Argument 'rotation' must have shape (3, 3)." + ) + if translation.size != 3: + raise ValueError( + "Argument 'translation' must have 3 elements." + ) + + return np.row_stack( + [ + np.column_stack([rotation, translation.reshape(3, 1)]), + [0.0, 0.0, 0.0, 1.0] + ] + ) def _create_affine_transformation_matrix( image_position: Sequence[float], image_orientation: Sequence[float], - pixel_spacing: Sequence[float], + pixel_spacing: Union[float, Sequence[float]], spacing_between_slices: float = 1.0, - index_convention: Optional[Sequence[PixelIndexDirections]] = None, + index_convention: Union[str, Sequence[Union[PixelIndexDirections, str]]] = ( + PixelIndexDirections.R, + PixelIndexDirections.D, + ), + slices_first: bool = False, + handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED, ) -> np.ndarray: """Create affine matrix for transformation. @@ -937,12 +1110,32 @@ def _create_affine_transformation_matrix( direction (first value: spacing between rows, vertical, top to bottom, increasing row index) and the rows direction (second value: spacing between columns: horizontal, left to right, increasing - column index) + column index). 
+        column index). This matches the format of the DICOM "PixelSpacing"
+        attribute. Alternatively, a single value that is used along both
+        directions.
     spacing_between_slices: float
-        Spacing between consecutive slices.
-    index_convention: Union[Sequence[highdicom.enum.PixelIndexDirections], None]
-        Desired convention for the pixel index directions. Must consist of only
-        D, I, and R.
+        Spacing between consecutive slices in the frame of reference coordinate
+        system in millimeter units.
+    index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+        Convention used to index pixels. Should be a sequence of two
+        :class:`highdicom.enum.PixelIndexDirections` or their string
+        representations, giving in order, the indexing conventions used for
+        specifying pixel indices. For example ``('R', 'D')`` means that the
+        first pixel index indexes the columns from left to right, and the
+        second pixel index indexes the rows from top to bottom (this is the
+        convention typically used within DICOM). As another example ``('D',
+        'R')`` would switch the order of the indices to give the convention
+        typically used within NumPy.
+
+        Alternatively, a single shorthand string may be passed that combines
+        the string representations of the two directions. So for example,
+        passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+    slices_first: bool, optional
+        Whether the slice index dimension is placed before the rows and columns
+        (``True``) or after them.
+    handedness: Union[highdicom.enum.AxisHandedness, str], optional
+        Handedness to use to determine the positive direction of the slice
+        index. The resulting rotation matrix will have the given handedness.
 
     Returns
     -------
@@ -965,50 +1158,28 @@ def _create_affine_transformation_matrix(
     if len(pixel_spacing) != 2:
         raise ValueError('Argument "pixel_spacing" must have length 2.')
 
-    x_offset = float(image_position[0])
-    y_offset = float(image_position[1])
-    z_offset = float(image_position[2])
-    translation = np.array([x_offset, y_offset, z_offset], dtype=float)
+    index_convention_ = _normalize_pixel_index_convention(index_convention)
+    if (
+        PixelIndexDirections.L in index_convention_ or
+        PixelIndexDirections.U in index_convention_
+    ):
+        raise ValueError(
+            "Index convention cannot include 'L' or 'U'."
+        )
 
+    translation = np.array([float(x) for x in image_position], dtype=float)
     rotation = create_rotation_matrix(
-        image_orientation,
+        image_orientation=image_orientation,
+        pixel_spacing=pixel_spacing,
+        spacing_between_slices=spacing_between_slices,
+        index_convention=index_convention_,
+        handedness=handedness,
+        slices_first=slices_first,
     )
-    # Column direction (spacing between rows)
-    spacing_between_rows = float(pixel_spacing[0])
-    # Row direction (spacing between columns)
-    spacing_between_columns = float(pixel_spacing[1])
-
-    rotation[:, 0] *= spacing_between_columns
-    rotation[:, 1] *= spacing_between_rows
-    rotation[:, 2] *= spacing_between_slices
 
     # 4x4 transformation matrix
-    affine = np.row_stack(
-        [
-            np.column_stack([
-                rotation,
-                translation,
-            ]),
-            [0.0, 0.0, 0.0, 1.0]
-        ]
-    )
-
-    if index_convention is not None:
-        current_convention = (
-            PixelIndexDirections.R,
-            PixelIndexDirections.D,
-            PixelIndexDirections.I,
-        )
-        if set(index_convention) != set(current_convention):
-            raise ValueError(
-                'Index convention must consist of D, I, and R.'
-            )
-        affine = _transform_affine_to_convention(
-            affine=affine,
-            shape=(1, 1, 1),  # dummy (not used)
-            from_index_convention=current_convention,
-            to_index_convention=index_convention,
-        )
+    affine = _stack_affine_matrix(rotation, translation)
 
     return affine
 
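
A worked example of the affine produced by ``_create_affine_transformation_matrix``
under the default ``('R', 'D')`` convention with the slice axis last (a sketch;
values chosen by hand):

    import numpy as np

    affine = _create_affine_transformation_matrix(
        image_position=[10.0, 20.0, 30.0],
        image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],
        pixel_spacing=[0.5, 0.5],
        spacing_between_slices=2.0,
    )
    # Scaled rotation in the upper-left block, position in the last column.
    np.testing.assert_allclose(
        affine,
        np.array([
            [0.5, 0.0, 0.0, 10.0],
            [0.0, 0.5, 0.0, 20.0],
            [0.0, 0.0, 2.0, 30.0],
            [0.0, 0.0, 0.0, 1.0],
        ]),
    )
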
@@ -1080,31 +1251,20 @@ def _create_inv_affine_transformation_matrix(
     if len(pixel_spacing) != 2:
         raise ValueError('Argument "pixel_spacing" must have length 2.')
 
-    x_offset = float(image_position[0])
-    y_offset = float(image_position[1])
-    z_offset = float(image_position[2])
-    translation = np.array([x_offset, y_offset, z_offset])
+    translation = np.array([float(x) for x in image_position], dtype=float)
 
-    rotation = create_rotation_matrix(image_orientation)
+    rotation = create_rotation_matrix(
+        image_orientation=image_orientation,
+        pixel_spacing=pixel_spacing,
+        spacing_between_slices=spacing_between_slices,
+    )
 
-    # Column direction (spacing between rows)
-    spacing_between_rows = float(pixel_spacing[0])
-    # Row direction (spacing between columns)
-    spacing_between_columns = float(pixel_spacing[1])
-    rotation[:, 0] *= spacing_between_columns
-    rotation[:, 1] *= spacing_between_rows
-    rotation[:, 2] *= spacing_between_slices
     inv_rotation = np.linalg.inv(rotation)
 
     # 4x4 transformation matrix
-    return np.row_stack(
-        [
-            np.column_stack([
-                inv_rotation,
-                -np.dot(inv_rotation, translation)
-            ]),
-            [0.0, 0.0, 0.0, 1.0]
-        ]
+    return _stack_affine_matrix(
+        rotation=inv_rotation,
+        translation=-np.dot(inv_rotation, translation)
     )
 
 
@@ -1204,7 +1364,7 @@ def _translate_affine_matrix(
     affine: np.ndarray,
     pixel_offset: Sequence[int],
 ) -> np.ndarray:
-    """Translate the origin of an affine matrix.
+    """Translate the origin of an affine matrix by a pixel offset.
 
     Parameters
     ----------
@@ -1236,18 +1396,12 @@ def _transform_affine_matrix(
 def _transform_affine_to_convention(
     affine: np.ndarray,
     shape: Sequence[int],
-    from_index_convention: Union[
-        str, Sequence[Union[str, PixelIndexDirections]], None
-    ] = None,
-    to_index_convention: Union[
-        str, Sequence[Union[str, PixelIndexDirections]], None
-    ] = None,
     from_reference_convention: Union[
-        str, Sequence[Union[str, PatientOrientationValuesBiped]], None
-    ] = None,
+        str, Sequence[Union[str, PatientOrientationValuesBiped]],
+    ],
     to_reference_convention: Union[
-        str, Sequence[Union[str, PatientOrientationValuesBiped]], None
-    ] = None,
+        str, Sequence[Union[str, PatientOrientationValuesBiped]],
+    ]
 ) -> np.ndarray:
     """Transform an affine matrix between different conventions.
 
@@ -1257,13 +1411,9 @@ def _transform_affine_to_convention(
         Affine matrix to transform.
     shape: Sequence[int]
         Shape of the array.
-    from_index_convention: Union[str, Sequence[Union[str, PixelIndexDirections]], None], optional
-        Index convention used in the input affine.
-    to_index_convention: Union[str, Sequence[Union[str, PixelIndexDirections]], None], optional
-        Desired index convention for the output affine.
-    from_reference_convention: Union[str, Sequence[Union[str, PatientOrientationValuesBiped]], None], optional
+    from_reference_convention: Union[str, Sequence[Union[str, PatientOrientationValuesBiped]]]
         Reference convention used in the input affine.
-    to_reference_convention: Union[str, Sequence[Union[str, PatientOrientationValuesBiped]], None], optional
+    to_reference_convention: Union[str, Sequence[Union[str, PatientOrientationValuesBiped]]]
        Desired reference convention for the output affine.
 
     Returns
     -------
     numpy.ndarray:
         Affine matrix after operations are applied.
""" # noqa: E501 - indices_opposites = { - PixelIndexDirections.U: PixelIndexDirections.D, - PixelIndexDirections.D: PixelIndexDirections.U, - PixelIndexDirections.L: PixelIndexDirections.R, - PixelIndexDirections.R: PixelIndexDirections.L, - PixelIndexDirections.I: PixelIndexDirections.O, - PixelIndexDirections.O: PixelIndexDirections.I, - } - - if (from_index_convention is None) != (to_index_convention is None): - raise TypeError( - 'Arguments "from_index_convention" and "to_index_convention" ' - 'should either both be None, or neither should be None.' - ) - if from_index_convention is not None and to_index_convention is not None: - from_index_normed = _normalize_pixel_index_convention( - from_index_convention - ) - to_index_normed = _normalize_pixel_index_convention( - to_index_convention - ) - flip_indices = [ - d not in from_index_normed for d in to_index_normed - ] - - permute_indices = [] - for d, flipped in zip(to_index_normed, flip_indices): - if flipped: - d_ = indices_opposites[d] - permute_indices.append(from_index_normed.index(d_)) - else: - permute_indices.append(from_index_normed.index(d)) - else: - flip_indices = None - permute_indices = None - - if ( - (from_reference_convention is None) != (to_reference_convention is None) - ): - raise TypeError( - 'Arguments "from_reference_convention" and "to_reference_convention" ' - 'should either both be None, or neither should be None.' - ) - if ( - from_reference_convention is not None - and to_reference_convention is not None - ): - from_reference_normed = _normalize_patient_orientation( - from_reference_convention - ) - to_reference_normed = _normalize_patient_orientation( - to_reference_convention - ) + from_reference_normed = _normalize_patient_orientation( + from_reference_convention + ) + to_reference_normed = _normalize_patient_orientation( + to_reference_convention + ) - flip_reference = [ - d not in to_reference_normed for d in from_reference_normed - ] - permute_reference = [] - for d, flipped in zip(to_reference_normed, flip_reference): - if flipped: - d_ = PATIENT_ORIENTATION_OPPOSITES[d] - permute_reference.append(from_reference_normed.index(d_)) - else: - permute_reference.append(from_reference_normed.index(d)) - else: - flip_reference = None - permute_reference = None + flip_reference = [ + d not in to_reference_normed for d in from_reference_normed + ] + permute_reference = [] + for d, flipped in zip(to_reference_normed, flip_reference): + if flipped: + d_ = PATIENT_ORIENTATION_OPPOSITES[d] + permute_reference.append(from_reference_normed.index(d_)) + else: + permute_reference.append(from_reference_normed.index(d)) return _transform_affine_matrix( affine=affine, shape=shape, - permute_indices=permute_indices, + permute_indices=None, permute_reference=permute_reference, - flip_indices=flip_indices, + flip_indices=None, flip_reference=flip_reference, ) @@ -2863,9 +2963,14 @@ def get_series_volume_positions( datasets: Sequence[pydicom.Dataset], tol: float = _DEFAULT_SPACING_TOLERANCE, sort: bool = True, - enforce_right_handed: bool = False, allow_missing: bool = False, allow_duplicates: bool = False, + index_convention: Union[ + str, + Sequence[Union[PixelIndexDirections, str]] + ] = VOLUME_INDEX_CONVENTION, + handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED, + enforce_handedness: bool = False, ) -> Tuple[Optional[float], Optional[List[int]]]: """Get volume positions and spacing for a series of single frame images. 
@@ -2894,14 +2999,6 @@ def get_series_volume_positions(
     makes the function tolerant of unsorted inputs. Set to False to check
     whether the positions represent a 3D volume in the specific order in
     which they are passed.
-    enforce_right_handed: bool, optional
-        If True and sort is False, require that the images are not only
-        regularly spaced but also that they are ordered correctly to give a
-        right-handed coordinate system, i.e. frames are ordered along the
-        direction of the increasing normal vector, as opposed to being ordered
-        regularly along the direction of the decreasing normal vector. If sort
-        is True, this has no effect since positions will be sorted in the
-        right-handed direction before finding the spacing.
     allow_missing: bool, optional
         Allow for slices missing from the volume. If True, the smallest
         distance between two consecutive slices is found and returned as the
@@ -2912,6 +3009,35 @@ def get_series_volume_positions(
     allow_duplicates: bool, optional
         Allow multiple slices to map to the same position within the volume.
         If False, duplicated image positions will result in failure.
+    index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+        Convention used to determine how to order frames. Should be a sequence
+        of two :class:`highdicom.enum.PixelIndexDirections` or their string
+        representations, giving in order, the indexing conventions used for
+        specifying pixel indices. For example ``('R', 'D')`` means that the
+        first pixel index indexes the columns from left to right, and the
+        second pixel index indexes the rows from top to bottom (this is the
+        convention typically used within DICOM). As another example ``('D',
+        'R')`` would switch the order of the indices to give the convention
+        typically used within NumPy.
+
+        Alternatively, a single shorthand string may be passed that combines
+        the string representations of the two directions. So for example,
+        passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+
+        This is used in combination with the ``handedness`` to determine
+        the positive direction used to order frames.
+    handedness: Union[highdicom.enum.AxisHandedness, str], optional
+        Choose the frame order such that the frame axis creates a
+        coordinate system with this handedness when combined with
+        the within-frame convention given by ``index_convention``.
+    enforce_handedness: bool, optional
+        If True and sort is False, require that the images are not only
+        regularly spaced but also that they are ordered correctly to give a
+        coordinate system with the specified handedness, i.e. frames are
+        ordered along the direction of the increasing normal vector, as opposed
+        to being ordered regularly along the direction of the decreasing normal
+        vector. If sort is True, this has no effect since positions will be
+        sorted in the correct direction before finding the spacing.
 
     Returns
     -------
@@ -2951,11 +3077,13 @@ def get_series_volume_positions(
         image_positions=positions,
         image_orientation=image_orientation,
         tol=tol,
-        enforce_right_handed=enforce_right_handed,
         sort=sort,
         allow_duplicates=allow_duplicates,
         allow_missing=allow_missing,
         spacing_hint=spacing_hint,
+        index_convention=index_convention,
+        handedness=handedness,
+        enforce_handedness=enforce_handedness,
     )
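
A brief usage sketch for ``get_series_volume_positions`` (the file names are
hypothetical):

    from pydicom import dcmread

    datasets = [dcmread(f) for f in ['im3.dcm', 'im1.dcm', 'im2.dcm']]
    spacing, volume_positions = get_series_volume_positions(datasets)
    if spacing is None:
        raise ValueError('Series does not form a regularly spaced volume.')
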
 
@@ -2964,10 +3092,15 @@ def get_volume_positions(
     image_orientation: Sequence[float],
     tol: float = _DEFAULT_SPACING_TOLERANCE,
     sort: bool = True,
-    enforce_right_handed: bool = False,
     allow_missing: bool = False,
     allow_duplicates: bool = False,
     spacing_hint: Optional[float] = None,
+    index_convention: Union[
+        str,
+        Sequence[Union[PixelIndexDirections, str]]
+    ] = VOLUME_INDEX_CONVENTION,
+    handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED,
+    enforce_handedness: bool = False,
 ) -> Tuple[Optional[float], Optional[List[int]]]:
     """Get the spacing and positions of images within a 3D volume.
 
@@ -2977,12 +3110,12 @@ def get_volume_positions(
     the in-plane image coordinates.
 
     If the positions represent a volume, returns the absolute value of the
-    slice spacing and the slice indices in the volume for each of the input
-    positions. If the positions do not represent a volume, returns None for both
-    outputs.
+    slice spacing and the volume indices for each of the input positions. If
+    the positions do not represent a volume, returns None for both outputs.
 
     Note that we stipulate that a single image is a 3D volume for the purposes
-    of this function. In this case the returned slice spacing will be 1.0.
+    of this function. In this case, if ``spacing_hint`` is not provided, the
+    returned slice spacing will be 1.0.
 
     Parameters
     ----------
@@ -3002,14 +3135,6 @@ def get_volume_positions(
     makes the function tolerant of unsorted inputs. Set to False to check
     whether the positions represent a 3D volume in the specific order in
     which they are passed.
-    enforce_right_handed: bool, optional
-        If True and sort is False, require that the images are not only
-        regularly spaced but also that they are ordered correctly to give a
-        right-handed coordinate system, i.e. frames are ordered along the
-        direction of the increasing normal vector, as opposed to being ordered
-        regularly along the direction of the decreasing normal vector. If sort
-        is True, this has no effect since positions will be sorted in the
-        right-handed direction before finding the spacing.
     allow_missing: bool, optional
         Allow for slices missing from the volume. If True, the smallest
         distance between two consecutive slices is found and returned as the
@@ -3027,6 +3152,35 @@ def get_volume_positions(
     ``allow_missing`` is ``True`` and a ``spacing_hint`` is given, the hint
     is used to calculate the index positions instead of the smallest
     consecutive spacing.
+    index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+        Convention used to determine how to order frames. Should be a sequence
+        of two :class:`highdicom.enum.PixelIndexDirections` or their string
+        representations, giving in order, the indexing conventions used for
+        specifying pixel indices. For example ``('R', 'D')`` means that the
+        first pixel index indexes the columns from left to right, and the
+        second pixel index indexes the rows from top to bottom (this is the
+        convention typically used within DICOM). As another example ``('D',
+        'R')`` would switch the order of the indices to give the convention
+        typically used within NumPy.
+
+        Alternatively, a single shorthand string may be passed that combines
+        the string representations of the two directions. So for example,
+        passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+
+        This is used in combination with the ``handedness`` to determine
+        the positive direction used to order frames.
+    handedness: Union[highdicom.enum.AxisHandedness, str], optional
+        Choose the frame order such that the frame axis creates a
+        coordinate system with this handedness when combined with
+        the within-frame convention given by ``index_convention``.
+    enforce_handedness: bool, optional
+        If True and sort is False, require that the images are not only
+        regularly spaced but also that they are ordered correctly to give a
+        coordinate system with the specified handedness, i.e. frames are
+        ordered along the direction of the increasing normal vector, as opposed
+        to being ordered regularly along the direction of the decreasing normal
+        vector. If sort is True, this has no effect since positions will be
+        sorted in the correct direction before finding the spacing.
 
     Returns
     -------
@@ -3070,7 +3224,11 @@ def get_volume_positions(
         # Special case, we stipulate that this has spacing 1.0
         return 1.0, [0]
 
-    normal_vector = get_normal_vector(image_orientation)
+    normal_vector = get_normal_vector(
+        image_orientation,
+        index_convention=index_convention,
+        handedness=handedness,
+    )
 
     if allow_duplicates:
         # Unique index specifies, for each position in the input positions
@@ -3141,7 +3299,7 @@ def get_volume_positions(
         atol=tol
     ).all()
 
-    if is_regular and enforce_right_handed:
+    if is_regular and enforce_handedness:
         if spacing < 0.0:
             return None, None
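
To make the ordering concrete, a hand-worked sketch for ``get_volume_positions``:

    positions = [
        [0.0, 0.0, 6.0],
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 4.0],
        [0.0, 0.0, 2.0],
    ]
    orientation = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    spacing, volume_positions = get_volume_positions(positions, orientation)
    # A regular 2.0 mm spacing is recovered. Under the default ('D', 'R')
    # right-handed convention the normal for this orientation points along
    # -z, so slices are indexed by decreasing z.
    assert spacing == 2.0
    assert volume_positions == [0, 3, 1, 2]
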
@@ -3164,43 +3322,14 @@ def get_volume_positions(
     return None, None
 
 
-def get_normal_vector(
-    image_orientation: Sequence[float],
-):
-    """Get a vector normal to an imaging plane.
-
-    Parameters
-    ----------
-    image_orientation: Sequence[float]
-        Image orientation in the standard DICOM format used for the
-        ImageOrientationPatient and ImageOrientationSlide attributes,
-        consisting of 6 numbers representing the direction cosines along the
-        rows (first three elements) and columns (second three elements).
-
-    Returns
-    -------
-    np.ndarray:
-        Unit normal vector as a NumPy array with shape (3, ).
-
-    """
-    image_orientation_arr = np.array(image_orientation, dtype=np.float64)
-    if image_orientation_arr.ndim != 1 or image_orientation_arr.shape[0] != 6:
-        raise ValueError(
-            "Argument 'image_orientation' should be an array of "
-            "length 6."
-        )
-
-    # Find normal vector to the imaging plane
-    v1 = image_orientation_arr[:3]
-    v2 = image_orientation_arr[3:]
-    v3 = np.cross(v1, v2)
-
-    return v3
-
-
 def get_plane_sort_index(
     image_positions: Sequence[Sequence[float]],
     image_orientation: Sequence[float],
+    index_convention: Union[
+        str,
+        Sequence[Union[PixelIndexDirections, str]]
+    ] = VOLUME_INDEX_CONVENTION,
+    handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED,
 ) -> List[int]:
     """
 
@@ -3214,6 +3343,27 @@ def get_plane_sort_index(
         Image orientation as direction cosine values taken directly from the
         ImageOrientationPatient attribute. 1D array of length 6. Either a
         numpy array or anything convertible to it may be passed.
+    index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+        Convention used to determine how to order frames. Should be a sequence
+        of two :class:`highdicom.enum.PixelIndexDirections` or their string
+        representations, giving in order, the indexing conventions used for
+        specifying pixel indices. For example ``('R', 'D')`` means that the
+        first pixel index indexes the columns from left to right, and the
+        second pixel index indexes the rows from top to bottom (this is the
+        convention typically used within DICOM). As another example ``('D',
+        'R')`` would switch the order of the indices to give the convention
+        typically used within NumPy.
+
+        Alternatively, a single shorthand string may be passed that combines
+        the string representations of the two directions. So for example,
+        passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+
+        This is used in combination with the ``handedness`` to determine
+        the positive direction used to order frames.
+    handedness: Union[highdicom.enum.AxisHandedness, str], optional
+        Choose the frame order such that the frame axis creates a
+        coordinate system with this handedness when combined with
+        the within-frame convention given by ``index_convention``.
 
     Returns
     -------
@@ -3231,7 +3381,11 @@ def get_plane_sort_index(
     if ori_arr.ndim != 1 or ori_arr.shape[0] != 6:
         raise ValueError("Argument 'image_orientation' must have shape (6, )")
 
-    normal_vector = get_normal_vector(ori_arr)
+    normal_vector = get_normal_vector(
+        ori_arr,
+        index_convention=index_convention,
+        handedness=handedness,
+    )
 
     # Calculate distance of each slice from coordinate system origin along the
     # normal vector
@@ -3242,13 +3396,41 @@ def get_plane_sort_index(
     sort_index = np.argsort(origin_distances)
 
     return sort_index.tolist()
 
 
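
A hand-worked sketch for ``get_plane_sort_index``:

    positions = [[0.0, 0.0, 5.0], [0.0, 0.0, -5.0], [0.0, 0.0, 0.0]]
    orientation = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0]
    # Under the default ('D', 'R') right-handed convention the normal points
    # along -z for this orientation, so planes sort by decreasing z.
    assert get_plane_sort_index(positions, orientation) == [0, 2, 1]
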
-def get_dataset_sort_index(datasets: Sequence[Dataset]) -> List[int]:
+def get_dataset_sort_index(
+    datasets: Sequence[Dataset],
+    index_convention: Union[
+        str,
+        Sequence[Union[PixelIndexDirections, str]]
+    ] = VOLUME_INDEX_CONVENTION,
+    handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED,
+) -> List[int]:
     """Get index to sort single frame datasets spatially.
 
     Parameters
     ----------
     datasets: Sequence[pydicom.Dataset]
         Datasets containing single frame images, with a consistent orientation.
+    index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+        Convention used to determine how to order frames. Should be a sequence
+        of two :class:`highdicom.enum.PixelIndexDirections` or their string
+        representations, giving in order, the indexing conventions used for
+        specifying pixel indices. For example ``('R', 'D')`` means that the
+        first pixel index indexes the columns from left to right, and the
+        second pixel index indexes the rows from top to bottom (this is the
+        convention typically used within DICOM). As another example ``('D',
+        'R')`` would switch the order of the indices to give the convention
+        typically used within NumPy.
+
+        Alternatively, a single shorthand string may be passed that combines
+        the string representations of the two directions. So for example,
+        passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+
+        This is used in combination with the ``handedness`` to determine
+        the positive direction used to order frames.
+    handedness: Union[highdicom.enum.AxisHandedness, str], optional
+        Choose the frame order such that the frame axis creates a
+        coordinate system with this handedness when combined with
+        the within-frame convention given by ``index_convention``.
 
     Returns
     -------
@@ -3272,16 +3454,50 @@ def get_dataset_sort_index(
     ):
         raise ValueError('Datasets do not have a consistent orientation.')
     positions = [ds.ImagePositionPatient for ds in datasets]
-    return get_plane_sort_index(positions, image_orientation)
+    return get_plane_sort_index(
+        positions,
+        image_orientation,
+        index_convention=index_convention,
+        handedness=handedness,
+    )
 
 
-def sort_datasets(datasets: Sequence[Dataset]) -> List[Dataset]:
+def sort_datasets(
+    datasets: Sequence[Dataset],
+    index_convention: Union[
+        str,
+        Sequence[Union[PixelIndexDirections, str]]
+    ] = VOLUME_INDEX_CONVENTION,
+    handedness: Union[AxisHandedness, str] = AxisHandedness.RIGHT_HANDED,
+) -> List[Dataset]:
     """Sort single frame datasets spatially.
 
     Parameters
     ----------
     datasets: Sequence[pydicom.Dataset]
         Datasets containing single frame images, with a consistent orientation.
+    index_convention: Sequence[Union[highdicom.enum.PixelIndexDirections, str]], optional
+        Convention used to determine how to order frames. Should be a sequence
+        of two :class:`highdicom.enum.PixelIndexDirections` or their string
+        representations, giving in order, the indexing conventions used for
+        specifying pixel indices. For example ``('R', 'D')`` means that the
+        first pixel index indexes the columns from left to right, and the
+        second pixel index indexes the rows from top to bottom (this is the
+        convention typically used within DICOM). As another example ``('D',
+        'R')`` would switch the order of the indices to give the convention
+        typically used within NumPy.
+
+        Alternatively, a single shorthand string may be passed that combines
+        the string representations of the two directions. So for example,
+        passing ``'RD'`` is equivalent to passing ``('R', 'D')``.
+
+        This is used in combination with the ``handedness`` to determine
+        the positive direction used to order frames.
+    handedness: Union[highdicom.enum.AxisHandedness, str], optional
+        Choose the frame order such that the frame axis creates a
+        coordinate system with this handedness when combined with
+        the within-frame convention given by ``index_convention``.
+
 
     Returns
     -------
@@ -3292,7 +3508,11 @@ def sort_datasets(
     """
-    sort_index = get_dataset_sort_index(datasets)
+    sort_index = get_dataset_sort_index(
+        datasets,
+        index_convention=index_convention,
+        handedness=handedness,
+    )
     return [datasets[i] for i in sort_index]
diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index cfac2415..3328e9eb 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -1,7 +1,8 @@
+from abc import ABC, abstractmethod
 from copy import deepcopy
 from os import PathLike
 from pathlib import Path
-from typing import List, Optional, Sequence, Union, Tuple
+from typing import List, Optional, Sequence, Union, Tuple, cast
 
 import numpy as np
 
@@ -17,6 +18,7 @@
     _create_affine_transformation_matrix,
     _is_matrix_orthogonal,
     _normalize_patient_orientation,
+    _stack_affine_matrix,
     _transform_affine_matrix,
     _translate_affine_matrix,
     PATIENT_ORIENTATION_OPPOSITES,
@@ -42,6 +44,7 @@
     convert_color_space,
 )
 
+
 # TODO add basic arithmetic operations
 # TODO add pixel value transformations
 # TODO should methods copy arrays?
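
A usage sketch for the sorting helpers updated above (file names hypothetical):

    from pydicom import dcmread

    datasets = [dcmread(f) for f in ['im3.dcm', 'im1.dcm', 'im2.dcm']]

    # Ordering follows the default ('D', 'R') right-handed volume convention;
    # pass index_convention and/or handedness to override it.
    sorted_datasets = sort_datasets(datasets)
    order = get_dataset_sort_index(datasets)
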
@@ -50,54 +53,19 @@
 # TODO random crop, random flip, random permute
 # TODO match geometry
 # TODO trim non-zero
 # TODO support slide coordinate system
 # TODO volume to volume transformer
-# TODO split out a separate geometry only class
 # TODO volread and metadata
 # TODO make RIGHT handed the default
+# TODO constructors for geometry, do they make sense for volume?
+# TODO ordering of frames in seg, applying 3D
+# TODO use VolumeGeometry within MultiFrameDB manager
 
 
-class Volume:
-
-    """Class representing a 3D array of regularly-spaced frames in 3D space.
-
-    This class combines a 3D NumPy array with an affine matrix describing the
-    location of the voxels in the frame of reference coordinate space. A
-    Volume is not a DICOM object itself, but represents a volume that may
-    be extracted from DICOM image, and/or encoded within a DICOM object,
-    potentially following any number of processing steps.
-
-    All such volumes have a geometry that exists within DICOM's patient
-    coordinate system.
-
-    Internally this class uses the following conventions to represent the
-    geometry, however this can be constructed from or transformed to other
-    conventions with appropriate optional parameters to its methods:
-
-    * The pixel indices are ordered (slice index, row index, column index).
-    * Pixel indices are zero-based and represent the center of the pixel.
-    * Column indices are ordered top to bottom, row indices are ordered left to
-      right. The interpretation of the slice indices direction is not defined.
-    * The x, y, z coordinates of frame-of-reference coordinate system follow
-      the "LPS" convention used in DICOM (see
-      :dcm:`Part 3 Section C.7.6.2.1.1 `).
-      I.e.
-    * The first coordinate (``x``) increases from the patient's right to left
-    * The second coordinate (``y``) increases from the patient's anterior to
-      posterior.
-    * The third coordinate (``z``) increases from the patient's caudal
-      direction (inferior) to cranial direction (superior).
-
-    Note
-    ----
-    The ordering of pixel indices used by this class (slice, row, column)
-    matches the way pydicom and highdicom represent pixel arrays but differs
-    from the (column, row, slice) convention used by the various "transformer"
-    classes in the ``highdicom.spatial`` module.
+class _VolumeBase(ABC):
 
-    """
+    """Base class for objects exhibiting volume geometry."""
 
     def __init__(
         self,
-        array: np.ndarray,
         affine: np.ndarray,
         frame_of_reference_uid: Optional[str] = None,
     ):
@@ -105,10 +73,6 @@ def __init__(
 
         Parameters
         ----------
-        array: numpy.ndarray
-            Array of voxel data. Must be either 3D (three spatial dimensions),
-            or 4D (three spatial dimensions followed by a channel dimension).
-            Any datatype is permitted.
         affine: numpy.ndarray
             4 x 4 affine matrix representing the transformation from pixel
             indices (slice index, row index, column index) to the
@@ -120,11 +84,6 @@ def __init__(
             Frame of reference UID for the frame of reference, if known.
 
         """
-        if array.ndim not in (3, 4):
-            raise ValueError(
-                "Argument 'array' must be three or four-dimensional."
-            )
-
         if affine.shape != (4, 4):
             raise ValueError("Affine matrix must have shape (4, 4).")
         if not np.array_equal(affine[-1, :], np.array([0.0, 0.0, 0.0, 1.0])):
@@ -136,443 +95,97 @@ def __init__(
                 "Argument 'affine' must be an orthogonal matrix."
             )
 
-        self._array = array
         self._affine = affine
         self._frame_of_reference_uid = frame_of_reference_uid
 
-    @classmethod
-    def from_image_series(
-        cls,
-        series_datasets: Sequence[Dataset],
-        apply_modality_transform: bool = True,
-        apply_voi_transform: bool = False,
-        voi_transform_index: int = 0,
-        apply_palette_color_lut: bool = True,
-        apply_icc_transform: bool = True,
-        standardize_color_space: bool = True,
-    ) -> "Volume":
-        """Create volume from a series of single frame images.
+
+    @property
+    @abstractmethod
+    def spatial_shape(self) -> Tuple[int, int, int]:
+        """Tuple[int, int, int]: Spatial shape of the array.
+
+        Does not include the channel dimension.
+
+        """
+        pass
+
+    def get_center_index(self, round_output: bool = False) -> np.ndarray:
+        """Get array index of center of the volume.
 
         Parameters
         ----------
-        series_datasets: Sequence[pydicom.Dataset]
-            Series of single frame datasets. There is no requirement on the
-            sorting of the datasets.
-        apply_modality_transform: bool, optional
-            Whether to apply the modality transform (either a rescale intercept
-            and slope or modality LUT) to the pixel values, if present in the
-            datasets.
-        apply_voi_transform: bool, optional
-            Whether to apply the value of interest (VOI) transform (either a
-            windowing operation or VOI LUT) to the pixel values, if present in
-            the datasets.
-        voi_transform_index: int, optional
-            Index of the VOI transform to apply if multiple are included in the
-            datasets. Ignored if ``apply_voi_transform`` is ``False`` or no VOI
-            transform is included in the datasets.
-        apply_palette_color_lut: bool, optional
-            Whether to apply the palette color LUT if a dataset has photometric
-            interpretation ``'PALETTE_COLOR'``.
-        apply_icc_transform: bool, optional
-            Whether to apply an ICC color profile, if present in the datasets.
-        standardize_color_space: bool, optional
-            Whether to convert the color space to a standardized space. If
-            True, images with photometric interpretation ``MONOCHROME1`` are
-            inverted to mimic ``MONOCHROME2``, and images with photometric
-            interpretation ``YBR_FULL`` or ``YBR_FULL_422`` are converted to
-            ``RGB``.
+        round_output: bool, optional
+            If True, the result is rounded down to the nearest integer and
+            returned with an integer datatype. Otherwise it is returned as a
+            floating point datatype without rounding, to sub-voxel precision.
 
         Returns
         -------
-        Volume:
-            Volume created from the series.
+        numpy.ndarray:
+            Array of shape 3 representing the array indices at the center of
+            the volume.
 
         """
-        if apply_voi_transform and not apply_modality_transform:
-            raise ValueError(
-                "Argument 'apply_voi_transform' requires 'apply_modality_transform'."
+        if round_output:
+            center = np.array(
+                [(self.spatial_shape[d] - 1) // 2 for d in range(3)],
+                dtype=np.uint32,
             )
-        series_instance_uid = series_datasets[0].SeriesInstanceUID
-        if not all(
-            ds.SeriesInstanceUID == series_instance_uid
-            for ds in series_datasets
-        ):
-            raise ValueError('Images do not belong to the same series.')
-
-        coordinate_system = get_image_coordinate_system(series_datasets[0])
-        if (
-            coordinate_system is None or
-            coordinate_system != CoordinateSystemNames.PATIENT
-        ):
-            raise ValueError(
-                "Dataset should exist in the patient "
-                "coordinate_system."
-            )
-
-        frame_of_reference_uid = series_datasets[0].FrameOfReferenceUID
-        if not all(
-            ds.FrameOfReferenceUID == frame_of_reference_uid
-            for ds in series_datasets
-        ):
-            raise ValueError('Images do not share a frame of reference.')
-
-        series_datasets = sort_datasets(series_datasets)
-
-        ds = series_datasets[0]
-
-        if len(series_datasets) == 1:
-            slice_spacing = ds.get('SpacingBetweenSlices', 1.0)
         else:
-            slice_spacing, _ = get_series_volume_positions(series_datasets)
-            if slice_spacing is None:
-                raise ValueError('Series is not a regularly-spaced volume.')
+            center = np.array(
+                [(self.spatial_shape[d] - 1) / 2.0 for d in range(3)]
+            )
 
-        affine = _create_affine_transformation_matrix(
-            image_position=ds.ImagePositionPatient,
-            image_orientation=ds.ImageOrientationPatient,
-            pixel_spacing=ds.PixelSpacing,
-            spacing_between_slices=slice_spacing,
-            index_convention=VOLUME_INDEX_CONVENTION,
-        )
+        return center
 
-        frames = []
-        for ds in series_datasets:
-            frame = ds.pixel_array
-            max_value = 2 ** np.iinfo(ds.pixel_array.dtype).bits - 1
-            if apply_modality_transform:
-                frame = apply_modality_lut(frame, ds)
-            if apply_voi_transform:
-                frame = apply_voi_lut(frame, ds, voi_transform_index)
-            if (
-                apply_palette_color_lut and
-                ds.PhotometricInterpretation == 'PALETTE_COLOR'
-            ):
-                frame = apply_color_lut(frame, ds)
-            if apply_icc_transform and 'ICCProfile' in ds:
-                manager = ColorManager(ds.ICCProfile)
-                frame = manager.transform_frame(frame)
-            if standardize_color_space:
-                if ds.PhotometricInterpretation == 'MONOCHROME1':
-                    # TODO what if a VOI_LUT has been applied
-                    frame = max_value - frame
-                elif ds.PhotometricInterpretation in (
-                    'YBR_FULL', 'YBR_FULL_422'
-                ):
-                    frame = convert_color_space(
-                        frame,
-                        current=ds.PhotometricInterpretation,
-                        desired='RGB'
-                    )
 
-            frames.append(frame)
+    def get_center_coordinate(self) -> np.ndarray:
+        """Get frame-of-reference coordinate at the center of the volume.
 
-        array = np.stack(frames)
+        Returns
+        -------
+        numpy.ndarray:
+            Array of shape 3 representing the frame-of-reference coordinate at
+            the center of the volume.
 
-        return cls(
-            affine=affine,
-            array=array,
-            frame_of_reference_uid=frame_of_reference_uid,
-        )
+        """
+        center_index = self.get_center_index().reshape((1, 3))
+        center_coordinate = self.map_indices_to_reference(center_index)
 
-    @classmethod
-    def from_image(
-        cls,
-        dataset: Dataset,
-    ) -> "Volume":
-        """Create volume from a multiframe image.
+        return center_coordinate.reshape((3, ))
 
+    def map_indices_to_reference(
+        self,
+        indices: np.ndarray,
+    ) -> np.ndarray:
+        """Transform image pixel indices to frame of reference coordinates.
 
         Parameters
         ----------
-        dataset: pydicom.Dataset
-            A multi-frame image dataset.
+        indices: numpy.ndarray
+            Array of zero-based array indices. Array of integer values with
+            shape ``(n, 3)``, where *n* is the number of indices and the
+            columns represent the `slice`, `row`, and `column` indices,
+            respectively.
 
         Returns
         -------
-        Volume:
-            Volume created from the image.
+        numpy.ndarray
+            Array of (x, y, z) coordinates in the coordinate system defined by
+            the frame of reference. Array has shape ``(n, 3)``, where *n* is
+            the number of coordinates, the first column represents the `x`
+            offsets, the second column represents the `y` offsets and the third
+            column represents the `z` offsets.
+
+        Raises
+        ------
+        ValueError
+            When `indices` has incorrect shape.
""" - if not is_multiframe_image(dataset): - raise ValueError( - 'Dataset should be a multi-frame image.' - ) - coordinate_system = get_image_coordinate_system(dataset) - if ( - coordinate_system is None or - coordinate_system != CoordinateSystemNames.PATIENT - ): + if indices.ndim != 2 or indices.shape[1] != 3: raise ValueError( - "Dataset should exist in the patient " - "coordinate_system." - ) - sfgs = dataset.SharedFunctionalGroupsSequence[0] - if 'PlaneOrientationSequence' not in sfgs: - raise ValueError('Frames do not share an orientation.') - image_orientation = ( - sfgs - .PlaneOrientationSequence[0] - .ImageOrientationPatient - ) - pffgs = dataset.PerFrameFunctionalGroupsSequence - image_positions = [ - g.PlanePositionSequence[0].ImagePositionPatient - for g in pffgs - ] - sort_index = get_plane_sort_index( - image_positions, - image_orientation, - ) - sorted_positions = [image_positions[i] for i in sort_index] - - if 'PixelMeasuresSequence' not in sfgs: - raise ValueError('Frames do not share pixel measures.') - pixel_spacing = sfgs.PixelMeasuresSequence[0].PixelSpacing - - slice_spacing, _ = get_volume_positions( - image_positions=image_positions, - image_orientation=image_orientation, - ) - if slice_spacing is None: - raise ValueError( - 'Dataset does not represent a regularly sampled volume.' - ) - - affine = _create_affine_transformation_matrix( - image_position=sorted_positions[0], - image_orientation=image_orientation, - pixel_spacing=pixel_spacing, - spacing_between_slices=slice_spacing, - index_convention=VOLUME_INDEX_CONVENTION, - ) - - # TODO apply VOI color modality LUT etc - array = dataset.pixel_array - if array.ndim == 2: - array = array[np.newaxis] - array = array[sort_index] - - return cls( - affine=affine, - array=array, - frame_of_reference_uid=dataset.FrameOfReferenceUID, - ) - - @classmethod - def from_attributes( - cls, - array: np.ndarray, - image_position: Sequence[float], - image_orientation: Sequence[float], - pixel_spacing: Sequence[float], - spacing_between_slices: float, - frame_of_reference_uid: Optional[str] = None, - ) -> "Volume": - """Create a volume from DICOM attributes. - - Parameters - ---------- - array: numpy.ndarray - Three dimensional array of voxel data. The first dimension indexes - slices, the second dimension indexes rows, and the final dimension - indexes columns. - image_position: Sequence[float] - Position in the frame of reference space of the center of the top - left pixel of the image. Corresponds to DICOM attributes - "ImagePositionPatient". Should be a sequence of length 3. - image_orientation: Sequence[float] - Cosines of the row direction (first triplet: horizontal, left to - right, increasing column index) and the column direction (second - triplet: vertical, top to bottom, increasing row index) direction - expressed in the three-dimensional patient or slide coordinate - system defined by the frame of reference. Corresponds to the DICOM - attribute "ImageOrientationPatient". - pixel_spacing: Sequence[float] - Spacing between pixels in millimeter unit along the column - direction (first value: spacing between rows, vertical, top to - bottom, increasing row index) and the row direction (second value: - spacing between columns: horizontal, left to right, increasing - column index). Corresponds to DICOM attribute "PixelSpacing". - spacing_between_slices: float - Spacing between slices in millimeter units in the frame of - reference coordinate system space. 
Corresponds to the DICOM - attribute "SpacingBetweenSlices" (however, this may not be present in - many images and may need to be inferred from "ImagePositionPatient" - attributes of consecutive slices). - frame_of_reference_uid: Union[str, None], optional - Frame of reference UID, if known. Corresponds to DICOM attribute - FrameOfReferenceUID. - - Returns - ------- - highdicom.Volume: - New Volume using the given array and DICOM attributes. - - """ - affine = _create_affine_transformation_matrix( - image_position=image_position, - image_orientation=image_orientation, - pixel_spacing=pixel_spacing, - spacing_between_slices=spacing_between_slices, - index_convention=VOLUME_INDEX_CONVENTION, - ) - return cls( - affine=affine, - array=array, - frame_of_reference_uid=frame_of_reference_uid, - ) - - @classmethod - def from_components( - cls, - array: np.ndarray, - position: Sequence[float], - direction: Sequence[float], - spacing: Sequence[float], - frame_of_reference_uid: Optional[str] = None, - ) -> "Volume": - """Construct a Volume from components. - - Parameters - ---------- - array: numpy.ndarray - Three dimensional array of voxel data. - position: Sequence[float] - Sequence of three floats giving the position in the frame of - reference coordinate system of the center of the pixel at location - (0, 0, 0). - direction: Sequence[float] - Direction matrix for the volume. The columns of the direction - matrix are orthogonal unit vectors that give the direction in the - frame of reference space of the increasing direction of each axis - of the array. This matrix may be passed either as a 3x3 matrix or a - flattened 9 element array (first row, second row, third row). - spacing: Sequence[float] - Spacing between pixel centers in the the frame of reference - coordinate system along each of the dimensions of the array. - shape: Sequence[int] - Sequence of three integers giving the shape of the volume. - frame_of_reference_uid: Union[str, None], optional - Frame of reference UID for the frame of reference, if known. - - Returns - ------- - highdicom.spatial.Volume: - Volume constructed from the provided components. - - """ - if not isinstance(position, Sequence): - raise TypeError('Argument "position" must be a sequence.') - if len(position) != 3: - raise ValueError('Argument "position" must have length 3.') - if not isinstance(spacing, Sequence): - raise TypeError('Argument "spacing" must be a sequence.') - if len(spacing) != 3: - raise ValueError('Argument "spacing" must have length 3.') - direction_arr = np.array(direction, dtype=np.float32) - if direction_arr.shape == (9, ): - direction_arr = direction_arr.reshape(3, 3) - elif direction_arr.shape == (3, 3): - pass - else: - raise ValueError( - "Argument 'direction' must have shape (9, ) or (3, 3)." - ) - if not _is_matrix_orthogonal(direction_arr, require_unit=True): - raise ValueError( - "Argument 'direction' must be an orthogonal matrix of " - "unit vectors." - ) - - scaled_direction = direction_arr * spacing - affine = np.row_stack( - [ - np.column_stack([scaled_direction, position]), - [0.0, 0.0, 0.0, 1.0] - ] - ) - return cls( - array=array, - affine=affine, - frame_of_reference_uid=frame_of_reference_uid, - ) - - def get_center_index(self, round_output: bool = False) -> np.ndarray: - """Get array index of center of the volume. - - Parameters - ---------- - round_output: bool, optional - If True, the result is returned rounded down to and with an integer - datatype. 
Otherwise it is returned as a floating point datatype - without rounding, to sub-voxel precision. - - Returns - ------- - numpy.ndarray: - Array of shape 3 representing the array indices at the center of - the volume. - - """ - if round_output: - center = np.array( - [(self.shape[d] - 1) // 2 for d in range(3)], - dtype=np.uint32, - ) - else: - center = np.array( - [(self.shape[d] - 1) / 2.0 for d in range(3)] - ) - - return center - - def get_center_coordinate(self) -> np.ndarray: - """Get frame-of-reference coordinate at the center of the volume. - - Returns - ------- - numpy.ndarray: - Array of shape 3 representing the frame-of-reference coordinate at - the center of the volume. - - """ - center_index = self.get_center_index().reshape((1, 3)) - center_coordinate = self.map_indices_to_reference(center_index) - - return center_coordinate.reshape((3, )) - - def map_indices_to_reference( - self, - indices: np.ndarray, - ) -> np.ndarray: - """Transform image pixel indices to frame of reference coordinates. - - Parameters - ---------- - indices: numpy.ndarray - Array of zero-based array indices. Array of integer values with - shape ``(n, 3)``, where *n* is the number of indices, the first - column represents the `column` index and the second column - represents the `row` index. - - Returns - ------- - numpy.ndarray - Array of (x, y, z) coordinates in the coordinate system defined by - the frame of reference. Array has shape ``(n, 3)``, where *n* is - the number of coordinates, the first column represents the `x` - offsets, the second column represents the `y` offsets and the third - column represents the `z` offsets - - Raises - ------ - ValueError - When `indices` has incorrect shape. - - """ - if indices.ndim != 2 or indices.shape[1] != 3: - raise ValueError( - 'Argument "indices" must be a two-dimensional array ' - 'with shape [n, 3].' + 'Argument "indices" must be a two-dimensional array ' + 'with shape [n, 3].' ) indices_augmented = np.row_stack([ indices.T.astype(float), @@ -648,7 +261,7 @@ def map_reference_to_indices( if indices[:, d].min() < -0.5: out_of_bounds = True break - if indices[:, d].max() > self.shape[d] - 0.5: + if indices[:, d].max() > self.spatial_shape[d] - 0.5: out_of_bounds = True break @@ -674,7 +287,7 @@ def get_plane_position(self, plane_number: int) -> PlanePositionSequence: Plane position of the plane. """ - if plane_number < 0 or plane_number >= self.shape[0]: + if plane_number < 0 or plane_number >= self.spatial_shape[0]: raise ValueError("Invalid plane number for volume.") index = np.array([[plane_number, 0, 0]]) position = self.map_indices_to_reference(index)[0] @@ -699,7 +312,7 @@ def get_plane_positions(self) -> List[PlanePositionSequence]: """ indices = np.array( [ - [p, 0, 0] for p in range(self.shape[0]) + [p, 0, 0] for p in range(self.spatial_shape[0]) ] ) positions = self.map_indices_to_reference(indices) @@ -772,67 +385,6 @@ def inverse_affine(self) -> np.ndarray: """ return np.linalg.inv(self._affine) - @property - def dtype(self) -> type: - """type: Datatype of the array.""" - return self._array.dtype - - @property - def shape(self) -> Tuple[int, ...]: - """Tuple[int, ...]: Shape of the underlying array. - - May or may not include a fourth channel dimension. - - """ - return tuple(self._array.shape) - - @property - def spatial_shape(self) -> Tuple[int, int, int]: - """Tuple[int, int, int]: Spatial shape of the array. - - Does not include the channel dimension. 
-
-        """
-        return tuple(self._array.shape[:3])
-
-    @property
-    def number_of_channels(self) -> Optional[int]:
-        """Optional[int]: Number of channels.
-
-        If the array has no channel dimension, returns None.
-
-        """
-        if self._array.ndim == 4:
-            return self._array.shape[3]
-        return None
-
-    @property
-    def array(self) -> np.ndarray:
-        """numpy.ndarray: Volume array."""
-        return self._array
-
-    @array.setter
-    def array(self, value: np.ndarray) -> None:
-        """Change the voxel array without changing the affine.
-
-        Parameters
-        ----------
-        array: np.ndarray
-            New 3D or 4D array of voxel data. The spatial shape must match the
-            existing array, but the presence and number of channels and/or the
-            voxel datatype may differ.
-
-        """
-        if value.ndim not in (3, 4):
-            raise ValueError(
-                "Argument 'array' must be a three or four dimensional array."
-            )
-        if value.shape[:3] != self.spatial_shape:
-            raise ValueError(
-                "Array must match the spatial shape of the existing array."
-            )
-        self._array = value
-
     @property
     def direction_cosines(self) -> List[float]:
         """List[float]:
@@ -878,8 +430,7 @@ def spacing(self) -> List[float]:
         """List[float]:
             Pixel spacing in millimeter units for the three spatial directions.
 
-        Three values (spacing between slices, spacing spacing between rows,
-        spacing between columns).
+        Three values, one for each spatial dimension.
 
         """
         dir_mat = self._affine[:3, :3]
@@ -904,12 +455,12 @@ def position(self) -> List[float]:
     @property
     def physical_extent(self) -> List[float]:
         """List[float]: Side lengths of the volume in millimeters."""
-        return [(n + 1) * d for n, d in zip(self.shape, self.spacing)]
+        return [n * d for n, d in zip(self.spatial_shape, self.spacing)]
 
     @property
     def physical_volume(self) -> float:
         """float: Total volume in cubic millimeter."""
-        return self.voxel_volume * self.array.size
+        return self.voxel_volume * np.prod(self.spatial_shape).item()
 
     @property
     def direction(self) -> np.ndarray:
@@ -962,6 +513,252 @@ def unit_vectors(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
 
         """
         return tuple(self.direction.T)
 
+    @abstractmethod
+    def __getitem__(
+        self,
+        index: Union[int, slice, Tuple[Union[int, slice]]],
+    ) -> '_VolumeBase':
+        pass
+
+    def _prepare_getitem_index(
+        self,
+        index: Union[int, slice, Tuple[Union[int, slice]]],
+    ) -> Tuple[Tuple[slice], Tuple[int, int, int], np.ndarray]:
+
+        def _check_int(val: int, dim: int) -> None:
+            if (
+                val < -self.spatial_shape[dim]
+                or val >= self.spatial_shape[dim]
+            ):
+                raise IndexError(
+                    f'Index {val} is out of bounds for axis {dim} with size '
+                    f'{self.spatial_shape[dim]}.'
+                )
+
+        def _check_slice(val: slice, dim: int) -> None:
+            if (
+                val.start is not None and
+                (
+                    val.start < -self.spatial_shape[dim] or
+                    val.start >= self.spatial_shape[dim]
+                )
+            ):
+                raise ValueError(
+                    f'Index {val.start} is out of bounds for axis {dim} with '
+                    f'size {self.spatial_shape[dim]}.'
+                )
+            if (
+                val.stop is not None and
+                (
+                    val.stop < -self.spatial_shape[dim] - 1 or
+                    val.stop > self.spatial_shape[dim]
+                )
+            ):
+                raise ValueError(
+                    f'Index {val.stop} is out of bounds for axis {dim} with '
+                    f'size {self.spatial_shape[dim]}.'
+                )
+
+        if isinstance(index, int):
+            # Change the index to a slice of length one so that all dimensions
+            # are retained in the output array. Also make into a tuple of
+            # length 1 to standardize format
+            _check_int(index, 0)
+            tuple_index = (slice(index, index + 1), )
+        elif isinstance(index, slice):
+            # Make into a tuple of length one to standardize the format
+            _check_slice(index, 0)
+            tuple_index = (cast(slice, index), )
+        elif isinstance(index, tuple):
+            index_list: List[slice] = []
+            for dim, item in enumerate(index):
+                if isinstance(item, int):
+                    # Change the index to a slice of length one so that all
+                    # dimensions are retained in the output array.
+                    _check_int(item, dim)
+                    item = slice(item, item + 1)
+                    index_list.append(item)
+                elif isinstance(item, slice):
+                    _check_slice(item, dim)
+                    index_list.append(item)
+                else:
+                    raise TypeError(
+                        'Items within "index" must be ints, or slices. Got '
+                        f'{type(item)}.'
+                    )
+
+            tuple_index = tuple(index_list)
+
+        else:
+            raise TypeError(
+                'Argument "index" must be an int, slice or tuple. Got '
+                f'{type(index)}.'
+            )
+
+        new_vectors = []
+        origin_indices = []
+        new_shape = []
+        for d in range(0, 3):
+            # The index item along this dimension
+            if len(tuple_index) > d:
+                index_item = tuple_index[d]
+                first, last, step = index_item.indices(self.spatial_shape[d])
+                index_range = last - first
+                if index_range == 0 or ((index_range < 0) != (step < 0)):
+                    raise IndexError('Indexing would result in an empty array.')
+                size = (abs(index_range) - 1) // abs(step) + 1
+                new_shape.append(size)
+            else:
+                index_item = None
+                first = 0
+                step = 1
+                new_shape.append(self.spatial_shape[d])
+
+            new_vectors.append(self._affine[:3, d] * step)
+            origin_indices.append(first)
+
+        origin_index_arr = np.array([origin_indices])
+        new_origin_arr = self.map_indices_to_reference(origin_index_arr).T
+
+        new_rotation = np.column_stack(new_vectors)
+        new_affine = _stack_affine_matrix(new_rotation, new_origin_arr)
+
+        return tuple_index, tuple(new_shape), new_affine
+
+    @abstractmethod
+    def pad(
+        self,
+        pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]],
+        mode: Union[PadModes, str] = PadModes.CONSTANT,
+        constant_value: float = 0.0,
+        per_channel: bool = False,
+    ) -> '_VolumeBase':
+        pass
+
+    def _prepare_pad_width(
+        self,
+        pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]],
+    ) -> Tuple[np.ndarray, List[List[int]]]:
+        """Prepare a padding specification and affine for a pad operation.
+
+        Parameters
+        ----------
+        pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]]
+            Values to pad the array. Takes the same form as ``numpy.pad()``.
+            May be:
+
+            * A single integer value, which results in that many voxels being
+              added to the beginning and end of all three spatial dimensions,
+              or
+            * A sequence of two values in the form ``[before, after]``, which
+              results in 'before' voxels being added to the beginning of each
+              of the three spatial dimensions, and 'after' voxels being added
+              to the end of each of the three spatial dimensions, or
+            * A nested sequence of integers of the form ``[[pad1], [pad2],
+              [pad3]]``, in which separate padding values are supplied for each
+              of the three spatial axes and used to pad before and after along
+              those axes, or
+            * A nested sequence of integers in the form ``[[before1, after1],
+              [before2, after2], [before3, after3]]``, in which separate values
+              are supplied for the before and after padding of each of the
+              three spatial dimensions.
+
+            In all cases, all integer values must be non-negative.
+
+        Returns
+        -------
+        numpy.ndarray:
+            Affine matrix of the padded array.
+        List[List[int]]:
+            Padding specification along three spatial dimensions in format
+            ``[[before1, after1], [before2, after2], [before3, after3]]``.
+
+        """
+        if isinstance(pad_width, int):
+            if pad_width < 0:
+                raise ValueError(
+                    "Argument 'pad_width' cannot contain negative values."
+                )
+            full_pad_width: List[List[int]] = [[pad_width, pad_width]] * 3
+        elif isinstance(pad_width, Sequence):
+            if isinstance(pad_width[0], int):
+                if len(pad_width) != 2:
+                    raise ValueError("Invalid arrangement in 'pad_width'.")
+                if pad_width[0] < 0 or pad_width[1] < 0:
+                    raise ValueError(
+                        "Argument 'pad_width' cannot contain negative values."
+                    )
+                full_pad_width = [list(pad_width)] * 3
+            elif isinstance(pad_width[0], Sequence):
+                if len(pad_width) != 3:
+                    raise ValueError("Invalid arrangement in 'pad_width'.")
+                if len(pad_width[0]) == 1:
+                    if len(pad_width[1]) != 1 or len(pad_width[2]) != 1:
+                        raise ValueError("Invalid arrangement in 'pad_width'.")
+                    full_pad_width = [[w[0], w[0]] for w in pad_width]
+                elif len(pad_width[0]) == 2:
+                    if len(pad_width[1]) != 2 or len(pad_width[2]) != 2:
+                        raise ValueError("Invalid arrangement in 'pad_width'.")
+                    full_pad_width = [list(w) for w in pad_width]
+                else:
+                    raise ValueError("Invalid arrangement in 'pad_width'.")
        else:
+            raise TypeError("Invalid format for 'pad_width'.")
+
+        origin_offset = [-p[0] for p in full_pad_width]
+        new_affine = _translate_affine_matrix(self.affine, origin_offset)
+
+        return new_affine, full_pad_width
+
+    def _permute_affine(self, indices: Sequence[int]) -> np.ndarray:
+        """Get affine after permuting spatial axes.
+
+        Parameters
+        ----------
+        indices: Sequence[int]
+            List of three integers containing the values 0, 1 and 2 in some
+            order. Note that you may not change the position of the channel
+            axis (if present).
+
+        Returns
+        -------
+        numpy.ndarray:
+            Affine matrix (4 x 4) with spatial axes permuted in the provided
+            order.
+
+        """
+        if len(indices) != 3 or set(indices) != {0, 1, 2}:
+            raise ValueError(
+                'Argument "indices" must consist of the values 0, 1, and 2 '
+                'in some order.'
+            )
+
+        return _transform_affine_matrix(
+            affine=self._affine,
+            shape=self.spatial_shape,
+            permute_indices=indices,
+        )
+
+    @abstractmethod
+    def permute_axes(self, indices: Sequence[int]) -> '_VolumeBase':
+        """Create a new volume by permuting the spatial axes.
+
+        Parameters
+        ----------
+        indices: Sequence[int]
+            List of three integers containing the values 0, 1 and 2 in some
+            order. Note that you may not change the position of the channel
+            axis (if present).
+
+        Returns
+        -------
+        highdicom._VolumeBase:
+            New volume with spatial axes permuted in the provided order.
+
+        """
+        pass
+
     def get_closest_patient_orientation(self) -> Tuple[
         PatientOrientationValuesBiped,
         PatientOrientationValuesBiped,
@@ -974,8 +771,1076 @@ def get_closest_patient_orientation(self) -> Tuple[
             Tuple[highdicom.enum.PatientOrientationValuesBiped,
             highdicom.enum.PatientOrientationValuesBiped,
             highdicom.enum.PatientOrientationValuesBiped]:
                 Tuple giving the closest patient orientation.
 
-        """  # noqa: E501
-        return get_closest_patient_orientation(self._affine)
+        """  # noqa: E501
+        return get_closest_patient_orientation(self._affine)
+
+    def to_patient_orientation(
+        self,
+        patient_orientation: Union[
+            str,
+            Sequence[Union[str, PatientOrientationValuesBiped]],
+        ],
+    ) -> '_VolumeBase':
+        """Rearrange the array to a given orientation.
+ + The resulting volume is formed from this volume through a combination + of axis permutations and flips of the spatial axes. Its patient + orientation will be as close to the desired orientation as can be + achieved with these operations alone (and in particular without + resampling the array). + + Parameters + ---------- + patient_orientation: Union[str, Sequence[Union[str, highdicom.enum.PatientOrientationValuesBiped]]] + Desired patient orientation, as either a sequence of three + highdicom.enum.PatientOrientationValuesBiped values, or a string + such as ``"FPL"`` using the same characters. + + Returns + ------- + highdicom.Volume: + New volume with the requested patient orientation. + + """ # noqa: E501 + desired_orientation = _normalize_patient_orientation( + patient_orientation + ) + + current_orientation = self.get_closest_patient_orientation() + + permute_indices = [] + flip_axes = [] + for d in desired_orientation: + if d in current_orientation: + from_index = current_orientation.index(d) + else: + d_inv = PATIENT_ORIENTATION_OPPOSITES[d] + from_index = current_orientation.index(d_inv) + flip_axes.append(from_index) + permute_indices.append(from_index) + + if len(flip_axes) > 0: + result = self.flip(flip_axes) + else: + result = self + + return result.permute_axes(permute_indices) + + def swap_axes(self, axis_1: int, axis_2: int) -> '_VolumeBase': + """Swap the spatial axes of the array. + + Parameters + ---------- + axis_1: int + Spatial axis index (0, 1 or 2) to swap with ``axis_2``. + axis_2: int + Spatial axis index (0, 1 or 2) to swap with ``axis_1``. + + Returns + ------- + highdicom.Volume: + New volume with spatial axes swapped as requested. + + """ + for a in [axis_1, axis_2]: + if a not in {0, 1, 2}: + raise ValueError( + 'Axis values must be one of 0, 1 or 2.' + ) + + if axis_1 == axis_2: + raise ValueError( + "Arguments 'axis_1' and 'axis_2' must be different." + ) + + permutation = [0, 1, 2] + permutation[axis_1] = axis_2 + permutation[axis_2] = axis_1 + + return self.permute_axes(permutation) + + def flip(self, axis: Union[int, Sequence[int]]) -> '_VolumeBase': + """Flip the spatial axes of the array. + + Note that this flips the array and updates the affine to reflect the + flip. + + Parameters + ---------- + axis: Union[int, Sequence[int]] + Axis or list of axis indices that should be flipped. These should + include only the spatial axes (0, 1, and/or 2). + + Returns + ------- + highdicom.Volume: + New volume with spatial axes flipped as requested. + + """ + if isinstance(axis, int): + axis = [axis] + + if len(axis) > 3 or len(set(axis) - {0, 1, 2}) > 0: + raise ValueError( + 'Argument "axis" must contain only values 0, 1, and/or 2.' + ) + + # We will re-use the existing __getitem__ implementation, which has all + # this logic figured out already + index = [] + for d in range(3): + if d in axis: + index.append(slice(-1, None, -1)) + else: + index.append(slice(None)) + + return self[tuple(index)] + + @property + def handedness(self) -> AxisHandedness: + """highdicom.AxisHandedness: Axis handedness of the volume.""" + v1, v2, v3 = self.spacing_vectors() + if np.cross(v1, v2) @ v3 < 0.0: + return AxisHandedness.LEFT_HANDED + return AxisHandedness.RIGHT_HANDED + + def ensure_handedness( + self, + handedness: Union[AxisHandedness, str], + flip_axis: Optional[int] = None, + swap_axes: Optional[Sequence[int]] = None, + ) -> '_VolumeBase': + """Manipulate the volume if necessary to ensure a given handedness. 
+
+        If the volume already has the specified handedness, it is returned
+        unaltered.
+
+        If the volume does not meet the requirement, the volume is manipulated
+        using a user specified operation to meet the requirement. The two
+        options are reversing the direction of a single axis ("flipping") or
+        swapping the position of two axes.
+
+        Parameters
+        ----------
+        handedness: highdicom.AxisHandedness
+            Handedness to ensure.
+        flip_axis: Union[int, None], optional
+            Specification of a spatial axis index (0, 1, or 2) to flip if
+            required to meet the given handedness requirement.
+        swap_axes: Union[Sequence[int], None], optional
+            Specification of a sequence of two spatial axis indices (each being
+            0, 1, or 2) to swap if required to meet the given handedness
+            requirement.
+
+        Returns
+        -------
+        highdicom.Volume:
+            New volume with the requested handedness.
+
+        Note
+        ----
+        Either ``flip_axis`` or ``swap_axes`` must be provided (and not both)
+        to specify the operation to perform to correct the handedness (if
+        required).
+
+        """
+        if (flip_axis is None) == (swap_axes is None):
+            raise TypeError(
+                "Exactly one of either 'flip_axis' or 'swap_axes' "
+                "must be specified."
+            )
+        handedness = AxisHandedness(handedness)
+        if handedness == self.handedness:
+            return self
+
+        if flip_axis is not None:
+            return self.flip(flip_axis)
+
+        if len(swap_axes) != 2:
+            raise ValueError(
+                "Argument 'swap_axes' must have length 2."
+            )
+
+        return self.swap_axes(swap_axes[0], swap_axes[1])
+
+    def pad_to_shape(
+        self,
+        spatial_shape: Sequence[int],
+        mode: PadModes = PadModes.CONSTANT,
+        constant_value: float = 0.0,
+        per_channel: bool = False,
+    ) -> '_VolumeBase':
+        """Pad volume to given spatial shape.
+
+        The volume is padded symmetrically, placing the original array at the
+        center of the output array, to achieve the given shape. If this
+        requires an odd number of elements to be added along a certain
+        dimension, one more element is placed at the end of the array than at
+        the start.
+
+        Parameters
+        ----------
+        spatial_shape: Sequence[int]
+            Sequence of three integers specifying the spatial shape to pad to.
+            This shape must be no smaller than the existing shape along any of
+            the three spatial dimensions.
+        mode: highdicom.PadModes, optional
+            Mode to use to pad the array. See :class:`highdicom.PadModes` for
+            options.
+        constant_value: Union[float, Sequence[float]], optional
+            Value used to pad when mode is ``"CONSTANT"``. If ``per_channel``
+            is True, a sequence whose length is equal to the number of channels
+            may be passed, and each value will be used for the corresponding
+            channel. With other pad modes, this argument is ignored.
+        per_channel: bool, optional
+            For padding modes that involve calculation of image statistics to
+            determine the padding value (i.e. ``MINIMUM``, ``MAXIMUM``,
+            ``MEAN``, ``MEDIAN``), pad each channel separately using the value
+            calculated using that channel alone (rather than the statistics of
+            the entire array). For other padding modes, this argument makes no
+            difference. This should not be True if the image does not have a
+            channel dimension.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume with padding applied.
+
+        """
+        if len(spatial_shape) != 3:
+            raise ValueError(
+                "Argument 'spatial_shape' must have length 3."
+            )
+
+        pad_width = []
+        for insize, outsize in zip(self.spatial_shape, spatial_shape):
+            to_pad = outsize - insize
+            if to_pad < 0:
+                raise ValueError(
+                    'Shape is smaller than existing shape along at least '
+                    'one axis.'
+                )
+            pad_front = to_pad // 2
+            pad_back = to_pad - pad_front
+            pad_width.append((pad_front, pad_back))
+
+        return self.pad(
+            pad_width=pad_width,
+            mode=mode,
+            constant_value=constant_value,
+            per_channel=per_channel,
+        )
+
+    def crop_to_shape(self, spatial_shape: Sequence[int]) -> '_VolumeBase':
+        """Center-crop volume to a given spatial shape.
+
+        Parameters
+        ----------
+        spatial_shape: Sequence[int]
+            Sequence of three integers specifying the spatial shape to crop to.
+            This shape must be no larger than the existing shape along any of
+            the three spatial dimensions.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume with cropping applied.
+
+        """
+        if len(spatial_shape) != 3:
+            raise ValueError(
+                "Argument 'spatial_shape' must have length 3."
+            )
+
+        crop_vals = []
+        for insize, outsize in zip(self.spatial_shape, spatial_shape):
+            to_crop = insize - outsize
+            if to_crop < 0:
+                raise ValueError(
+                    'Shape is larger than existing shape along at least '
+                    'one axis.'
+                )
+            crop_front = to_crop // 2
+            crop_back = to_crop - crop_front
+            crop_vals.append((crop_front, insize - crop_back))
+
+        return self[
+            crop_vals[0][0]:crop_vals[0][1],
+            crop_vals[1][0]:crop_vals[1][1],
+            crop_vals[2][0]:crop_vals[2][1],
+        ]
+
+    def pad_or_crop_to_shape(
+        self,
+        spatial_shape: Sequence[int],
+        mode: PadModes = PadModes.CONSTANT,
+        constant_value: float = 0.0,
+        per_channel: bool = False,
+    ) -> '_VolumeBase':
+        """Pad and/or crop volume to given spatial shape.
+
+        For each dimension where padding is required, the volume is padded
+        symmetrically, placing the original array at the center of the output
+        array, to achieve the given shape. If this requires an odd number of
+        elements to be added along a certain dimension, one more element is
+        placed at the end of the array than at the start.
+
+        For each dimension where cropping is required, center cropping is used.
+
+        Parameters
+        ----------
+        spatial_shape: Sequence[int]
+            Sequence of three integers specifying the spatial shape to pad or
+            crop to.
+        mode: highdicom.PadModes, optional
+            Mode to use to pad the array, if padding is required. See
+            :class:`highdicom.PadModes` for options.
+        constant_value: Union[float, Sequence[float]], optional
+            Value used to pad when mode is ``"CONSTANT"``. If ``per_channel``
+            is True, a sequence whose length is equal to the number of channels
+            may be passed, and each value will be used for the corresponding
+            channel. With other pad modes, this argument is ignored.
+        per_channel: bool, optional
+            For padding modes that involve calculation of image statistics to
+            determine the padding value (i.e. ``MINIMUM``, ``MAXIMUM``,
+            ``MEAN``, ``MEDIAN``), pad each channel separately using the value
+            calculated using that channel alone (rather than the statistics of
+            the entire array). For other padding modes, this argument makes no
+            difference. This should not be True if the image does not have a
+            channel dimension.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume with padding and/or cropping applied.
+
+        """
+        if len(spatial_shape) != 3:
+            raise ValueError(
+                "Argument 'spatial_shape' must have length 3."
+            )
+
+        pad_width = []
+        crop_vals = []
+        for insize, outsize in zip(self.spatial_shape, spatial_shape):
+            diff = outsize - insize
+            if diff > 0:
+                pad_front = diff // 2
+                pad_back = diff - pad_front
+                pad_width.append((pad_front, pad_back))
+                crop_vals.append((0, outsize))
+            elif diff < 0:
+                crop_front = (-diff) // 2
+                crop_back = (-diff) - crop_front
+                crop_vals.append((crop_front, insize - crop_back))
+                pad_width.append((0, 0))
+            else:
+                pad_width.append((0, 0))
+                crop_vals.append((0, outsize))
+
+        cropped = self[
+            crop_vals[0][0]:crop_vals[0][1],
+            crop_vals[1][0]:crop_vals[1][1],
+            crop_vals[2][0]:crop_vals[2][1],
+        ]
+        padded = cropped.pad(
+            pad_width=pad_width,
+            mode=mode,
+            constant_value=constant_value,
+            per_channel=per_channel,
+        )
+        return padded
+
+    def random_crop(self, spatial_shape: Sequence[int]) -> '_VolumeBase':
+        """Create a random crop of a certain shape from the volume.
+
+        Parameters
+        ----------
+        spatial_shape: Sequence[int]
+            Sequence of three integers specifying the spatial shape of the
+            cropped output volume.
+
+        Returns
+        -------
+        highdicom.Volume:
+            New volume formed by cropping the volume.
+
+        """
+        crop_slices = []
+        for c, d in zip(spatial_shape, self.spatial_shape):
+            max_start = d - c
+            if max_start < 0:
+                raise ValueError(
+                    'Crop shape is larger than volume in at least one '
+                    'dimension.'
+                )
+            start = np.random.randint(0, max_start + 1)
+            crop_slices.append(slice(start, start + c))
+
+        return self[tuple(crop_slices)]
+
+
+class VolumeGeometry(_VolumeBase):
+
+    """Class representing the geometry of a regularly-spaced 3D volume.
+
+    This class holds the affine matrix and spatial shape of a volume without
+    storing any voxel data.
+
+    """
+
+    def __init__(
+        self,
+        affine: np.ndarray,
+        spatial_shape: Sequence[int],
+        frame_of_reference_uid: Optional[str] = None,
+    ):
+        """
+
+        Parameters
+        ----------
+        affine: numpy.ndarray
+            4 x 4 affine matrix representing the transformation from pixel
+            indices (slice index, row index, column index) to the
+            frame-of-reference coordinate system. The top left 3 x 3 matrix
+            should be a scaled orthogonal matrix representing the rotation and
+            scaling. The top right 3 x 1 vector represents the translation
+            component. The last row should have value [0, 0, 0, 1].
+        spatial_shape: Sequence[int]
+            Number of voxels in the volume along the three spatial dimensions.
+        frame_of_reference_uid: Optional[str], optional
+            Frame of reference UID for the frame of reference, if known.
+
+        """
+        super().__init__(affine, frame_of_reference_uid)
+
+        if len(spatial_shape) != 3:
+            raise ValueError("Argument 'spatial_shape' must have length 3.")
+        self._spatial_shape = tuple(spatial_shape)
+
+    @property
+    def spatial_shape(self) -> Tuple[int, int, int]:
+        """Tuple[int, int, int]: Spatial shape of the array.
+
+        Does not include the channel dimension.
+
+        """
+        return self._spatial_shape
+
+    @property
+    def shape(self) -> Tuple[int, ...]:
+        """Tuple[int, ...]: Shape of the underlying array.
+
+        For objects of type :class:`highdicom.VolumeGeometry`, this is
+        equivalent to ``spatial_shape``.
+
+        """
+        return self.spatial_shape
+
+    def __getitem__(
+        self,
+        index: Union[int, slice, Tuple[Union[int, slice]]],
+    ) -> "VolumeGeometry":
+        """Get a sub-volume of this volume as a new volume.
+
+        Parameters
+        ----------
+        index: Union[int, slice, Tuple[Union[int, slice]]]
+            Index values. Most possibilities supported by numpy arrays are
+            supported, including negative indices and different step sizes.
+            Indexing with lists is not supported.
+
+        Returns
+        -------
+        highdicom.VolumeGeometry:
+            New volume representing a sub-volume of the original volume.
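+
+        Examples
+        --------
+        A minimal sketch using an identity affine purely for illustration:
+
+        >>> import numpy as np
+        >>> geom = VolumeGeometry(np.eye(4), spatial_shape=(10, 10, 10))
+        >>> geom[2:5, :, ::2].spatial_shape
+        (3, 10, 5)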
+
+        """
+        _, new_shape, new_affine = self._prepare_getitem_index(index)
+
+        return self.__class__(
+            affine=new_affine,
+            spatial_shape=new_shape,
+            frame_of_reference_uid=self.frame_of_reference_uid,
+        )
+
+    def pad(
+        self,
+        pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]],
+        mode: Union[PadModes, str] = PadModes.CONSTANT,
+        constant_value: float = 0.0,
+        per_channel: bool = False,
+    ) -> 'VolumeGeometry':
+        """Pad volume along the three spatial dimensions.
+
+        Parameters
+        ----------
+        pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]]
+            Values to pad the array. Takes the same form as ``numpy.pad()``.
+            May be:
+
+            * A single integer value, which results in that many voxels being
+              added to the beginning and end of all three spatial dimensions,
+              or
+            * A sequence of two values in the form ``[before, after]``, which
+              results in 'before' voxels being added to the beginning of each
+              of the three spatial dimensions, and 'after' voxels being added
+              to the end of each of the three spatial dimensions, or
+            * A nested sequence of integers of the form ``[[pad1], [pad2],
+              [pad3]]``, in which separate padding values are supplied for each
+              of the three spatial axes and used to pad before and after along
+              those axes, or
+            * A nested sequence of integers in the form ``[[before1, after1],
+              [before2, after2], [before3, after3]]``, in which separate values
+              are supplied for the before and after padding of each of the
+              three spatial dimensions.
+
+            In all cases, all integer values must be non-negative.
+        mode: Union[highdicom.PadModes, str], optional
+            Ignored for :class:`highdicom.VolumeGeometry`.
+        constant_value: Union[float, Sequence[float]], optional
+            Ignored for :class:`highdicom.VolumeGeometry`.
+        per_channel: bool, optional
+            Ignored for :class:`highdicom.VolumeGeometry`.
+
+        Returns
+        -------
+        highdicom.VolumeGeometry:
+            New geometry with padding applied.
+
+        """
+        new_affine, full_pad_width = self._prepare_pad_width(pad_width)
+
+        new_shape = [
+            d + p[0] + p[1] for d, p in zip(self.spatial_shape, full_pad_width)
+        ]
+
+        return self.__class__(
+            spatial_shape=new_shape,
+            affine=new_affine,
+            frame_of_reference_uid=self.frame_of_reference_uid,
+        )
+
+    def permute_axes(self, indices: Sequence[int]) -> 'VolumeGeometry':
+        """Create a new geometry by permuting the spatial axes.
+
+        Parameters
+        ----------
+        indices: Sequence[int]
+            List of three integers containing the values 0, 1 and 2 in some
+            order. Note that you may not change the position of the channel
+            axis (if present).
+
+        Returns
+        -------
+        highdicom.VolumeGeometry:
+            New geometry with spatial axes permuted in the provided order.
+
+        """
+        new_affine = self._permute_affine(indices)
+
+        new_shape = [self.spatial_shape[i] for i in indices]
+
+        return self.__class__(
+            spatial_shape=new_shape,
+            affine=new_affine,
+            frame_of_reference_uid=self.frame_of_reference_uid,
+        )
+
+
+class Volume(_VolumeBase):
+
+    """Class representing a 3D array of regularly-spaced frames in 3D space.
+
+    This class combines a 3D NumPy array with an affine matrix describing the
+    location of the voxels in the frame of reference coordinate space. A
+    Volume is not a DICOM object itself, but represents a volume that may
+    be extracted from a DICOM image, and/or encoded within a DICOM object,
+    potentially following any number of processing steps.
+
+    All such volumes have a geometry that exists within DICOM's patient
+    coordinate system.
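+
+    A minimal construction sketch (the array contents and identity affine
+    below are illustrative placeholders):
+
+    >>> import numpy as np
+    >>> import highdicom as hd
+    >>> vol = hd.Volume(
+    ...     array=np.zeros((10, 20, 20)),
+    ...     affine=np.eye(4),
+    ... )
+    >>> vol.spatial_shape
+    (10, 20, 20)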
+
+    Internally this class uses the following conventions to represent the
+    geometry; however, it can be constructed from, or transformed to, other
+    conventions with appropriate optional parameters to its methods:
+
+    * The pixel indices are ordered (slice index, row index, column index).
+    * Pixel indices are zero-based and represent the center of the pixel.
+    * Row indices are ordered top to bottom, column indices are ordered left
+      to right. The interpretation of the slice indices direction is not
+      defined.
+    * The x, y, z coordinates of the frame-of-reference coordinate system
+      follow the "LPS" convention used in DICOM (see
+      :dcm:`Part 3 Section C.7.6.2.1.1 `).
+      I.e.
+      * The first coordinate (``x``) increases from the patient's right to
+        left.
+      * The second coordinate (``y``) increases from the patient's anterior to
+        posterior.
+      * The third coordinate (``z``) increases from the patient's caudal
+        direction (inferior) to cranial direction (superior).
+
+    Note
+    ----
+    The ordering of pixel indices used by this class (slice, row, column)
+    matches the way pydicom and highdicom represent pixel arrays but differs
+    from the (column, row, slice) convention used by the various "transformer"
+    classes in the ``highdicom.spatial`` module.
+
+    """
+
+    def __init__(
+        self,
+        array: np.ndarray,
+        affine: np.ndarray,
+        frame_of_reference_uid: Optional[str] = None,
+    ):
+        """
+
+        Parameters
+        ----------
+        array: numpy.ndarray
+            Array of voxel data. Must be either 3D (three spatial dimensions),
+            or 4D (three spatial dimensions followed by a channel dimension).
+            Any datatype is permitted.
+        affine: numpy.ndarray
+            4 x 4 affine matrix representing the transformation from pixel
+            indices (slice index, row index, column index) to the
+            frame-of-reference coordinate system. The top left 3 x 3 matrix
+            should be a scaled orthogonal matrix representing the rotation and
+            scaling. The top right 3 x 1 vector represents the translation
+            component. The last row should have value [0, 0, 0, 1].
+        frame_of_reference_uid: Optional[str], optional
+            Frame of reference UID for the frame of reference, if known.
+
+        """
+        super().__init__(
+            affine=affine,
+            frame_of_reference_uid=frame_of_reference_uid,
+        )
+        if array.ndim not in (3, 4):
+            raise ValueError(
+                "Argument 'array' must be three or four-dimensional."
+            )
+        self._array = array
+
+    @classmethod
+    def from_image_series(
+        cls,
+        series_datasets: Sequence[Dataset],
+        apply_modality_transform: bool = True,
+        apply_voi_transform: bool = False,
+        voi_transform_index: int = 0,
+        apply_palette_color_lut: bool = True,
+        apply_icc_transform: bool = True,
+        standardize_color_space: bool = True,
+    ) -> "Volume":
+        """Create volume from a series of single frame images.
+
+        Parameters
+        ----------
+        series_datasets: Sequence[pydicom.Dataset]
+            Series of single frame datasets. There is no requirement on the
+            sorting of the datasets.
+        apply_modality_transform: bool, optional
+            Whether to apply the modality transform (either a rescale intercept
+            and slope or modality LUT) to the pixel values, if present in the
+            datasets.
+        apply_voi_transform: bool, optional
+            Whether to apply the value of interest (VOI) transform (either a
+            windowing operation or VOI LUT) to the pixel values, if present in
+            the datasets.
+        voi_transform_index: int, optional
+            Index of the VOI transform to apply if multiple are included in the
+            datasets. Ignored if ``apply_voi_transform`` is ``False`` or no VOI
+            transform is included in the datasets.
+        apply_palette_color_lut: bool, optional
+            Whether to apply the palette color LUT if a dataset has photometric
+            interpretation ``'PALETTE_COLOR'``.
+        apply_icc_transform: bool, optional
+            Whether to apply an ICC color profile, if present in the datasets.
+        standardize_color_space: bool, optional
+            Whether to convert the color space to a standardized space. If
+            True, images with photometric interpretation ``MONOCHROME1`` are
+            inverted to mimic ``MONOCHROME2``, and images with photometric
+            interpretation ``YBR_FULL`` or ``YBR_FULL_422`` are converted to
+            ``RGB``.
+
+        Returns
+        -------
+        Volume:
+            Volume created from the series.
+
+        """
+        if apply_voi_transform and not apply_modality_transform:
+            raise ValueError(
+                "Argument 'apply_voi_transform' requires "
+                "'apply_modality_transform'."
+            )
+        series_instance_uid = series_datasets[0].SeriesInstanceUID
+        if not all(
+            ds.SeriesInstanceUID == series_instance_uid
+            for ds in series_datasets
+        ):
+            raise ValueError('Images do not belong to the same series.')
+
+        coordinate_system = get_image_coordinate_system(series_datasets[0])
+        if (
+            coordinate_system is None or
+            coordinate_system != CoordinateSystemNames.PATIENT
+        ):
+            raise ValueError(
+                "Dataset should exist in the patient "
+                "coordinate_system."
+            )
+
+        frame_of_reference_uid = series_datasets[0].FrameOfReferenceUID
+        if not all(
+            ds.FrameOfReferenceUID == frame_of_reference_uid
+            for ds in series_datasets
+        ):
+            raise ValueError('Images do not share a frame of reference.')
+
+        series_datasets = sort_datasets(series_datasets)
+
+        ds = series_datasets[0]
+
+        if len(series_datasets) == 1:
+            slice_spacing = ds.get('SpacingBetweenSlices', 1.0)
+        else:
+            slice_spacing, _ = get_series_volume_positions(series_datasets)
+            if slice_spacing is None:
+                raise ValueError('Series is not a regularly-spaced volume.')
+
+        affine = _create_affine_transformation_matrix(
+            image_position=ds.ImagePositionPatient,
+            image_orientation=ds.ImageOrientationPatient,
+            pixel_spacing=ds.PixelSpacing,
+            spacing_between_slices=slice_spacing,
+            index_convention=VOLUME_INDEX_CONVENTION,
+            slices_first=True,
+        )
+
+        frames = []
+        for ds in series_datasets:
+            frame = ds.pixel_array
+            max_value = 2 ** np.iinfo(ds.pixel_array.dtype).bits - 1
+            if apply_modality_transform:
+                frame = apply_modality_lut(frame, ds)
+            if apply_voi_transform:
+                frame = apply_voi_lut(frame, ds, voi_transform_index)
+            if (
+                apply_palette_color_lut and
+                ds.PhotometricInterpretation == 'PALETTE_COLOR'
+            ):
+                frame = apply_color_lut(frame, ds)
+            if apply_icc_transform and 'ICCProfile' in ds:
+                manager = ColorManager(ds.ICCProfile)
+                frame = manager.transform_frame(frame)
+            if standardize_color_space:
+                if ds.PhotometricInterpretation == 'MONOCHROME1':
+                    # TODO what if a VOI_LUT has been applied
+                    frame = max_value - frame
+                elif ds.PhotometricInterpretation in (
+                    'YBR_FULL', 'YBR_FULL_422'
+                ):
+                    frame = convert_color_space(
+                        frame,
+                        current=ds.PhotometricInterpretation,
+                        desired='RGB'
+                    )
+
+            frames.append(frame)
+
+        array = np.stack(frames)
+
+        return cls(
+            affine=affine,
+            array=array,
+            frame_of_reference_uid=frame_of_reference_uid,
+        )
+
+    @classmethod
+    def from_image(
+        cls,
+        dataset: Dataset,
+    ) -> "Volume":
+        """Create volume from a multiframe image.
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            A multi-frame image dataset.
+
+        Returns
+        -------
+        Volume:
+            Volume created from the image.
+
+        """
+        if not is_multiframe_image(dataset):
+            raise ValueError(
+                'Dataset should be a multi-frame image.'
+ ) + coordinate_system = get_image_coordinate_system(dataset) + if ( + coordinate_system is None or + coordinate_system != CoordinateSystemNames.PATIENT + ): + raise ValueError( + "Dataset should exist in the patient " + "coordinate_system." + ) + sfgs = dataset.SharedFunctionalGroupsSequence[0] + if 'PlaneOrientationSequence' not in sfgs: + raise ValueError('Frames do not share an orientation.') + image_orientation = ( + sfgs + .PlaneOrientationSequence[0] + .ImageOrientationPatient + ) + pffgs = dataset.PerFrameFunctionalGroupsSequence + image_positions = [ + g.PlanePositionSequence[0].ImagePositionPatient + for g in pffgs + ] + sort_index = get_plane_sort_index( + image_positions, + image_orientation, + ) + sorted_positions = [image_positions[i] for i in sort_index] + + if 'PixelMeasuresSequence' not in sfgs: + raise ValueError('Frames do not share pixel measures.') + pixel_spacing = sfgs.PixelMeasuresSequence[0].PixelSpacing + + slice_spacing, _ = get_volume_positions( + image_positions=image_positions, + image_orientation=image_orientation, + ) + if slice_spacing is None: + raise ValueError( + 'Dataset does not represent a regularly sampled volume.' + ) + + affine = _create_affine_transformation_matrix( + image_position=sorted_positions[0], + image_orientation=image_orientation, + pixel_spacing=pixel_spacing, + spacing_between_slices=slice_spacing, + index_convention=VOLUME_INDEX_CONVENTION, + slices_first=True, + ) + + # TODO apply VOI color modality LUT etc + array = dataset.pixel_array + if array.ndim == 2: + array = array[np.newaxis] + array = array[sort_index] + + return cls( + affine=affine, + array=array, + frame_of_reference_uid=dataset.FrameOfReferenceUID, + ) + + @classmethod + def from_attributes( + cls, + array: np.ndarray, + image_position: Sequence[float], + image_orientation: Sequence[float], + pixel_spacing: Sequence[float], + spacing_between_slices: float, + frame_of_reference_uid: Optional[str] = None, + ) -> "Volume": + """Create a volume from DICOM attributes. + + Parameters + ---------- + array: numpy.ndarray + Three dimensional array of voxel data. The first dimension indexes + slices, the second dimension indexes rows, and the final dimension + indexes columns. + image_position: Sequence[float] + Position in the frame of reference space of the center of the top + left pixel of the image. Corresponds to DICOM attributes + "ImagePositionPatient". Should be a sequence of length 3. + image_orientation: Sequence[float] + Cosines of the row direction (first triplet: horizontal, left to + right, increasing column index) and the column direction (second + triplet: vertical, top to bottom, increasing row index) direction + expressed in the three-dimensional patient or slide coordinate + system defined by the frame of reference. Corresponds to the DICOM + attribute "ImageOrientationPatient". + pixel_spacing: Sequence[float] + Spacing between pixels in millimeter unit along the column + direction (first value: spacing between rows, vertical, top to + bottom, increasing row index) and the row direction (second value: + spacing between columns: horizontal, left to right, increasing + column index). Corresponds to DICOM attribute "PixelSpacing". + spacing_between_slices: float + Spacing between slices in millimeter units in the frame of + reference coordinate system space. Corresponds to the DICOM + attribute "SpacingBetweenSlices" (however, this may not be present in + many images and may need to be inferred from "ImagePositionPatient" + attributes of consecutive slices). 
+        frame_of_reference_uid: Union[str, None], optional
+            Frame of reference UID, if known. Corresponds to DICOM attribute
+            FrameOfReferenceUID.
+
+        Returns
+        -------
+        highdicom.Volume:
+            New Volume using the given array and DICOM attributes.
+
+        """
+        affine = _create_affine_transformation_matrix(
+            image_position=image_position,
+            image_orientation=image_orientation,
+            pixel_spacing=pixel_spacing,
+            spacing_between_slices=spacing_between_slices,
+            index_convention=VOLUME_INDEX_CONVENTION,
+            slices_first=True,
+        )
+        return cls(
+            affine=affine,
+            array=array,
+            frame_of_reference_uid=frame_of_reference_uid,
+        )
+
+    @classmethod
+    def from_components(
+        cls,
+        array: np.ndarray,
+        position: Sequence[float],
+        direction: Sequence[float],
+        spacing: Sequence[float],
+        frame_of_reference_uid: Optional[str] = None,
+    ) -> "Volume":
+        """Construct a Volume from components.
+
+        Parameters
+        ----------
+        array: numpy.ndarray
+            Three dimensional array of voxel data.
+        position: Sequence[float]
+            Sequence of three floats giving the position in the frame of
+            reference coordinate system of the center of the pixel at location
+            (0, 0, 0).
+        direction: Sequence[float]
+            Direction matrix for the volume. The columns of the direction
+            matrix are orthogonal unit vectors that give the direction in the
+            frame of reference space of the increasing direction of each axis
+            of the array. This matrix may be passed either as a 3x3 matrix or a
+            flattened 9 element array (first row, second row, third row).
+        spacing: Sequence[float]
+            Spacing between pixel centers in the frame of reference
+            coordinate system along each of the dimensions of the array.
+        frame_of_reference_uid: Union[str, None], optional
+            Frame of reference UID for the frame of reference, if known.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume constructed from the provided components.
+
+        """
+        if not isinstance(position, Sequence):
+            raise TypeError('Argument "position" must be a sequence.')
+        if len(position) != 3:
+            raise ValueError('Argument "position" must have length 3.')
+        if not isinstance(spacing, Sequence):
+            raise TypeError('Argument "spacing" must be a sequence.')
+        if len(spacing) != 3:
+            raise ValueError('Argument "spacing" must have length 3.')
+        direction_arr = np.array(direction, dtype=np.float32)
+        if direction_arr.shape == (9, ):
+            direction_arr = direction_arr.reshape(3, 3)
+        elif direction_arr.shape == (3, 3):
+            pass
+        else:
+            raise ValueError(
+                "Argument 'direction' must have shape (9, ) or (3, 3)."
+            )
+        if not _is_matrix_orthogonal(direction_arr, require_unit=True):
+            raise ValueError(
+                "Argument 'direction' must be an orthogonal matrix of "
+                "unit vectors."
+            )
+
+        scaled_direction = direction_arr * spacing
+        affine = _stack_affine_matrix(scaled_direction, np.array(position))
+        return cls(
+            array=array,
+            affine=affine,
+            frame_of_reference_uid=frame_of_reference_uid,
+        )
+
+    def get_geometry(self) -> VolumeGeometry:
+        """Get geometry for this volume.
+
+        Returns
+        -------
+        highdicom.VolumeGeometry:
+            Geometry object matching this volume.
+
+        """
+        return VolumeGeometry(
+            affine=self._affine.copy(),
+            spatial_shape=self.spatial_shape,
+            frame_of_reference_uid=self.frame_of_reference_uid
+        )
+
+    @property
+    def dtype(self) -> type:
+        """type: Datatype of the array."""
+        return self._array.dtype
+
+    @property
+    def shape(self) -> Tuple[int, ...]:
+        """Tuple[int, ...]: Shape of the underlying array.
+ + May or may not include a fourth channel dimension. + + """ + return tuple(self._array.shape) + + @property + def spatial_shape(self) -> Tuple[int, int, int]: + """Tuple[int, int, int]: Spatial shape of the array. + + Does not include the channel dimension. + + """ + return tuple(self._array.shape[:3]) + + @property + def number_of_channels(self) -> Optional[int]: + """Optional[int]: Number of channels. + + If the array has no channel dimension, returns None. + + """ + if self._array.ndim == 4: + return self._array.shape[3] + return None + + @property + def array(self) -> np.ndarray: + """numpy.ndarray: Volume array.""" + return self._array + + @array.setter + def array(self, value: np.ndarray) -> None: + """Change the voxel array without changing the affine. + + Parameters + ---------- + array: np.ndarray + New 3D or 4D array of voxel data. The spatial shape must match the + existing array, but the presence and number of channels and/or the + voxel datatype may differ. + + """ + if value.ndim not in (3, 4): + raise ValueError( + "Argument 'array' must be a three or four dimensional array." + ) + if value.shape[:3] != self.spatial_shape: + raise ValueError( + "Array must match the spatial shape of the existing array." + ) + self._array = value def astype(self, dtype: type) -> 'Volume': """Get new volume with a new datatype. @@ -1053,7 +1918,7 @@ def __getitem__( Parameters ---------- index: Union[int, slice, Tuple[Union[int, slice]]] - Index values. MOst possibilities supported by numpy arrays are + Index values. Most possibilities supported by numpy arrays are supported, including negative indices and different step sizes. Indexing with lists is not supported. @@ -1063,67 +1928,10 @@ def __getitem__( New volume representing a sub-volume of the original volume. """ - if isinstance(index, int): - # Change the index to a slice of length one so that all dimensions - # are retained in the output array. Also make into a tuple of - # length 1 to standardize format - tuple_index = (slice(index, index + 1), ) - elif isinstance(index, slice): - # Make into a tuple of length one to standardize the format - tuple_index = (index, ) - elif isinstance(index, tuple): - index_list = [] - for item in index: - if isinstance(item, int): - # Change the index to a slice of length one so that all - # dimensions are retained in the output array. - item = slice(item, item + 1) - index_list.append(item) - elif isinstance(item, slice): - index_list.append(item) - else: - raise TypeError( - 'Items within "index" must be ints, or slices. Got ' - f'{type(item)}.' - ) - - tuple_index = tuple(index_list) - - else: - raise TypeError( - 'Argument "index" must be an int, slice or tuple. Got ' - f'{type(index)}.' 
- ) + tuple_index, _, new_affine = self._prepare_getitem_index(index) new_array = self._array[tuple_index] - new_vectors = [] - origin_indices = [] - - for d in range(0, 3): - # The index item along this dimension - if len(tuple_index) > d: - index_item = tuple_index[d] - first, _, step = index_item.indices(self.shape[d]) - else: - index_item = None - first = 0 - step = 1 - - new_vectors.append(self._affine[:3, d] * step) - origin_indices.append(first) - - origin_index_arr = np.array([origin_indices]) - new_origin_arr = self.map_indices_to_reference(origin_index_arr).T - - new_rotation = np.column_stack(new_vectors) - new_affine = np.row_stack( - [ - np.column_stack([new_rotation, new_origin_arr]), - np.array([0., 0., 0., 1.0]), - ] - ) - return self.__class__( array=new_array, affine=new_affine, @@ -1131,7 +1939,6 @@ def __getitem__( ) def permute_axes(self, indices: Sequence[int]) -> 'Volume': - # TODO add tests for this """Create a new volume by permuting the spatial axes. Parameters @@ -1144,215 +1951,21 @@ def permute_axes(self, indices: Sequence[int]) -> 'Volume': Returns ------- highdicom.Volume: - New volume with spatial axes permuted in the provided order. - - """ - if len(indices) != 3 or set(indices) != {0, 1, 2}: - raise ValueError( - f'Argument "indices" must consist of the values 0, 1, and 2 ' - 'in some order.' - ) - - if self._array.ndim == 3: - new_array = np.transpose(self._array, indices) - else: - new_array = np.transpose(self._array, [*indices, 3]) - - new_affine = _transform_affine_matrix( - affine=self._affine, - shape=self.spatial_shape, - permute_indices=indices, - ) - - return self.__class__( - array=new_array, - affine=new_affine, - frame_of_reference_uid=self.frame_of_reference_uid, - ) - - def swap_axes(self, axis_1: int, axis_2: int) -> 'Volume': - """Swap the spatial axes of the array. - - Parameters - ---------- - axis_1: int - Spatial axis index (0, 1 or 2) to swap with ``axis_2``. - axis_2: int - Spatial axis index (0, 1 or 2) to swap with ``axis_1``. - - Returns - ------- - highdicom.Volume: - New volume with spatial axes swapped as requested. - - """ - for a in [axis_1, axis_2]: - if a not in {0, 1, 2}: - raise ValueError( - 'Axis values must be one of 0, 1 or 2.' - ) - - if axis_1 == axis_2: - raise ValueError( - "Arguments 'axis_1' and 'axis_2' must be different." - ) - - permutation = [0, 1, 2] - permutation[axis_1] = axis_2 - permutation[axis_2] = axis_1 - - return self.permute_axes(permutation) - - def flip(self, axis: Union[int, Sequence[int]]) -> 'Volume': - """Flip the spatial axes of the array. - - Note that this flips the array and updates the affine to reflect the - flip. - - Parameters - ---------- - axis: Union[int, Sequence[int]] - Axis or list of axis indices that should be flipped. These should - include only the spatial axes (0, 1, and/or 2). - - Returns - ------- - highdicom.Volume: - New volume with spatial axes flipped as requested. - - """ - if isinstance(axis, int): - axis = [axis] - - if len(axis) > 3 or len(set(axis) - {0, 1, 2}) > 0: - raise ValueError( - 'Argument "axis" must contain only values 0, 1, and/or 2.' 
- ) - - # We will re-use the existing __getitem__ implementation, which has all - # this logic figured out already - index = [] - for d in range(3): - if d in axis: - index.append(slice(-1, None, -1)) - else: - index.append(slice(None)) - - return self[tuple(index)] - - def to_patient_orientation( - self, - patient_orientation: Union[ - str, - Sequence[Union[str, PatientOrientationValuesBiped]], - ], - ) -> 'Volume': - """Rearrange the array to a given orientation. - - The resulting volume is formed from this volume through a combination - of axis permutations and flips of the spatial axes. Its patient - orientation will be as close to the desired orientation as can be - achieved with these operations alone (and in particular without - resampling the array). - - Parameters - ---------- - patient_orientation: Union[str, Sequence[Union[str, highdicom.enum.PatientOrientationValuesBiped]]] - Desired patient orientation, as either a sequence of three - highdicom.enum.PatientOrientationValuesBiped values, or a string - such as ``"FPL"`` using the same characters. - - Returns - ------- - highdicom.Volume: - New volume with the requested patient orientation. - - """ # noqa: E501 - desired_orientation = _normalize_patient_orientation( - patient_orientation - ) - - current_orientation = self.get_closest_patient_orientation() - - permute_indices = [] - flip_axes = [] - for d in desired_orientation: - if d in current_orientation: - from_index = current_orientation.index(d) - else: - d_inv = PATIENT_ORIENTATION_OPPOSITES[d] - from_index = current_orientation.index(d_inv) - flip_axes.append(from_index) - permute_indices.append(from_index) - - if len(flip_axes) > 0: - result = self.flip(flip_axes) - else: - result = self - - return result.permute_axes(permute_indices) - - @property - def handedness(self) -> AxisHandedness: - """highdicom.AxisHandedness: Axis handedness of the volume.""" - v1, v2, v3 = self.spacing_vectors() - if np.cross(v1, v2) @ v3 < 0.0: - return AxisHandedness.LEFT_HANDED - return AxisHandedness.RIGHT_HANDED - - def ensure_handedness( - self, - handedness: Union[AxisHandedness, str], - flip_axis: Optional[int] = None, - swap_axes: Optional[Sequence[int]] = None, - ) -> 'Volume': - """Manipulate the volume if necessary to ensure a given handedness. - - If the volume already has the specified handedness, it is returned - unaltered. - - If the volume does not meet the requirement, the volume is manipulated - using a user specified operation to meet the requirement. The two - options are reversing the direction of a single axis ("flipping") or - swapping the position of two axes. - - Parameters - ---------- - handedness: highdicom.AxisHandedness - Handedness to ensure. - flip_axis: Union[int, None], optional - Specification of a spatial axis index (0, 1, or 2) to flip if - required to meet the given handedness requirement. - swap_axes: Union[int, None], optional - Specification of a sequence of two spatial axis indices (each being - 0, 1, or 2) to swap if required to meet the given handedness - requirement. - - Note - ---- - Either ``flip_axis`` or ``swap_axes`` must be provided (and not both) - to specify the operation to perform to correct the handedness (if - required). + New volume with spatial axes permuted in the provided order. """ - if (flip_axis is None) == (swap_axes is None): - raise TypeError( - "Exactly one of either 'flip_axis' or 'swap_axes' " - "must be specified." 
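# [Editor's note] A hedged sketch, not part of the patch, of the orientation
# handling being moved into the base class here: 'to_patient_orientation'
# combines axis flips and permutations (no resampling) to reach the requested
# orientation. The identity affine is an assumption for illustration only.
import numpy as np
from highdicom.spatial import _normalize_patient_orientation
from highdicom.volume import Volume

vol = Volume(array=np.zeros((5, 5, 5)), affine=np.eye(4))
reoriented = vol.to_patient_orientation('FPL')
assert reoriented.get_closest_patient_orientation() == (
    _normalize_patient_orientation('FPL')
)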
- ) - handedness = AxisHandedness(handedness) - if handedness == self.handedness: - return self - - if flip_axis is not None: - return self.flip(flip_axis) + new_affine = self._permute_affine(indices) - if len(swap_axes) != 2: - raise ValueError( - "Argument 'swap_axes' must have length 2." - ) + if self._array.ndim == 3: + new_array = np.transpose(self._array, indices) + else: + new_array = np.transpose(self._array, [*indices, 3]) - return self.swap_axes(swap_axes[0], swap_axes[1]) + return self.__class__( + array=new_array, + affine=new_affine, + frame_of_reference_uid=self.frame_of_reference_uid, + ) def normalize_mean_std( self, @@ -1597,11 +2210,12 @@ def pad( May be: * A single integer value, which results in that many voxels being - added to the beginning and end of all three spatial dimensions. + added to the beginning and end of all three spatial dimensions, + or * A sequence of two values in the form ``[before, after]``, which results in 'before' voxels being added to the beginning of each of the three spatial dimensions, and 'after' voxels being added - to the end of each of the three spatial dimensions + to the end of each of the three spatial dimensions, or * A nested sequence of integers of the form ``[[pad1], [pad2], [pad3]]``, in which separate padding values are supplied for each of the three spatial axes and used to pad before and after along @@ -1610,6 +2224,8 @@ def pad( [before2, after2], [before3, after3]]``, in which separate values are supplied for the before and after padding of each of the three spatial dimensions. + + In all cases, all integer values must be non-negative. mode: Union[highdicom.PadModes, str], optional Mode to use to pad the array. See :class:`highdicom.PadModes` for options. @@ -1677,33 +2293,10 @@ def pad( # Only one channel, so can ignore the per_channel logic per_channel = False - padding_with_channels = ( - self.number_of_channels is not None and not per_channel - ) - if isinstance(pad_width, int): - origin_offset = [-pad_width] * 3 - if padding_with_channels: - pad_width = [*([[pad_width]] * 3), [0]] # no channel padding - elif isinstance(pad_width, Sequence): - if isinstance(pad_width[0], int): - origin_offset = [-pad_width[0]] * 3 - if padding_with_channels: - pad_width = [*([pad_width] * 3), [0, 0]] # no channel padding - elif isinstance(pad_width[0], Sequence): - if len(pad_width[0]) == 1: - origin_offset = [-p[0] for p in pad_width] - if padding_with_channels: - pad_width = pad_width.copy() - pad_width.append([0]) # no channel padding - elif len(pad_width[0]) == 2: - origin_offset = [-p[0] for p in pad_width] - if padding_with_channels: - pad_width = pad_width.copy() - pad_width.append([0, 0]) # no channel padding - else: - raise ValueError("Invalid arrangement in 'pad_width'.") - else: - raise TypeError("Invalid format for 'pad_width'.") + new_affine, full_pad_width = self._prepare_pad_width(pad_width) + + if self.number_of_channels is not None and not per_channel: + full_pad_width.append([0, 0]) # no padding for channel dim def pad_array(array: np.ndarray, cval: float) -> float: if used_mode == PadModes.CONSTANT: @@ -1723,7 +2316,7 @@ def pad_array(array: np.ndarray, cval: float) -> float: return np.pad( array, - pad_width=pad_width, + pad_width=full_pad_width, mode=used_mode.value.lower(), **pad_kwargs, ) @@ -1738,230 +2331,12 @@ def pad_array(array: np.ndarray, cval: float) -> float: else: new_array = pad_array(self.array, constant_value) - new_affine = _translate_affine_matrix(self.affine, origin_offset) - return self.__class__( 
array=new_array, affine=new_affine, frame_of_reference_uid=self.frame_of_reference_uid, ) - def pad_to_shape( - self, - spatial_shape: Sequence[int], - mode: PadModes = PadModes.CONSTANT, - constant_value: float = 0.0, - per_channel: bool = False, - ) -> 'Volume': - """Pad volume to given spatial shape. - - The volume is padded symmetrically, placing the original array at the - center of the output array, to achieve the given shape. If this - requires an odd number of elements to be added along a certain - dimension, one more element is placed at the end of the array than at - the start. - - Parameters - ---------- - spatial_shape: Sequence[int] - Sequence of three integers specifying the spatial shape to pad to. - This shape must be no smaller than the existing shape along any of - the three spatial dimensions. - mode: highdicom.PadModes, optional - Mode to use to pad the array. See :class:`highdicom.PadModes` for - options. - constant_value: Union[float, Sequence[float]], optional - Value used to pad when mode is ``"CONSTANT"``. If ``per_channel`` - if True, a sequence whose length is equal to the number of channels - may be passed, and each value will be used for the corresponding - channel. With other pad modes, this argument is ignored. - per_channel: bool, optional - For padding modes that involve calculation of image statistics to - determine the padding value (i.e. ``MINIMUM``, ``MAXIMUM``, - ``MEAN``, ``MEDIAN``), pad each channel separately using the value - calculated using that channel alone (rather than the statistics of - the entire array). For other padding modes, this argument makes no - difference. This should not the True if the image does not have a - channel dimension. - - Returns - ------- - highdicom.Volume: - Volume with padding applied. - - """ - if len(spatial_shape) != 3: - raise ValueError( - "Argument 'shape' must have length 3." - ) - - pad_width = [] - for insize, outsize in zip(self.spatial_shape, spatial_shape): - to_pad = outsize - insize - if to_pad < 0: - raise ValueError( - 'Shape is smaller than existing shape along at least ' - 'one axis.' - ) - pad_front = to_pad // 2 - pad_back = to_pad - pad_front - pad_width.append((pad_front, pad_back)) - - return self.pad( - pad_width=pad_width, - mode=mode, - constant_value=constant_value, - per_channel=per_channel, - ) - - def crop_to_shape(self, spatial_shape: Sequence[int]) -> 'Volume': - """Center-crop volume to a given spatial shape. - - Parameters - ---------- - spatial_shape: Sequence[int] - Sequence of three integers specifying the spatial shape to crop to. - This shape must be no larger than the existing shape along any of - the three spatial dimensions. - - Returns - ------- - highdicom.Volume: - Volume with padding applied. - - """ - if len(spatial_shape) != 3: - raise ValueError( - "Argument 'shape' must have length 3." - ) - - crop_vals = [] - for insize, outsize in zip(self.spatial_shape, spatial_shape): - to_crop = insize - outsize - if to_crop < 0: - raise ValueError( - 'Shape is larger than existing shape along at least ' - 'one axis.' 
- ) - crop_front = to_crop // 2 - crop_back = to_crop - crop_front - crop_vals.append((crop_front, insize - crop_back)) - - return self[ - crop_vals[0][0]:crop_vals[0][1], - crop_vals[1][0]:crop_vals[1][1], - crop_vals[2][0]:crop_vals[2][1], - ] - - def pad_or_crop_to_shape( - self, - spatial_shape: Sequence[int], - mode: PadModes = PadModes.CONSTANT, - constant_value: float = 0.0, - per_channel: bool = False, - ) -> 'Volume': - """Pad and/or crop volume to given spatial shape. - - For each dimension where padding is required, the volume is padded - symmetrically, placing the original array at the center of the output - array, to achieve the given shape. If this requires an odd number of - elements to be added along a certain dimension, one more element is - placed at the end of the array than at the start. - - For each dimension where cropping is required, center cropping is used. - - Parameters - ---------- - spatial_shape: Sequence[int] - Sequence of three integers specifying the spatial shape to pad or - crop to. - mode: highdicom.PadModes, optional - Mode to use to pad the array, if padding is required. See - :class:`highdicom.PadModes` for options. - constant_value: Union[float, Sequence[float]], optional - Value used to pad when mode is ``"CONSTANT"``. If ``per_channel`` - if True, a sequence whose length is equal to the number of channels - may be passed, and each value will be used for the corresponding - channel. With other pad modes, this argument is ignored. - per_channel: bool, optional - For padding modes that involve calculation of image statistics to - determine the padding value (i.e. ``MINIMUM``, ``MAXIMUM``, - ``MEAN``, ``MEDIAN``), pad each channel separately using the value - calculated using that channel alone (rather than the statistics of - the entire array). For other padding modes, this argument makes no - difference. This should not the True if the image does not have a - channel dimension. - - Returns - ------- - highdicom.Volume: - Volume with padding and/or cropping applied. - - """ - if len(spatial_shape) != 3: - raise ValueError( - "Argument 'shape' must have length 3." - ) - - pad_width = [] - crop_vals = [] - for insize, outsize in zip(self.spatial_shape, spatial_shape): - diff = outsize - insize - if diff > 0: - pad_front = diff // 2 - pad_back = diff - pad_front - pad_width.append((pad_front, pad_back)) - crop_vals.append((0, outsize)) - elif diff < 0: - crop_front = (-diff) // 2 - crop_back = (-diff) - crop_front - crop_vals.append((crop_front, insize - crop_back)) - pad_width.append((0, 0)) - else: - pad_width.append((0, 0)) - crop_vals.append((0, outsize)) - - cropped = self[ - crop_vals[0][0]:crop_vals[0][1], - crop_vals[1][0]:crop_vals[1][1], - crop_vals[2][0]:crop_vals[2][1], - ] - padded = cropped.pad( - pad_width=pad_width, - mode=mode, - constant_value=constant_value, - per_channel=per_channel, - ) - return padded - - def random_crop(self, spatial_shape: Sequence[int]) -> 'Volume': - """Create a random crop of a certain shape from the volume. - - Parameters - ---------- - spatial_shape: Sequence[int] - Sequence of three integers specifying the spatial shape to pad or - crop to. - - Returns - ------- - highdicom.Volume: - New volume formed by cropping the volumes. - - """ - crop_slices = [] - for c, d in zip(spatial_shape, self.spatial_shape): - max_start = d - c - if max_start < 0: - raise ValueError( - 'Crop shape is larger than volume in at least one ' - 'dimension.' 
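# [Editor's note] Sketch, not part of the patch, of the symmetric padding and
# center-cropping conventions described in the docstrings above: when an odd
# number of elements must be added or removed along a dimension, the extra
# element goes at the end. The identity-affine Volume is assumed purely for
# illustration.
import numpy as np
from highdicom.volume import Volume

vol = Volume(array=np.zeros((5, 5, 5)), affine=np.eye(4))
assert vol.pad_to_shape((8, 8, 8)).spatial_shape == (8, 8, 8)   # pad 1 before, 2 after
assert vol.crop_to_shape((3, 3, 3)).spatial_shape == (3, 3, 3)  # crop 1 each side
assert vol.pad_or_crop_to_shape((3, 8, 5)).spatial_shape == (3, 8, 5)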
- ) - start = np.random.randint(0, max_start + 1) - crop_slices.append(slice(start, start + c)) - - return self[tuple(crop_slices)] - def concat_channels(volumes: Sequence[Volume]) -> Volume: """Form a new volume by concatenating channels of existing volumes. diff --git a/tests/test_seg.py b/tests/test_seg.py index 49967b9f..1a8ffae5 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -1197,8 +1197,7 @@ def test_construction_4(self): with pytest.raises(AttributeError): frame_item.PlanePositionSlideSequence # noqa: B018 - # Frames are regularly but ordered the wrong way in this case - assert not hasattr(instance, 'DimensionOrganizationType') + assert hasattr(instance, 'DimensionOrganizationType') self.check_dimension_index_vals(instance) def test_construction_5(self): @@ -1569,7 +1568,7 @@ def test_construction_3d_singleframe(self): self._manufacturer, self._manufacturer_model_name, self._software_versions, - self._device_serial_number + self._device_serial_number, ) # This is a "volume" image, so the output instance should have # the DimensionOrganizationType set correctly and should have deduced @@ -3817,7 +3816,7 @@ def test_get_volume_binary(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -3844,7 +3843,7 @@ def test_get_volume_binary_multisegments(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -3871,7 +3870,7 @@ def test_get_volume_binary_multisegment2(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -3901,7 +3900,7 @@ def test_get_volume_binary_multisegment_combine(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -3930,7 +3929,7 @@ def test_get_volume_binary_multisegment_slice_start(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -3959,7 +3958,7 @@ def test_get_volume_binary_multisegment_slice_start_negative(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -3988,7 +3987,7 @@ def test_get_volume_binary_multisegment_slice_end(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -4017,7 +4016,7 @@ def test_get_volume_binary_multisegment_slice_end_negative(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -4047,7 +4046,7 @@ def test_get_volume_binary_multisegment_center(self): .ImageOrientationPatient ) 
assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -4073,7 +4072,7 @@ def test_get_volume_binary_combine(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -4099,7 +4098,7 @@ def test_get_volume_fractional(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) @@ -4126,7 +4125,7 @@ def test_get_volume_fractional_noscale(self): .ImageOrientationPatient ) assert vol.get_closest_patient_orientation() == ( - PatientOrientationValuesBiped.H, + PatientOrientationValuesBiped.F, PatientOrientationValuesBiped.P, PatientOrientationValuesBiped.L, ) diff --git a/tests/test_spatial.py b/tests/test_spatial.py index 0095c465..1048b095 100644 --- a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -926,7 +926,7 @@ def test_get_spacing_duplicates(): positions = [ [0.0, 0.0, i * expected_spacing] for i in position_indices ] - orientation = [1, 0, 0, 0, 1, 0] + orientation = [1, 0, 0, 0, -1, 0] spacing, volume_positions = get_volume_positions( positions, @@ -955,7 +955,7 @@ def test_get_spacing_missing(): positions = [ [0.0, 0.0, i * expected_spacing] for i in position_indices ] - orientation = [1, 0, 0, 0, 1, 0] + orientation = [1, 0, 0, 0, -1, 0] spacing, volume_positions = get_volume_positions( positions, @@ -977,7 +977,7 @@ def test_get_spacing_missing_duplicates(): positions = [ [0.0, 0.0, i * expected_spacing] for i in position_indices ] - orientation = [1, 0, 0, 0, 1, 0] + orientation = [1, 0, 0, 0, -1, 0] spacing, volume_positions = get_volume_positions( positions, @@ -1006,7 +1006,7 @@ def test_get_spacing_missing_duplicates_non_consecutive(): positions = [ [0.0, 0.0, i * expected_spacing] for i in position_indices ] - orientation = [1, 0, 0, 0, 1, 0] + orientation = [1, 0, 0, 0, -1, 0] # Without the spacing_hint, the positions do not appear to be a volume spacing, volume_positions = get_volume_positions( diff --git a/tests/test_volume.py b/tests/test_volume.py index 68a21516..1f68f991 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -36,11 +36,11 @@ def test_transforms(): ) plane_positions = volume.get_plane_positions() for i, pos in enumerate(plane_positions): - assert np.array_equal(pos[0].ImagePositionPatient, [0.0, 0.0, 10.0 * i]) + assert np.array_equal(pos[0].ImagePositionPatient, [0.0, 0.0, -10.0 * i]) indices = np.array([[1, 2, 3]]) coords = volume.map_indices_to_reference(indices) - assert np.array_equal(coords, np.array([[3.0, 2.0, 10.0]])) + assert np.array_equal(coords, np.array([[3.0, 2.0, -10.0]])) round_trip = volume.map_reference_to_indices(coords) assert np.array_equal(round_trip, indices) index_center = volume.get_center_index() @@ -48,7 +48,7 @@ def test_transforms(): index_center = volume.get_center_index(round_output=True) assert np.array_equal(index_center, [12, 24, 24]) coord_center = volume.get_center_coordinate() - assert np.array_equal(coord_center, [24.5, 24.5, 120]) + assert np.array_equal(coord_center, [24.5, 24.5, -120]) @pytest.mark.parametrize( @@ -160,7 +160,7 @@ def test_volume_single_frame(): assert direction[:, 0] @ direction[:, 1] == 0.0 assert direction[:, 0] @ 
direction[:, 2] == 0.0 assert (direction[:, 0] ** 2).sum() == 1.0 - assert volume.position == ct_series[0].ImagePositionPatient + assert volume.position == ct_series[1].ImagePositionPatient # sorting assert volume.pixel_spacing == ct_series[0].PixelSpacing slice_spacing = 1.25 assert volume.spacing == [slice_spacing, *ct_series[0].PixelSpacing[::-1]] @@ -212,7 +212,7 @@ def test_volume_multiframe(): assert (direction[:, 0] ** 2).sum() == 1.0 first_frame_pos = ( dcm - .PerFrameFunctionalGroupsSequence[1] # due to ordering + .PerFrameFunctionalGroupsSequence[0] .PlanePositionSequence[0] .ImagePositionPatient ) @@ -256,7 +256,7 @@ def test_indexing(): expected_affine = np.array([ [ 0.0, 0.0, 1.0, 0.0], [ 0.0, 1.0, 0.0, 0.0], - [10.0, 0.0, 0.0, 30.0], + [-10.0, 0.0, 0.0, -30.0], [ 0.0, 0.0, 0.0, 1.0], ]) assert np.array_equal(subvolume.affine, expected_affine) @@ -284,7 +284,7 @@ def test_indexing(): expected_affine = np.array([ [ 0.0, 0.0, 1.0, 0.0], [ 0.0, 1.0, 0.0, 7.0], - [10.0, 0.0, 0.0, 30.0], + [-10.0, 0.0, 0.0, -30.0], [ 0.0, 0.0, 0.0, 1.0], ]) assert np.array_equal(subvolume.affine, expected_affine) @@ -306,7 +306,7 @@ def test_indexing(): expected_affine = np.array([ [ 0.0, 0.0, 1.0, 0.0], [ 0.0, 1.0, 0.0, 0.0], - [10.0, 0.0, 0.0, 210.0], + [-10.0, 0.0, 0.0, -210.0], [ 0.0, 0.0, 0.0, 1.0], ]) assert np.array_equal(subvolume.affine, expected_affine) @@ -324,7 +324,7 @@ def test_indexing(): expected_affine = np.array([ [ 0.0, 0.0, 1.0, 0.0], [ 0.0, -1.0, 0.0, 49.0], - [20.0, 0.0, 0.0, 120.0], + [-20.0, 0.0, 0.0, -120.0], [ 0.0, 0.0, 0.0, 1.0], ]) assert np.array_equal(subvolume.affine, expected_affine) From e4aa39199dee76cf3c34fd17406e24dff21b342d Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sat, 10 Aug 2024 20:12:57 -0400 Subject: [PATCH 53/93] Refactor multiframe to use VolumeGeometry, add VolumeGeometry.create_volume --- src/highdicom/__init__.py | 2 + src/highdicom/_multiframe.py | 105 +++++++++++------------------------ src/highdicom/seg/sop.py | 32 ++++------- src/highdicom/spatial.py | 16 ++++-- src/highdicom/volume.py | 98 +++++++++++++++++++++++++++++++- tests/test_multiframe.py | 8 +-- tests/test_seg.py | 24 ++++---- 7 files changed, 167 insertions(+), 118 deletions(-) diff --git a/src/highdicom/__init__.py b/src/highdicom/__init__.py index bf364d22..7bc7cd4e 100644 --- a/src/highdicom/__init__.py +++ b/src/highdicom/__init__.py @@ -60,6 +60,7 @@ from highdicom.version import __version__ from highdicom.volume import ( Volume, + VolumeGeometry, volread, concat_channels, ) @@ -108,6 +109,7 @@ 'VOILUTFunctionValues', 'VOILUTTransformation', 'Volume', + 'VolumeGeometry', '__version__', 'ann', 'color', diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 361ad88c..c658b69b 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -1,7 +1,6 @@ """Tools for working with multiframe DICOM images.""" from collections import Counter from contextlib import contextmanager -import itertools import logging import sqlite3 from typing import ( @@ -37,6 +36,7 @@ from highdicom.utils import ( iter_tiled_full_frame_data, ) +from highdicom.volume import VolumeGeometry # Dictionary mapping DCM VRs to appropriate SQLite types @@ -110,7 +110,7 @@ def __init__( extra_collection_func_pointers = {} slice_spacing_hint = None image_position_tag = tag_for_keyword('ImagePositionPatient') - self.shared_pixel_spacing = None + shared_pixel_spacing: Optional[List[float]] = None if self._coordinate_system == CoordinateSystemNames.PATIENT: plane_pos_seq_tag = 
tag_for_keyword('PlanePositionSequence') # Include the image position if it is not an index @@ -125,8 +125,8 @@ def __init__( if hasattr(sfgs, 'PixelMeasuresSequence'): measures = sfgs.PixelMeasuresSequence[0] slice_spacing_hint = measures.get('SpacingBetweenSlices') - self.shared_pixel_spacing = measures.get('PixelSpacing') - if slice_spacing_hint is None or self.shared_pixel_spacing is None: + shared_pixel_spacing = measures.get('PixelSpacing') + if slice_spacing_hint is None or shared_pixel_spacing is None: # Get the orientation of the first frame, and in the later loop # check whether it is shared. if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'): @@ -135,7 +135,7 @@ def __init__( slice_spacing_hint = pfg1.PixelMeasuresSequence[0].get( 'SpacingBetweenSlices' ) - self.shared_pixel_spacing = pfg1.get('PixelSpacing') + shared_pixel_spacing = pfg1.get('PixelSpacing') dim_ind_positions = { dim_ind.DimensionIndexPointer: i @@ -153,22 +153,22 @@ def __init__( } # Get the shared orientation - self.shared_image_orientation = None + shared_image_orientation: Optional[List[float]] = None if hasattr(dataset, 'ImageOrientationSlide'): - self.shared_image_orientation = dataset.ImageOrientationSlide + shared_image_orientation = dataset.ImageOrientationSlide if hasattr(dataset, 'SharedFunctionalGroupsSequence'): sfgs = dataset.SharedFunctionalGroupsSequence[0] if hasattr(sfgs, 'PlaneOrientationSequence'): - self.shared_image_orientation = ( + shared_image_orientation = ( sfgs.PlaneOrientationSequence[0].ImageOrientationPatient ) - if self.shared_image_orientation is None: + if shared_image_orientation is None: # Get the orientation of the first frame, and in the later loop # check whether it is shared. if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'): pfg1 = dataset.PerFrameFunctionalGroupsSequence[0] if hasattr(pfg1, 'PlaneOrientationSequence'): - self.shared_image_orientation = ( + shared_image_orientation = ( pfg1 .PlaneOrientationSequence[0] .ImageOrientationPatient @@ -326,15 +326,15 @@ def __init__( referenced_frames.append(frame_source_frames[0]) # Check that this doesn't have a conflicting orientation - if self.shared_image_orientation is not None: + if shared_image_orientation is not None: if hasattr(frame_item, 'PlaneOrientationSequence'): iop = ( frame_item .PlaneOrientationSequence[0] .ImageOrientationPatient ) - if iop != self.shared_image_orientation: - self.shared_image_orientation = None + if iop != shared_image_orientation: + shared_image_orientation = None # Summarise if any( @@ -435,13 +435,12 @@ def __init__( col_data.append(dim_values[t]) # Volume related information - self.number_of_volume_positions: Optional[int] = None - self.affine: Optional[np.ndarray] = None + self.volume_geometry: Optional[VolumeGeometry] = None if ( self._coordinate_system == CoordinateSystemNames.PATIENT - and self.shared_image_orientation is not None + and shared_image_orientation is not None ): - if self.shared_image_orientation is not None: + if shared_image_orientation is not None: if image_position_tag in self._dim_ind_pointers: image_positions = dim_values[image_position_tag] else: @@ -450,21 +449,22 @@ def __init__( ] volume_spacing, volume_positions = get_volume_positions( image_positions=image_positions, - image_orientation=self.shared_image_orientation, + image_orientation=shared_image_orientation, allow_missing=True, allow_duplicates=True, spacing_hint=slice_spacing_hint, ) if volume_positions is not None: origin_slice_index = volume_positions.index(0) - 
self.number_of_volume_positions = max(volume_positions) + 1 - self.affine = _create_affine_transformation_matrix( + number_of_slices = max(volume_positions) + 1 + self.volume_geometry = VolumeGeometry.from_attributes( image_position=image_positions[origin_slice_index], - image_orientation=self.shared_image_orientation, - pixel_spacing=self.shared_pixel_spacing, + image_orientation=shared_image_orientation, + rows=dataset.Rows, + columns=dataset.Columns, + pixel_spacing=shared_pixel_spacing, + number_of_frames=number_of_slices, spacing_between_slices=volume_spacing, - index_convention=VOLUME_INDEX_CONVENTION, - slices_first=True, ) col_defs.append('VolumePosition INTEGER NOT NULL') col_data.append(volume_positions) @@ -803,7 +803,7 @@ def get_image_position_at_volume_position( volume_position: int Zero-based index into the slice positions within the implied volume. Must be an integer between >= 0 and < - ``number_of_volume_positions``. + ``volume_geometry.spatial_shape[0]``. Returns ------- @@ -815,7 +815,7 @@ def get_image_position_at_volume_position( """ - if self.number_of_volume_positions is None: + if self.volume_geometry is None: raise RuntimeError( "This image does not represent a regularly-spaced 3D volume." ) @@ -824,11 +824,13 @@ def get_image_position_at_volume_position( raise ValueError( "Argument 'volume_position' should be non-negative." ) - elif volume_position >= self.number_of_volume_positions: - raise ValueError( - "Value of {volume_position} for argument 'volume_position' " - "is not valid for image with " - ) + else: + n_slices = self.volume_geometry.spatial_shape[0] + if volume_position >= n_slices: + raise ValueError( + f"Value of {volume_position} for argument 'volume_position' " + f"is not valid for image with {n_slices} volume positions." + ) cur = self._db_con.cursor() @@ -845,47 +847,6 @@ def get_image_position_at_volume_position( image_position = list(list(cur.execute(query))[0]) return image_position - def get_volume_affine( - self, - slice_start: int = 0, - ) -> np.ndarray: - """Get the affine matrix for the implied volume. - - This requires that the image represents a regularly-spaced 3D volume. - - Parameters - ---------- - slice_start: int, optional - Zero-based index into the slice positions within the implied - volume marking the beginning of the relevant region. - - Returns - ------- - numpy.ndarray: - 4 x 4 affine matrix. - - """ - if self.number_of_volume_positions is None: - raise RuntimeError( - "This image does not represent a regularly-spaced 3D volume." - ) - - if slice_start < 0: - raise ValueError( - "Argument 'slice_start' should be non-negative." - ) - elif slice_start >= self.number_of_volume_positions: - raise ValueError( - f"Value of {slice_start} for argument 'slice_start' " - 'is not valid for image with ' - f'{self.number_of_volume_positions} volume positions.' - ) - - if slice_start == 0: - return self.affine - else: - return _translate_affine_matrix(self.affine, [slice_start, 0, 0]) - @contextmanager def _generate_temp_table( self, diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 81f23d3a..515e25c4 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -87,7 +87,7 @@ _check_long_string, ) from highdicom.uid import UID as hd_UID -from highdicom.volume import Volume +from highdicom.volume import Volume, VolumeGeometry logger = logging.getLogger(__name__) @@ -583,7 +583,7 @@ def iterate_indices_for_volume( numpy arrays directly. 
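# [Editor's note] Hypothetical usage, not part of the patch, of the new
# 'volume_geometry' property checked above: callers should verify that it is
# not None before treating a segmentation as a regularly-spaced volume.
# 'seg.dcm' is a placeholder path.
from highdicom.seg import segread

seg = segread('seg.dcm')
if seg.volume_geometry is not None:
    n_slices = seg.volume_geometry.spatial_shape[0]
    volume = seg.get_volume()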
""" # noqa: E501 - if self.number_of_volume_positions is None: + if self.volume_geometry is None: raise RuntimeError( 'This segmentation does not represent a regularly-spaced ' 'volume.' @@ -3554,25 +3554,13 @@ def segmented_property_types(self) -> List[CodedConcept]: return types @property - def number_of_volume_positions(self) -> Optional[int]: - """Union[int, None]: Number of volume positions, if the segmentation - represents a regularly-spaced 3D volume. ``None`` otherwise. + def volume_geometry(self) -> Optional[VolumeGeometry]: + """Union[highdicom.VolumeGeometry, None]: Geometry of the volume if the + segmentation represents a regularly-spaced 3D volume. ``None`` + otherwise. """ - return self._db_man.number_of_volume_positions - - @property - def spacing_between_slices(self) -> Optional[float]: - """Union[float, None]: Spacing between slices in the frame of reference - coordinate system if the segmentation represents a regularly-spaced 3D - volume. ``None`` otherwise. - - """ - if self._db_man.affine is None: - return None - slice_vec = self._db_man.affine[:3, 0] - spacing = np.sqrt((slice_vec ** 2).sum()).item() - return spacing + return self._db_man.volume_geometry def _get_pixels_by_seg_frame( self, @@ -4481,11 +4469,11 @@ def get_volume( 'Segment numbers may not be empty.' ) - if self.number_of_volume_positions is None: + if self.volume_geometry is None: raise RuntimeError( "This segmentation is not a regularly-spaced 3D volume." ) - n_vol_positions = self.number_of_volume_positions + n_vol_positions = self.volume_geometry.spatial_shape[0] if slice_start < 0: slice_start = n_vol_positions + slice_start @@ -4532,7 +4520,7 @@ def get_volume( dtype=dtype, ) - affine = self._db_man.get_volume_affine(slice_start) + affine = self._db_man.volume_geometry[slice_start].affine return Volume( array=array, diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index a67a9659..78ea1861 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -3113,7 +3113,7 @@ def get_volume_positions( slice spacing and the volume indices for each of the input positions. If the positions do not represent a volume, returns None for both outputs. - Note that we stipulate that a single image is a 3D volume for the purposes + Note that we stipulate that a single plane is a 3D volume for the purposes of this function. In this case, and it ``spacing_hint`` is not provied, the returned slice spacing will be 1.0. @@ -3221,8 +3221,10 @@ def get_volume_positions( "Argument 'image_positions' should contain at least 1 position." 
)
         )
     elif n == 1:
-        # Special case, we stipluate that this has spacing 0.0
-        return 1.0, [0]
+        # Special case, we stipulate that this has spacing 1.0
+        # if not otherwise specified
+        spacing = 1.0 if spacing_hint is None else spacing_hint
+        return spacing, [0]
 
     normal_vector = get_normal_vector(
         image_orientation,
@@ -3243,6 +3245,12 @@ def get_volume_positions(
         unique_positions = image_positions_arr
         unique_index = np.arange(image_positions_arr.shape[0])
 
+    if len(unique_positions) == 1:
+        # Special case, we stipulate that this has spacing 1.0
+        # if not otherwise specified
+        spacing = 1.0 if spacing_hint is None else spacing_hint
+        return spacing, [0] * n
+
     # Calculate distance of each slice from coordinate system origin along the
     # normal vector
     origin_distances = _get_slice_distances(unique_positions, normal_vector)
@@ -3288,7 +3296,7 @@ def get_volume_positions(
     spacing = spacings.mean()
 
     if spacing_hint is not None:
-        if not np.isclose(spacing, spacing_hint):
+        if not np.isclose(abs(spacing), spacing_hint):
             raise RuntimeError(
                 "Inferred spacing does not match the given 'spacing_hint'."
             )
diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index 3328e9eb..3426eec8 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -54,10 +54,9 @@
 # TODO support slide coordinate system
 # TODO volume to volume transformer
 # TODO volread and metadata
-# TODO make RIGHT handed the default
 # TODO constructors for geometry, do they make sense for volume?
-# TODO ordering of frames in seg, applying 3D
-# TODO use VolumeGeometry within MultiFrameDB manager
+# TODO ordering of frames in seg, setting 3D dimension organization
+# TODO tests for equality of geometry
 
 
 class _VolumeBase(ABC):
@@ -1206,6 +1205,77 @@ def __init__(
             raise ValueError("Argument 'spatial_shape' must have length 3.")
         self._spatial_shape = tuple(spatial_shape)
 
+    @classmethod
+    def from_attributes(
+        cls,
+        image_position: Sequence[float],
+        image_orientation: Sequence[float],
+        rows: int,
+        columns: int,
+        pixel_spacing: Sequence[float],
+        spacing_between_slices: float,
+        number_of_frames: int,
+        frame_of_reference_uid: Optional[str] = None,
+    ) -> "VolumeGeometry":
+        """Create a volume geometry from DICOM attributes.
+
+        Parameters
+        ----------
+        image_position: Sequence[float]
+            Position in the frame of reference space of the center of the top
+            left pixel of the image. Corresponds to the DICOM attribute
+            "ImagePositionPatient". Should be a sequence of length 3.
+        image_orientation: Sequence[float]
+            Cosines of the row direction (first triplet: horizontal, left to
+            right, increasing column index) and the column direction (second
+            triplet: vertical, top to bottom, increasing row index) direction
+            expressed in the three-dimensional patient or slide coordinate
+            system defined by the frame of reference. Corresponds to the DICOM
+            attribute "ImageOrientationPatient".
+        rows: int
+            Number of rows in each frame.
+        columns: int
+            Number of columns in each frame.
+        pixel_spacing: Sequence[float]
+            Spacing between pixels in millimeter unit along the column
+            direction (first value: spacing between rows, vertical, top to
+            bottom, increasing row index) and the row direction (second value:
+            spacing between columns: horizontal, left to right, increasing
+            column index). Corresponds to DICOM attribute "PixelSpacing".
+        spacing_between_slices: float
+            Spacing between slices in millimeter units in the frame of
+            reference coordinate system space. Corresponds to the DICOM
+            attribute "SpacingBetweenSlices" (however, this may not be present in
+            many images and may need to be inferred from "ImagePositionPatient"
+            attributes of consecutive slices).
+        number_of_frames: int
+            Number of frames in the volume.
+        frame_of_reference_uid: Union[str, None], optional
+            Frame of reference UID, if known. Corresponds to DICOM attribute
+            FrameOfReferenceUID.
+
+        Returns
+        -------
+        highdicom.VolumeGeometry:
+            New geometry defined by the given DICOM attributes.
+
+        """
+        affine = _create_affine_transformation_matrix(
+            image_position=image_position,
+            image_orientation=image_orientation,
+            pixel_spacing=pixel_spacing,
+            spacing_between_slices=spacing_between_slices,
+            index_convention=VOLUME_INDEX_CONVENTION,
+            slices_first=True,
+        )
+        spatial_shape = (number_of_frames, rows, columns)
+
+        return cls(
+            affine=affine,
+            spatial_shape=spatial_shape,
+            frame_of_reference_uid=frame_of_reference_uid,
+        )
+
     @property
     def spatial_shape(self) -> Tuple[int, int, int]:
         """Tuple[int, int, int]: Spatial shape of the array.
@@ -1336,6 +1406,28 @@ def permute_axes(self, indices: Sequence[int]) -> 'VolumeGeometry':
             frame_of_reference_uid=self.frame_of_reference_uid,
         )
 
+    def create_volume(self, array: np.ndarray) -> 'Volume':
+        """Create a volume using this geometry and an array.
+
+        Parameters
+        ----------
+        array: numpy.ndarray
+            Array of voxel data. Must be either 3D (three spatial dimensions),
+            or 4D (three spatial dimensions followed by a channel dimension).
+            Any datatype is permitted.
+
+        Returns
+        -------
+        highdicom.Volume:
+            Volume object using this geometry and the given array.
+
+        """
+        return Volume(
+            array=array,
+            affine=self.affine,
+            frame_of_reference_uid=self.frame_of_reference_uid,
+        )
+
 
 class Volume(_VolumeBase):
 
diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py
index 64d54a1f..5e4ed690 100644
--- a/tests/test_multiframe.py
+++ b/tests/test_multiframe.py
@@ -20,9 +20,8 @@ def test_slice_spacing():
             [0.0, 0.0, 0.0, 1.0],
         ]
     )
-    print(db.affine)
-    assert db.number_of_volume_positions == 2
-    assert np.array_equal(db.affine, expected_affine)
+    assert db.volume_geometry.spatial_shape[0] == 2
+    assert np.array_equal(db.volume_geometry.affine, expected_affine)
 
 
 def test_slice_spacing_irregular():
@@ -36,5 +35,4 @@ def test_slice_spacing_irregular():
 
     db = MultiFrameDBManager(ct_multiframe)
 
-    assert db.number_of_volume_positions is None
-    assert db.affine is None
+    assert db.volume_geometry is None
diff --git a/tests/test_seg.py b/tests/test_seg.py
index 1a8ffae5..28b3e100 100644
--- a/tests/test_seg.py
+++ b/tests/test_seg.py
@@ -3807,7 +3807,7 @@ def test_get_volume_binary(self):
             .PixelSpacing
         )
         assert vol.spacing_between_slices == (
-            self._ct_binary_seg.spacing_between_slices
+            self._ct_binary_seg.volume_geometry.spacing_between_slices
         )
         assert vol.direction_cosines == (
             self._ct_binary_seg
@@ -3834,7 +3834,7 @@ def test_get_volume_binary_multisegments(self):
             .PixelSpacing
         )
         assert vol.spacing_between_slices == (
-            self._ct_binary_overlap_seg.spacing_between_slices
+            self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices
         )
         assert vol.direction_cosines == (
             self._ct_binary_overlap_seg
@@ -3861,7 +3861,7 @@ def test_get_volume_binary_multisegment2(self):
             .PixelSpacing
         )
         assert vol.spacing_between_slices == (
-            self._ct_binary_overlap_seg.spacing_between_slices
+            self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices
         )
         assert vol.direction_cosines == (
             self._ct_binary_overlap_seg
@@ -3881,7 +3891,7 @@ def 
test_get_volume_binary_multisegment_combine(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_overlap_seg.spacing_between_slices + self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_overlap_seg @@ -3920,7 +3920,7 @@ def test_get_volume_binary_multisegment_slice_start(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_overlap_seg.spacing_between_slices + self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_overlap_seg @@ -3949,7 +3949,7 @@ def test_get_volume_binary_multisegment_slice_start_negative(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_overlap_seg.spacing_between_slices + self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_overlap_seg @@ -3978,7 +3978,7 @@ def test_get_volume_binary_multisegment_slice_end(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_overlap_seg.spacing_between_slices + self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_overlap_seg @@ -4007,7 +4007,7 @@ def test_get_volume_binary_multisegment_slice_end_negative(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_overlap_seg.spacing_between_slices + self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_overlap_seg @@ -4037,7 +4037,7 @@ def test_get_volume_binary_multisegment_center(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg.spacing_between_slices + self._ct_binary_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_overlap_seg @@ -4063,7 +4063,7 @@ def test_get_volume_binary_combine(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_binary_seg.spacing_between_slices + self._ct_binary_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_binary_seg @@ -4089,7 +4089,7 @@ def test_get_volume_fractional(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_true_fractional_seg.spacing_between_slices + self._ct_true_fractional_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_true_fractional_seg @@ -4116,7 +4116,7 @@ def test_get_volume_fractional_noscale(self): .PixelSpacing ) assert vol.spacing_between_slices == ( - self._ct_true_fractional_seg.spacing_between_slices + self._ct_true_fractional_seg.volume_geometry.spacing_between_slices ) assert vol.direction_cosines == ( self._ct_true_fractional_seg From 241a53992444a500d8d015ca8ea23324157f4bec Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 11 Aug 2024 17:37:58 -0400 Subject: [PATCH 54/93] Add rotation_for_patient_orientation --- src/highdicom/spatial.py | 49 ++++++++++++++++++++++++++++++++++++++++ tests/test_spatial.py | 37 +++++++++++++++++++++++++++++- 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 78ea1861..97f26d3e 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -1268,6 +1268,55 @@ def _create_inv_affine_transformation_matrix( ) +def rotation_for_patient_orientation( + patient_orientation: Union[ + str, + Sequence[Union[str, PatientOrientationValuesBiped]], + ], + spacing: Union[float, 
Sequence[float]] = 1.0, +) -> np.ndarray: + """Create a (scaled) rotation matrix for a given patient orientation. + + The result is an axis-aligned rotation matrix. + + Parameters + ---------- + patient_orientation: Union[str, Sequence[Union[str, highdicom.enum.PatientOrientationValuesBiped]]] + Desired patient orientation, as either a sequence of three + highdicom.enum.PatientOrientationValuesBiped values, or a string + such as ``"FPL"`` using the same characters. + spacing: Union[float, Sequence[float]], optional + Spacing between voxels along each of the three dimensions in the frame + of reference coordinate system in pixel units. + + Returns + ------- + numpy.ndarray: + (Scaled) rotation matrix of shape (3 x 3). + + """ # noqa: E501 + norm_orientation = _normalize_patient_orientation(patient_orientation) + + direction_to_vector_mapping = { + PatientOrientationValuesBiped.L: np.array([ 1., 0., 0.]), + PatientOrientationValuesBiped.R: np.array([-1., 0., 0.]), + PatientOrientationValuesBiped.P: np.array([ 0., 1., 0.]), + PatientOrientationValuesBiped.A: np.array([ 0., -1., 0.]), + PatientOrientationValuesBiped.H: np.array([ 0., 0., 1.]), + PatientOrientationValuesBiped.F: np.array([ 0., 0., -1.]), + } + + if isinstance(spacing, float): + spacing = [spacing] * 3 + + return np.column_stack( + [ + s * direction_to_vector_mapping[d] + for d, s in zip(norm_orientation, spacing) + ] + ) + + def _transform_affine_matrix( affine: np.ndarray, shape: Sequence[int], diff --git a/tests/test_spatial.py b/tests/test_spatial.py index 1048b095..d0e65033 100644 --- a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -16,9 +16,10 @@ _transform_affine_matrix, create_rotation_matrix, get_closest_patient_orientation, - get_volume_positions, get_series_volume_positions, + get_volume_positions, is_tiled_image, + rotation_for_patient_orientation, ) @@ -746,6 +747,40 @@ def test_get_closest_patient_orientation( ) == codes +@pytest.mark.parametrize( + 'orientation_str', + ['LPH', 'PLF', 'RPF', 'FLA'] +) +def test_rotation_from_patient_orientation( + orientation_str, +): + codes = _normalize_patient_orientation(orientation_str) + rotation_matrix = rotation_for_patient_orientation( + orientation_str + ) + assert get_closest_patient_orientation( + rotation_matrix + ) == codes + + +def test_rotation_from_patient_orientation_spacing(): + rotation_matrix = rotation_for_patient_orientation( + ['F', 'P', 'L'], + spacing=(1.0, 2.0, 2.5) + ) + expected = np.array( + [ + [ 0.0, 0.0, 2.5], + [ 0.0, 2.0, 0.0], + [-1.0, 0.0, 0.0], + ] + ) + assert np.array_equal( + rotation_matrix, + expected, + ) + + all_single_image_transformer_classes = [ ImageToReferenceTransformer, PixelToReferenceTransformer, From a8cfe821c4f9840ffd78fee7def4411495dc0ece Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 18 Aug 2024 08:58:56 -0400 Subject: [PATCH 55/93] Add volume to volume transformer --- src/highdicom/__init__.py | 13 +---- src/highdicom/volume.py | 117 ++++++++++++++++++++++++++++++++++++++ tests/test_volume.py | 115 ++++++++++++++++++++++++++++++++++++- 3 files changed, 233 insertions(+), 12 deletions(-) diff --git a/src/highdicom/__init__.py b/src/highdicom/__init__.py index 7bc7cd4e..78abc09c 100644 --- a/src/highdicom/__init__.py +++ b/src/highdicom/__init__.py @@ -58,12 +58,8 @@ from highdicom.uid import UID from highdicom import utils from highdicom.version import __version__ -from highdicom.volume import ( - Volume, - VolumeGeometry, - volread, - concat_channels, -) +from highdicom import volume + __all__ = [ 'LUT', @@ 
-108,12 +104,9 @@
     'UniversalEntityIDTypeValues',
     'VOILUTFunctionValues',
     'VOILUTTransformation',
-    'Volume',
-    'VolumeGeometry',
     '__version__',
     'ann',
     'color',
-    'concat_channels',
     'frame',
     'io',
     'ko',
@@ -125,5 +118,5 @@
     'spatial',
     'sr',
     'utils',
-    'volread',
+    'volume',
 ]
diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index 3426eec8..c0b113f5 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -2489,6 +2489,123 @@ def concat_channels(volumes: Sequence[Volume]) -> Volume:
     )
 
 
+class VolumeToVolumeTransformer:
+
+    """Class for transforming voxel indices between two volumes.
+
+    """
+
+    def __init__(
+        self,
+        volume_from: Union[Volume, VolumeGeometry],
+        volume_to: Union[Volume, VolumeGeometry],
+        round_output: bool = False,
+        check_bounds: bool = False,
+    ):
+        """Construct transformation object.
+
+        The resulting object will map volume indices of the "from" volume to
+        volume indices of the "to" volume.
+
+        Parameters
+        ----------
+        volume_from: Union[highdicom.volume.Volume, highdicom.volume.VolumeGeometry]
+            Volume to which input volume indices refer.
+        volume_to: Union[highdicom.volume.Volume, highdicom.volume.VolumeGeometry]
+            Volume to which output volume indices refer.
+        round_output: bool, optional
+            Whether to round the output to the nearest integer (if ``True``) or
+            return with sub-voxel accuracy as floats (if ``False``).
+        check_bounds: bool, optional
+            Whether to perform a bounds check before returning the output
+            indices. Note there is no bounds check on the input indices.
+
+        """
+        self._affine = volume_to.inverse_affine @ volume_from.affine
+        self._output_shape = volume_to.spatial_shape
+        self._round_output = round_output
+        self._check_bounds = check_bounds
+
+    @property
+    def affine(self) -> np.ndarray:
+        """numpy.ndarray: 4x4 affine transformation matrix"""
+        return self._affine.copy()
+
+    def __call__(self, indices: np.ndarray) -> np.ndarray:
+        """Transform volume indices between two volumes.
+
+        Parameters
+        ----------
+        indices: numpy.ndarray
+            Array of voxel indices in the "from" volume. Array of integer or
+            floating-point values with shape ``(n, 3)``, where *n* is the
+            number of coordinates. The order of the three indices corresponds
+            to the three spatial dimensions of the volume, in that order.
+            Point ``(0, 0, 0)`` refers to the center of the voxel at index
+            ``(0, 0, 0)`` in the array.
+
+        Returns
+        -------
+        numpy.ndarray
+            Array of indices in the output volume that spatially correspond to
+            the indices in the input array. This will have an integer datatype
+            if ``round_output`` is ``True`` and a floating point datatype
+            otherwise. The output datatype will be matched to the input
+            datatype if possible, otherwise either ``np.int64`` or
+            ``np.float64`` is used.
+
+        Raises
+        ------
+        ValueError
+            If ``check_bounds`` is ``True`` and the output indices would
+            otherwise contain invalid indices for the "to" volume.
+
+        """
+        if indices.ndim != 2 or indices.shape[1] != 3:
+            raise ValueError(
+                'Argument "indices" must be a two-dimensional array '
+                'with shape [n, 3].'
+            )
+        input_is_int = indices.dtype.kind == 'i'
+        augmented_input = np.row_stack(
+            [
+                indices.T,
+                np.ones((indices.shape[0], ), dtype=indices.dtype),
+            ]
+        )
+        augmented_output = np.dot(self._affine, augmented_input)
+        output_indices = augmented_output[:3, :].T
+
+        if self._round_output:
+            output_dtype = indices.dtype if input_is_int else np.int64
+            output_indices = np.around(output_indices).astype(output_dtype)
+        else:
+            if not input_is_int:
+                output_indices = output_indices.astype(indices.dtype)
+
+        if self._check_bounds:
+            bounds_fail = False
+            min_indices = np.min(output_indices, axis=0)
+            max_indices = np.max(output_indices, axis=0)
+
+            for shape, min_ind, max_ind in zip(
+                self._output_shape,
+                min_indices,
+                max_indices,
+            ):
+                if min_ind < -0.5:
+                    bounds_fail = True
+                    break
+                if max_ind > shape - 0.5:
+                    bounds_fail = True
+                    break
+
+            if bounds_fail:
+                raise ValueError("Bounds check failed.")
+
+        return output_indices
+
+
 def volread(
     fp: Union[str, bytes, PathLike, List[Union[str, PathLike]]],
     glob: str = '*.dcm',
diff --git a/tests/test_volume.py b/tests/test_volume.py
index 1f68f991..ebe1ee20 100644
--- a/tests/test_volume.py
+++ b/tests/test_volume.py
@@ -6,8 +6,13 @@
 from highdicom.spatial import _normalize_patient_orientation
 
-from highdicom.volume import Volume, concat_channels, volread
-from highdicom import UID
+from highdicom.volume import (
+    Volume,
+    VolumeGeometry,
+    VolumeToVolumeTransformer,
+    concat_channels,
+    volread,
+)
 
 
 def read_multiframe_ct_volume():
@@ -407,6 +412,112 @@ def test_to_patient_orientation(desired):
     assert flipped.get_closest_patient_orientation() == desired_tup
 
 
+def test_volume_transformer():
+
+    geometry = VolumeGeometry(
+        np.eye(4),
+        [32, 32, 32],
+    )
+
+    indices = np.array(
+        [
+            [0, 0, 0],
+            [0, 0, 1],
+        ]
+    )
+
+    expected = np.array(
+        [
+            [1, 5, 8],
+            [1, 5, 9],
+        ]
+    )
+
+    geometry2 = geometry[1:11, 5:15, 8:18]
+
+    for round_output in [False, True]:
+        for check_bounds in [False, True]:
+            transformer = VolumeToVolumeTransformer(
+                geometry2,
+                geometry,
+                check_bounds=check_bounds,
+                round_output=round_output,
+            )
+
+            outputs = transformer(indices)
+            if round_output:
+                assert outputs.dtype == np.int64
+            else:
+                assert outputs.dtype == np.float64
+            assert np.array_equal(outputs, expected)
+
+    transformer = VolumeToVolumeTransformer(
+        geometry2,
+        geometry,
+        check_bounds=True,
+    )
+    out_of_bounds_indices = np.array([[31, 0, 0]])
+    with pytest.raises(ValueError):
+        transformer(out_of_bounds_indices)
+
+    expected = np.array(
+        [
+            [-1, -5, -8],
+            [-1, -5, -7],
+        ]
+    )
+    for round_output in [False, True]:
+        transformer = VolumeToVolumeTransformer(
+            geometry,
+            geometry2,
+            round_output=round_output,
+        )
+
+        outputs = transformer(indices)
+        if round_output:
+            assert outputs.dtype == np.int64
+        else:
+            assert outputs.dtype == np.float64
+        assert np.array_equal(outputs, expected)
+
+    transformer = VolumeToVolumeTransformer(
+        geometry,
+        geometry2,
+        check_bounds=True,
+    )
+    for oob_indices in [
+        [0, 5, 8],
+        [0, 0, 1],
+        [11, 5, 8],
+    ]:
+        with pytest.raises(ValueError):
+            transformer(np.array([oob_indices]))
+
+    geometry3 = geometry2.permute_axes([2, 1, 0])
+    expected = np.array(
+        [
+            [1, 5, 8],
+            [2, 5, 8],
+        ]
+    )
+
+    for round_output in [False, True]:
+        for check_bounds in [False, True]:
+            transformer = VolumeToVolumeTransformer(
+                geometry3,
+                geometry,
+                check_bounds=check_bounds,
+                round_output=round_output,
+            )
+
+            outputs = transformer(indices)
+            if round_output:
+                assert outputs.dtype == np.int64
+            else:
+                assert outputs.dtype == np.float64
+            assert np.array_equal(outputs, expected)
+
+
 @pytest.mark.parametrize(
     'fp,glob',
     [

From 3aaebcd93fc9cf6783b02bd9dd417700d027ab3f Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Sun, 18 Aug 2024 09:34:06 -0400
Subject: [PATCH 56/93] WIP on match geometry

---
 src/highdicom/volume.py | 95 ++++++++++++++++++++++++++++++++++------
 1 file changed, 79 insertions(+), 16 deletions(-)

diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index c0b113f5..cec658c2 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -21,6 +21,7 @@
     _stack_affine_matrix,
     _transform_affine_matrix,
     _translate_affine_matrix,
+    _DEFAULT_EQUALITY_TOLERANCE,
     PATIENT_ORIENTATION_OPPOSITES,
     VOLUME_INDEX_CONVENTION,
     get_closest_patient_orientation,
@@ -385,10 +386,10 @@ def inverse_affine(self) -> np.ndarray:
         return np.linalg.inv(self._affine)
 
     @property
-    def direction_cosines(self) -> List[float]:
-        """List[float]:
+    def direction_cosines(self) -> Tuple[float, float, float, float, float, float]:
+        """Tuple[float, float, float, float, float, float]:
 
-        List of 6 floats giving the direction cosines of the
+        Tuple of 6 floats giving the direction cosines of the
         vector along the rows and the vector along the columns, matching the
         format of the DICOM Image Orientation Patient attribute.
 
@@ -397,11 +398,11 @@ def direction_cosines(self) -> List[float]:
         vec_along_columns = self._affine[:3, 1].copy()
         vec_along_columns /= np.sqrt((vec_along_columns ** 2).sum())
         vec_along_rows /= np.sqrt((vec_along_rows ** 2).sum())
-        return [*vec_along_rows.tolist(), *vec_along_columns.tolist()]
+        return tuple([*vec_along_rows.tolist(), *vec_along_columns.tolist()])
 
     @property
-    def pixel_spacing(self) -> List[float]:
-        """List[float]:
+    def pixel_spacing(self) -> Tuple[float, float]:
+        """Tuple[float, float]:
 
         Within-plane pixel spacing in millimeter units. Two
         values (spacing between rows, spacing between columns).
 
@@ -411,7 +412,7 @@ def pixel_spacing(self) -> List[float]:
         vec_along_columns = self._affine[:3, 1]
         spacing_between_columns = np.sqrt((vec_along_rows ** 2).sum()).item()
         spacing_between_rows = np.sqrt((vec_along_columns ** 2).sum()).item()
-        return [spacing_between_rows, spacing_between_columns]
+        return spacing_between_rows, spacing_between_columns
 
     @property
     def spacing_between_slices(self) -> float:
@@ -425,8 +426,8 @@ def spacing_between_slices(self) -> float:
         return spacing
 
     @property
-    def spacing(self) -> List[float]:
-        """List[float]:
+    def spacing(self) -> Tuple[float, float, float]:
+        """Tuple[float, float, float]:
 
         Pixel spacing in millimeter units for the three spatial directions.
         Three values, one for each spatial dimension.
 
@@ -434,7 +435,7 @@ def spacing(self) -> List[float]:
         """
         dir_mat = self._affine[:3, :3]
         norms = np.sqrt((dir_mat ** 2).sum(axis=0))
-        return norms.tolist()
+        return tuple(norms.tolist())
 
     @property
     def voxel_volume(self) -> float:
@@ -442,19 +443,21 @@ def voxel_volume(self) -> float:
         return np.prod(self.spacing).item()
 
     @property
-    def position(self) -> List[float]:
-        """List[float]:
+    def position(self) -> Tuple[float, float, float]:
+        """Tuple[float, float, float]:
 
         Position in the frame of reference space of the center of voxel at
         indices (0, 0, 0).
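# [Editor's note] Sketch, not part of the patch, of the transformer added in
# the preceding commit: voxel indices in a cropped geometry map back to
# indices in the original geometry through the two affines.
import numpy as np
from highdicom.volume import VolumeGeometry, VolumeToVolumeTransformer

geometry = VolumeGeometry(np.eye(4), [32, 32, 32])
cropped = geometry[1:11, 5:15, 8:18]
transformer = VolumeToVolumeTransformer(cropped, geometry)
assert np.array_equal(transformer(np.array([[0, 0, 0]])), [[1, 5, 8]])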
""" - return self._affine[:3, 3].tolist() + return tuple(self._affine[:3, 3].tolist()) @property - def physical_extent(self) -> List[float]: + def physical_extent(self) -> Tuple[float, float, float]: """List[float]: Side lengths of the volume in millimeters.""" - return [n * d for n, d in zip(self.spatial_shape, self.spacing)] + return tuple( + [n * d for n, d in zip(self.spatial_shape, self.spacing)] + ) @property def physical_volume(self) -> float: @@ -678,7 +681,6 @@ def _prepare_pad_width( raise ValueError( f"Argument 'pad_width' cannot contain negative values." ) - origin_offset = [-pad_width] * 3 full_pad_width: List[List[int]] = [[pad_width, pad_width]] * 3 elif isinstance(pad_width, Sequence): if isinstance(pad_width[0], int): @@ -1173,6 +1175,67 @@ def random_crop(self, spatial_shape: Sequence[int]) -> '_VolumeBase': return self[tuple(crop_slices)] + def match_geometry( + self, + other: Union['Volume', 'VolumeGeometry'], + mode: PadModes = PadModes.CONSTANT, + constant_value: float = 0.0, + per_channel: bool = False, + tol: float = _DEFAULT_EQUALITY_TOLERANCE, + ) -> '_VolumeBase': + """ + + """ + + permute_indices = [] + step_sizes = [] + for u, s in zip(self.unit_vectors(), self.spacing): + for j, (v, t) in enumerate( + zip(other.unit_vectors(), other.spacing) + ): + dot_product = u @ v + if np.abs(dot_product) - 1.0 < tol: + + permute_indices.append(j) + + scale_factor = t / s + step = np.round(scale_factor) + if abs(scale_factor - step) > tol: + raise RuntimeError( + "Non-integer scale factor required." + ) + + if dot_product < 0.0: + step = -step + + step_sizes.append(step) + + break + else: + raise RuntimeError( + "Direction vectors could not be aligned." + ) + + if permute_indices != [0, 1, 2]: + new_volume = self.permute_axes(permute_indices) + else: + new_volume = self + + # Now figure out cropping + + # Finally padding + + + if new_volume is self: + # TODO make sure cannot return self + return self.copy() + + return new_volume + + + + + class VolumeGeometry(_VolumeBase): From f178ac1a6848544015d59e2f4ac3f63b88e835e8 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 18 Aug 2024 19:04:56 -0400 Subject: [PATCH 57/93] Add match_geometry method, tests --- src/highdicom/volume.py | 272 ++++++++++++++++++++++++++++++++-------- tests/test_seg.py | 53 ++++---- tests/test_volume.py | 159 +++++++++++++++++++++-- 3 files changed, 393 insertions(+), 91 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index cec658c2..a2bb0ae6 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -50,14 +50,11 @@ # TODO add pixel value transformations # TODO should methods copy arrays? # TODO random crop, random flip, random permute -# TODO match geometry # TODO trim non-zero # TODO support slide coordinate system -# TODO volume to volume transformer # TODO volread and metadata # TODO constructors for geometry, do they make sense for volume? # TODO ordering of frames in seg, setting 3D dimension organization -# TODO tests for equality of geometry class _VolumeBase(ABC): @@ -631,6 +628,7 @@ def _check_slice(val: slice, dim: int) -> None: def pad( self, pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]], + *, mode: Union[PadModes, str] = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, @@ -741,6 +739,18 @@ def _permute_affine(self, indices: Sequence[int]) -> np.ndarray: permute_indices=indices, ) + @abstractmethod + def copy(self) -> '_VolumeBase': + """Create a copy of the object. 
+ + Returns + ------- + highdicom.volume._VolumeBase: + Copy of the original object. + + """ + pass + @abstractmethod def permute_axes(self, indices: Sequence[int]) -> '_VolumeBase': """Create a new volume by permuting the spatial axes. @@ -799,7 +809,7 @@ def to_patient_orientation( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume with the requested patient orientation. """ # noqa: E501 @@ -839,7 +849,7 @@ def swap_axes(self, axis_1: int, axis_2: int) -> '_VolumeBase': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume with spatial axes swapped as requested. """ @@ -874,7 +884,7 @@ def flip(self, axis: Union[int, Sequence[int]]) -> '_VolumeBase': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume with spatial axes flipped as requested. """ @@ -908,6 +918,7 @@ def handedness(self) -> AxisHandedness: def ensure_handedness( self, handedness: Union[AxisHandedness, str], + *, flip_axis: Optional[int] = None, swap_axes: Optional[Sequence[int]] = None, ) -> '_VolumeBase': @@ -962,6 +973,7 @@ def ensure_handedness( def pad_to_shape( self, spatial_shape: Sequence[int], + *, mode: PadModes = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, @@ -999,7 +1011,7 @@ def pad_to_shape( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with padding applied. """ @@ -1039,7 +1051,7 @@ def crop_to_shape(self, spatial_shape: Sequence[int]) -> '_VolumeBase': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with padding applied. """ @@ -1069,6 +1081,7 @@ def crop_to_shape(self, spatial_shape: Sequence[int]) -> '_VolumeBase': def pad_or_crop_to_shape( self, spatial_shape: Sequence[int], + *, mode: PadModes = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, @@ -1107,7 +1120,7 @@ def pad_or_crop_to_shape( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with padding and/or cropping applied. """ @@ -1158,7 +1171,7 @@ def random_crop(self, spatial_shape: Sequence[int]) -> '_VolumeBase': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume formed by cropping the volumes. """ @@ -1175,17 +1188,89 @@ def random_crop(self, spatial_shape: Sequence[int]) -> '_VolumeBase': return self[tuple(crop_slices)] + def geometry_equal( + self, + other: Union['Volume', 'VolumeGeometry'], + tol: Optional[float] = _DEFAULT_EQUALITY_TOLERANCE, + ) -> bool: + """Determine whether two volumes have the same geometry. + + Parameters + ---------- + other: Union[highdicom.volume.Volume, highdicom.volume.VolumeGeometry] + Volume or volume geometry to which this volume should be compared. + tol: Union[float, None], optional + Absolute Tolerance used to determine equality of affine matrices. + If None, affine matrices must match exactly. + + Return + ------ + bool: + True if the geometries match (up to the specified tolerance). False + otherwise. 
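+
+        A usage sketch (``vol_a`` and ``vol_b`` are assumed to be existing
+        volumes in the same frame of reference)::
+
+            if not vol_a.geometry_equal(vol_b):
+                # align by permuting/cropping/padding, without resampling
+                vol_a = vol_a.match_geometry(vol_b)
+            assert vol_a.geometry_equal(vol_b)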
+
+        """
+        if (
+            self.frame_of_reference_uid is not None and
+            other.frame_of_reference_uid is not None
+        ):
+            if self.frame_of_reference_uid != other.frame_of_reference_uid:
+                return False
+
+        if self.spatial_shape != other.spatial_shape:
+            return False
+
+        if tol is None:
+            return np.array_equal(self._affine, other._affine)
+        else:
+            return np.allclose(
+                self._affine,
+                other._affine,
+                atol=tol,
+            )
+
     def match_geometry(
         self,
         other: Union['Volume', 'VolumeGeometry'],
+        *,
         mode: PadModes = PadModes.CONSTANT,
         constant_value: float = 0.0,
         per_channel: bool = False,
         tol: float = _DEFAULT_EQUALITY_TOLERANCE,
     ) -> '_VolumeBase':
-        """
+        """Match the geometry of this volume to another.
+
+        This performs a combination of permuting, padding and cropping, and
+        flipping (in that order) such that the geometry of this volume matches
+        that of ``other``. Notably, the voxels are not resampled. If the
+        geometry cannot be matched using these operations, then a
+        ``RuntimeError`` is raised.
+
+        Parameters
+        ----------
+        other: Union[highdicom.volume.Volume, highdicom.volume.VolumeGeometry]
+            Volume or volume geometry to which this volume should be matched.
+
+        Returns
+        -------
+        highdicom.volume._VolumeBase:
+            New volume formed by matching the geometry of this volume to that
+            of ``other``.
+
+        Raises
+        ------
+        RuntimeError:
+            If the geometries cannot be matched without resampling the array.
 
         """
+        if (
+            self.frame_of_reference_uid is not None and
+            other.frame_of_reference_uid is not None
+        ):
+            if self.frame_of_reference_uid != other.frame_of_reference_uid:
+                raise RuntimeError(
+                    "Volumes do not have matching frame of reference UIDs."
+                )
 
         permute_indices = []
         step_sizes = []
@@ -1194,12 +1279,14 @@ def match_geometry(
                 zip(other.unit_vectors(), other.spacing)
             ):
                 dot_product = u @ v
-                if np.abs(dot_product) - 1.0 < tol:
-
+                if (
+                    np.abs(dot_product - 1.0) < tol or
+                    np.abs(dot_product + 1.0) < tol
+                ):
                     permute_indices.append(j)
 
                     scale_factor = t / s
-                    step = np.round(scale_factor)
+                    step = int(np.round(scale_factor))
                     if abs(scale_factor - step) > tol:
                         raise RuntimeError(
                             "Non-integer scale factor required."
@@ -1216,25 +1303,87 @@
                 "Direction vectors could not be aligned."
             )
 
-        if permute_indices != [0, 1, 2]:
+        requires_permute = permute_indices != [0, 1, 2]
+        if requires_permute:
             new_volume = self.permute_axes(permute_indices)
+            step_sizes = [step_sizes[i] for i in permute_indices]
         else:
             new_volume = self
 
         # Now figure out cropping
+        origin_offset = (
+            np.array(other.position) -
+            np.array(new_volume.position)
+        )
+
+        crop_slices = []
+        pad_values = []
+        requires_crop = False
+        requires_pad = False
+
+        for v, spacing, step, out_shape, in_shape in zip(
+            new_volume.unit_vectors(),
+            new_volume.spacing,
+            step_sizes,
+            other.spatial_shape,
+            new_volume.spatial_shape,
+        ):
+            offset = v @ origin_offset
+            start_ind = offset / spacing
+            start_pos = int(np.round(start_ind))
+            end_pos = start_pos + out_shape * step
 
-        # Finally padding
+            if abs(start_pos - start_ind) > tol:
+                raise RuntimeError(
+                    "Required translation is non-integer "
+                    "multiple of voxel spacing."
+ ) + if step > 0: + pad_before = max(-start_pos, 0) + pad_after = max(end_pos - in_shape, 0) + crop_start = start_pos + pad_before + crop_stop = end_pos + pad_before - if new_volume is self: - # TODO make sure cannot return self - return self.copy() + if crop_start > 0 or crop_stop < out_shape: + requires_crop = True + else: + pad_after = max(start_pos - in_shape + 1, 0) + pad_before = max(-end_pos - 1, 0) + crop_start = start_pos + pad_before + crop_stop = end_pos + pad_before - return new_volume + # Need the crop operation to flip + requires_crop = True + if crop_stop == -1: + crop_stop = None + if pad_before > 0 or pad_after > 0: + requires_pad = True + crop_slices.append( + slice(crop_start, crop_stop, step) + ) + pad_values.append((pad_before, pad_after)) + if not ( + requires_permute or requires_pad or requires_crop + ): + new_volume = new_volume.copy() + + if requires_pad: + new_volume = new_volume.pad( + pad_values, + mode=mode, + constant_value=constant_value, + per_channel=per_channel, + ) + + if requires_crop: + new_volume = new_volume[tuple(crop_slices)] + + return new_volume class VolumeGeometry(_VolumeBase): @@ -1319,7 +1468,7 @@ def from_attributes( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New Volume using the given array and DICOM attributes. """ @@ -1339,6 +1488,21 @@ def from_attributes( frame_of_reference_uid=frame_of_reference_uid, ) + def copy(self) -> 'VolumeGeometry': + """Get an unaltered copy of the geometry. + + Returns + ------- + highdicom.volume.VolumeGeometry: + Copy of the original geometry. + + """ + return self.__class__( + affine=self._affine.copy(), + spatial_shape=self.spatial_shape, + frame_of_reference_uid=self.frame_of_reference_uid, + ) + @property def spatial_shape(self) -> Tuple[int, int, int]: """Tuple[int, int, int]: Spatial shape of the array. @@ -1352,7 +1516,7 @@ def spatial_shape(self) -> Tuple[int, int, int]: def shape(self) -> Tuple[int, ...]: """Tuple[int, ...]: Shape of the underlying array. - For objects of type :class:`highdicom.VolumeGeometry`, this is + For objects of type :class:`highdicom.volume.VolumeGeometry`, this is equivalent to `.shape`. """ @@ -1373,7 +1537,7 @@ def __getitem__( Returns ------- - highdicom.VolumeGeometry: + highdicom.volume.VolumeGeometry: New volume representing a sub-volume of the original volume. """ @@ -1389,6 +1553,7 @@ def __getitem__( def pad( self, pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]], + *, mode: Union[PadModes, str] = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, @@ -1419,15 +1584,15 @@ def pad( In all cases, all integer values must be non-negative. mode: Union[highdicom.PadModes, str], optional - Ignored for :class:`highdicom.VolumeGeometry`. + Ignored for :class:`highdicom.volume.VolumeGeometry`. constant_value: Union[float, Sequence[float]], optional - Ignored for :class:`highdicom.VolumeGeometry`. + Ignored for :class:`highdicom.volume.VolumeGeometry`. per_channel: bool, optional - Ignored for :class:`highdicom.VolumeGeometry`. + Ignored for :class:`highdicom.volume.VolumeGeometry`. Returns ------- - highdicom.VolumeGeometry: + highdicom.volume.VolumeGeometry: Volume with padding applied. """ @@ -1455,7 +1620,7 @@ def permute_axes(self, indices: Sequence[int]) -> 'VolumeGeometry': Returns ------- - highdicom.VolumeGeometry: + highdicom.volume.VolumeGeometry: New geometry with spatial axes permuted in the provided order. 
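+
+        A brief sketch (illustrative)::
+
+            geom = VolumeGeometry(np.eye(4), [2, 16, 32])
+            geom.permute_axes([2, 1, 0]).spatial_shape  # (32, 16, 2)
+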
""" @@ -1469,8 +1634,8 @@ def permute_axes(self, indices: Sequence[int]) -> 'VolumeGeometry': frame_of_reference_uid=self.frame_of_reference_uid, ) - def create_volume(self, array: np.ndarray) -> 'Volume': - """Crrate a volume using this geometry and an array. + def with_array(self, array: np.ndarray) -> 'Volume': + """Create a volume using this geometry and an array. Parameters ---------- @@ -1481,7 +1646,7 @@ def create_volume(self, array: np.ndarray) -> 'Volume': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume objects using this geometry and the given array. """ @@ -1833,7 +1998,7 @@ def from_attributes( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New Volume using the given array and DICOM attributes. """ @@ -2007,7 +2172,7 @@ def astype(self, dtype: type) -> 'Volume': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume with given datatype, and metadata copied from this volume. @@ -2021,12 +2186,12 @@ def copy(self) -> 'Volume': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Copy of the original volume. """ return self.__class__( - array=self.array, # TODO should this copy? + array=self.array.copy(), # TODO should this copy? affine=self._affine.copy(), frame_of_reference_uid=self.frame_of_reference_uid, ) @@ -2046,7 +2211,7 @@ def with_array(self, array: np.ndarray) -> 'Volume': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume using the given array and the metadata of this volume. """ @@ -2079,7 +2244,7 @@ def __getitem__( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume representing a sub-volume of the original volume. """ @@ -2105,7 +2270,7 @@ def permute_axes(self, indices: Sequence[int]) -> 'Volume': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume with spatial axes permuted in the provided order. """ @@ -2146,7 +2311,7 @@ def normalize_mean_std( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with normalized intensities. Note that the dtype will be promoted to floating point. @@ -2195,7 +2360,7 @@ def normalize_min_max( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with normalized intensities. Note that the dtype will be promoted to floating point. @@ -2244,7 +2409,7 @@ def clip( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with clipped intensities. """ @@ -2289,7 +2454,7 @@ def apply_window( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with windowed intensities. """ @@ -2321,7 +2486,7 @@ def squeeze_channel(self) -> 'Volume': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with channel axis removed. """ @@ -2341,7 +2506,7 @@ def ensure_channel(self) -> 'Volume': Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with added channel axis (if required). """ @@ -2352,6 +2517,7 @@ def ensure_channel(self) -> 'Volume': def pad( self, pad_width: Union[int, Sequence[int], Sequence[Sequence[int]]], + *, mode: Union[PadModes, str] = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, @@ -2400,7 +2566,7 @@ def pad( Returns ------- - highdicom.Volume: + highdicom.volume.Volume: Volume with padding applied. """ @@ -2498,14 +2664,14 @@ def concat_channels(volumes: Sequence[Volume]) -> Volume: Parameters ---------- - volumes: Sequence[highdicom.Volume] + volumes: Sequence[highdicom.volume.Volume] Sequence of one or more volumes to concatenate. 
Volumes must share the same spatial shape and affine matrix, but may differ by number and presence of channels. Returns ------- - highdicom.Volume: + highdicom.volume.Volume: New volume formed by concatenating the input volumes. """ @@ -2554,7 +2720,9 @@ def concat_channels(volumes: Sequence[Volume]) -> Volume: class VolumeToVolumeTransformer: - """Class for transforming voxel indices between two volumes. + """ + + Class for transforming voxel indices between two volumes. """ @@ -2572,9 +2740,9 @@ def __init__( Parameters ---------- - volume_from: Union[highdicom.Volume, highdicom.VolumeGeometry] + volume_from: Union[highdicom.volume.Volume, highdicom.volume.VolumeGeometry] Volume to which input volume indices refer. - volume_to: Union[highdicom.Volume, highdicom.VolumeGeometry] + volume_to: Union[highdicom.volume.Volume, highdicom.volume.VolumeGeometry] Volume to which output volume indices refer. round_output: bool, optional Whether to round the output to the nearest integer (if ``True``) or @@ -2583,7 +2751,7 @@ def __init__( Whether to perform a bounds check before returning the output indices. Note there is no bounds check on the input indices. - """ + """ # noqa: E501 self._affine = volume_to.inverse_affine @ volume_from.affine self._output_shape = volume_to.spatial_shape self._round_output = round_output @@ -2687,7 +2855,7 @@ def volread( Returns ------- - highdicom.Volume + highdicom.volume.Volume Volume formed from the specified image file(s). """ diff --git a/tests/test_seg.py b/tests/test_seg.py index 28b3e100..e4e412fd 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -32,7 +32,6 @@ CoordinateSystemNames, DimensionOrganizationTypeValues, PatientOrientationValuesBiped, - PixelIndexDirections, ) from highdicom.seg import ( create_segmentation_pyramid, @@ -1481,7 +1480,7 @@ def test_construction_volume(self): omit_empty_frames=False ) assert np.array_equal( - instance.pixel_array, + np.flip(instance.pixel_array, axis=0), self._ct_seg_volume.array, ) @@ -1497,7 +1496,7 @@ def test_construction_volume(self): self._ct_volume_orientation for plane_item, pp in zip( instance.PerFrameFunctionalGroupsSequence, - self._ct_seg_volume.get_plane_positions(), + self._ct_seg_volume.get_plane_positions()[::-1], ): assert ( plane_item.PlanePositionSequence[0].ImagePositionPatient == @@ -3800,7 +3799,7 @@ def test_get_volume_binary(self): assert isinstance(vol, Volume) assert vol.spatial_shape == (3, 16, 16) assert vol.shape == (3, 16, 16, 1) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -3809,7 +3808,7 @@ def test_get_volume_binary(self): assert vol.spacing_between_slices == ( self._ct_binary_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -3827,7 +3826,7 @@ def test_get_volume_binary_multisegments(self): # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (165, 16, 16) assert vol.shape == (165, 16, 16, 2) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -3836,7 +3835,7 @@ def test_get_volume_binary_multisegments(self): assert vol.spacing_between_slices == ( self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert 
vol.direction_cosines == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -3854,7 +3853,7 @@ def test_get_volume_binary_multisegment2(self): # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (165, 16, 16) assert vol.shape == (165, 16, 16, 1) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -3863,7 +3862,7 @@ def test_get_volume_binary_multisegment2(self): assert vol.spacing_between_slices == ( self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -3884,7 +3883,7 @@ def test_get_volume_binary_multisegment_combine(self): # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (165, 16, 16) assert vol.shape == (165, 16, 16) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -3893,7 +3892,7 @@ def test_get_volume_binary_multisegment_combine(self): assert vol.spacing_between_slices == ( self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -3913,7 +3912,7 @@ def test_get_volume_binary_multisegment_slice_start(self): # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (5, 16, 16) assert vol.shape == (5, 16, 16, 2) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -3922,7 +3921,7 @@ def test_get_volume_binary_multisegment_slice_start(self): assert vol.spacing_between_slices == ( self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -3942,7 +3941,7 @@ def test_get_volume_binary_multisegment_slice_start_negative(self): # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (6, 16, 16) assert vol.shape == (6, 16, 16, 2) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -3951,7 +3950,7 @@ def test_get_volume_binary_multisegment_slice_start_negative(self): assert vol.spacing_between_slices == ( self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -3971,7 +3970,7 @@ def test_get_volume_binary_multisegment_slice_end(self): # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (17, 16, 16) assert vol.shape == (17, 16, 16, 2) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -3980,7 +3979,7 @@ def test_get_volume_binary_multisegment_slice_end(self): assert 
vol.spacing_between_slices == ( self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -4000,7 +3999,7 @@ def test_get_volume_binary_multisegment_slice_end_negative(self): # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (155, 16, 16) assert vol.shape == (155, 16, 16, 2) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -4009,7 +4008,7 @@ def test_get_volume_binary_multisegment_slice_end_negative(self): assert vol.spacing_between_slices == ( self._ct_binary_overlap_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -4030,7 +4029,7 @@ def test_get_volume_binary_multisegment_center(self): # Note that this segmentation has a large number of missing slices assert vol.spatial_shape == (7, 16, 16) assert vol.shape == (7, 16, 16, 2) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -4039,7 +4038,7 @@ def test_get_volume_binary_multisegment_center(self): assert vol.spacing_between_slices == ( self._ct_binary_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_overlap_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -4056,7 +4055,7 @@ def test_get_volume_binary_combine(self): assert isinstance(vol, Volume) assert vol.spatial_shape == (3, 16, 16) assert vol.shape == (3, 16, 16) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_binary_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -4065,7 +4064,7 @@ def test_get_volume_binary_combine(self): assert vol.spacing_between_slices == ( self._ct_binary_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_binary_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -4082,7 +4081,7 @@ def test_get_volume_fractional(self): assert isinstance(vol, Volume) assert vol.spatial_shape == (3, 16, 16) assert vol.shape == (3, 16, 16, 1) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_true_fractional_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -4091,7 +4090,7 @@ def test_get_volume_fractional(self): assert vol.spacing_between_slices == ( self._ct_true_fractional_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_true_fractional_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] @@ -4109,7 +4108,7 @@ def test_get_volume_fractional_noscale(self): assert isinstance(vol, Volume) assert vol.spatial_shape == (3, 16, 16) assert vol.shape == (3, 16, 16, 1) - assert vol.pixel_spacing == ( + assert vol.pixel_spacing == tuple( self._ct_true_fractional_seg .SharedFunctionalGroupsSequence[0] .PixelMeasuresSequence[0] @@ -4118,7 +4117,7 @@ def test_get_volume_fractional_noscale(self): assert vol.spacing_between_slices == ( 
self._ct_true_fractional_seg.volume_geometry.spacing_between_slices ) - assert vol.direction_cosines == ( + assert vol.direction_cosines == tuple( self._ct_true_fractional_seg .SharedFunctionalGroupsSequence[0] .PlaneOrientationSequence[0] diff --git a/tests/test_volume.py b/tests/test_volume.py index ebe1ee20..19e371e8 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -2,10 +2,15 @@ import numpy as np import pydicom from pydicom.data import get_testdata_file +from pydicom.pixel_data_handlers.util import pixel_dtype import pytest +from highdicom import spatial -from highdicom.spatial import _normalize_patient_orientation +from highdicom.spatial import ( + _normalize_patient_orientation, + _translate_affine_matrix, +) from highdicom.volume import ( Volume, VolumeGeometry, @@ -102,9 +107,9 @@ def test_volume_from_attributes( pixel_spacing=pixel_spacing, spacing_between_slices=spacing_between_slices, ) - assert volume.position == list(image_position) - assert volume.direction_cosines == list(image_orientation) - assert volume.pixel_spacing == list(pixel_spacing) + assert volume.position == tuple(image_position) + assert volume.direction_cosines == tuple(image_orientation) + assert volume.pixel_spacing == tuple(pixel_spacing) assert volume.spacing_between_slices == spacing_between_slices assert volume.shape == (10, 10, 10) assert volume.spatial_shape == (10, 10, 10) @@ -157,7 +162,7 @@ def test_volume_single_frame(): assert volume.spatial_shape == volume.shape assert volume.number_of_channels is None orientation = ct_series[0].ImageOrientationPatient - assert volume.direction_cosines == orientation + assert volume.direction_cosines == tuple(orientation) direction = volume.direction assert np.array_equal(direction[:, 1], orientation[3:]) assert np.array_equal(direction[:, 2], orientation[:3]) @@ -165,10 +170,10 @@ def test_volume_single_frame(): assert direction[:, 0] @ direction[:, 1] == 0.0 assert direction[:, 0] @ direction[:, 2] == 0.0 assert (direction[:, 0] ** 2).sum() == 1.0 - assert volume.position == ct_series[1].ImagePositionPatient # sorting - assert volume.pixel_spacing == ct_series[0].PixelSpacing + assert volume.position == tuple(ct_series[1].ImagePositionPatient) # sorting + assert volume.pixel_spacing == tuple(ct_series[0].PixelSpacing) slice_spacing = 1.25 - assert volume.spacing == [slice_spacing, *ct_series[0].PixelSpacing[::-1]] + assert volume.spacing == (slice_spacing, *ct_series[0].PixelSpacing[::-1]) pixel_spacing = ct_series[0].PixelSpacing expected_voxel_volume = ( pixel_spacing[0] * pixel_spacing[1] * slice_spacing @@ -207,7 +212,7 @@ def test_volume_multiframe(): .PixelMeasuresSequence[0] .PixelSpacing ) - assert volume.direction_cosines == orientation + assert volume.direction_cosines == tuple(orientation) direction = volume.direction assert np.array_equal(direction[:, 1], orientation[3:]) assert np.array_equal(direction[:, 2], orientation[:3]) @@ -221,10 +226,10 @@ def test_volume_multiframe(): .PlanePositionSequence[0] .ImagePositionPatient ) - assert volume.position == first_frame_pos - assert volume.pixel_spacing == pixel_spacing + assert volume.position == tuple(first_frame_pos) + assert volume.pixel_spacing == tuple(pixel_spacing) slice_spacing = 10.0 - assert volume.spacing == [slice_spacing, *pixel_spacing[::-1]] + assert volume.spacing == (slice_spacing, *pixel_spacing[::-1]) assert volume.number_of_channels is None expected_voxel_volume = ( pixel_spacing[0] * pixel_spacing[1] * slice_spacing @@ -518,6 +523,136 @@ def 
test_volume_transformer(): assert np.array_equal(outputs, expected) +@pytest.mark.parametrize( + 'crop,pad,permute,reversible', + [ + ( + (slice(None), slice(14, None), slice(None, None, -1)), + ((0, 0), (0, 32), (3, 3)), + (1, 0, 2), + True, + ), + ( + (1, slice(256, 320), slice(256, 320)), + ((0, 0), (0, 0), (0, 0)), + (0, 2, 1), + True, + ), + ( + (slice(None), slice(None, None, -1), slice(None)), + ((12, 31), (1, 23), (5, 7)), + (0, 2, 1), + True, + ), + ( + (slice(None, None, -1), slice(None, None, -2), slice(None)), + ((0, 0), (0, 0), (0, 0)), + (2, 1, 0), + False, + ), + ], +) +def test_match_geometry(crop, pad, permute, reversible): + vol, _ = read_multiframe_ct_volume() + + transformed = ( + vol[crop] + .pad(pad) + .permute_axes(permute) + ) + + forward_matched = vol.match_geometry(transformed) + assert forward_matched.geometry_equal(transformed) + assert np.array_equal(forward_matched.array, transformed.array) + + if reversible: + reverse_matched = transformed.match_geometry(vol) + assert reverse_matched.geometry_equal(vol) + + # Perform the transform again on the recovered image to ensure that we + # end up with the transformed + inverted_transformed = ( + vol[crop] + .pad(pad) + .permute_axes(permute) + ) + assert inverted_transformed.geometry_equal(transformed) + assert np.array_equal(transformed.array, inverted_transformed.array) + + +def test_match_geometry_nonintersecting(): + vol, _ = read_multiframe_ct_volume() + + new_affine = _translate_affine_matrix( + vol.affine, + [0, -32, 32] + ) + + # This geometry has no overlap with the original volume + geometry = VolumeGeometry( + new_affine, + [2, 16, 16] + ) + + transformed = vol.match_geometry(geometry) + + # Result should be an empty array with the requested geometry + assert transformed.geometry_equal(geometry) + assert transformed.array.min() == 0 + assert transformed.array.max() == 0 + + +def test_match_geometry_failure_translation(): + vol, _ = read_multiframe_ct_volume() + + new_affine = _translate_affine_matrix( + vol.affine, + [0.0, 0.5, 0.0] + ) + geometry = VolumeGeometry( + new_affine, + vol.shape, + ) + + with pytest.raises(RuntimeError): + vol.match_geometry(geometry) + + +def test_match_geometry_failure_spacing(): + vol, _ = read_multiframe_ct_volume() + + new_affine = vol.affine.copy() + new_affine[:3, 2] *= 0.33 + geometry = VolumeGeometry( + new_affine, + vol.shape, + ) + + with pytest.raises(RuntimeError): + vol.match_geometry(geometry) + + +def test_match_geometry_failure_rotation(): + vol, _ = read_multiframe_ct_volume() + + # Geometry that is rotated with respect to input volume + geometry = VolumeGeometry.from_attributes( + image_orientation=( + np.cos(np.radians(30)), -np.sin(np.radians(30)), 0.0, + np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0, + ), + image_position=vol.position, + pixel_spacing=vol.pixel_spacing, + spacing_between_slices=vol.spacing_between_slices, + number_of_frames=vol.shape[0], + columns=vol.shape[2], + rows=vol.shape[1], + ) + + with pytest.raises(RuntimeError): + vol.match_geometry(geometry) + + @pytest.mark.parametrize( 'fp,glob', [ From 9c6c6f2d4f91b4138a15af0a7cfcd44ae2a3b8c0 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 18 Aug 2024 20:04:26 -0400 Subject: [PATCH 58/93] Fix type hint --- src/highdicom/volume.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index a2bb0ae6..8daed5ca 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -423,7 +423,7 @@ def 
spacing_between_slices(self) -> float: return spacing @property - def spacing(self) -> Tuple[float]: + def spacing(self) -> Tuple[float, float, float]: """Tuple[float, float, float]: Pixel spacing in millimeter units for the three spatial directions. From 3ec987c7f30e27f04b5e644724d5a3901ee10b2b Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 19 Aug 2024 14:46:03 -0400 Subject: [PATCH 59/93] Small typos and better error message --- src/highdicom/spatial.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 97f26d3e..c46403fe 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -3256,7 +3256,7 @@ def get_volume_positions( if spacing_hint is not None and spacing_hint <= 0.0: raise ValueError( - "Argument 'spacing_hint' should be a postive value." + "Argument 'spacing_hint' should be a positive value." ) image_positions_arr = np.array(image_positions) @@ -3270,7 +3270,7 @@ def get_volume_positions( "Argument 'image_positions' should contain at least 1 position." ) elif n == 1: - # Special case, we stipluate that this has spacing 1.0 + # Special case, we stipulate that this has spacing 1.0 # if not otherwise specified spacing = 1.0 if spacing_hint is None else spacing_hint return spacing, [0] @@ -3295,7 +3295,7 @@ def get_volume_positions( unique_index = np.arange(image_positions_arr.shape[0]) if len(unique_positions) == 1: - # Special case, we stipluate that this has spacing 1.0 + # Special case, we stipulate that this has spacing 1.0 # if not otherwise specified spacing = 1.0 if spacing_hint is None else spacing_hint return spacing, [0] * n @@ -3347,7 +3347,8 @@ def get_volume_positions( if spacing_hint is not None: if not np.isclose(abs(spacing), spacing_hint): raise RuntimeError( - "Inferred spacing does not match the given 'spacing_hint'." + f"Inferred spacing ({abs(spacing):.3f}) does not match the " + f"given 'spacing_hint' ({spacing_hint})." 
) is_regular = np.isclose( From 086a3709fc7f7df5e0d04c56cca1a771e9d52d47 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 20 Aug 2024 08:26:37 -0400 Subject: [PATCH 60/93] Implement MultiFrameImage class --- src/highdicom/_multiframe.py | 243 +- src/highdicom/ann/content.py | 4 +- src/highdicom/ann/sop.py | 4 +- src/highdicom/content.py | 18 +- src/highdicom/ko/content.py | 4 +- src/highdicom/ko/sop.py | 2 +- src/highdicom/seg/content.py | 2 +- src/highdicom/seg/sop.py | 4131 +++++++++++++++---------------- src/highdicom/sr/content.py | 24 +- src/highdicom/sr/sop.py | 8 +- src/highdicom/sr/templates.py | 15 +- src/highdicom/sr/value_types.py | 32 +- 12 files changed, 2189 insertions(+), 2298 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index c658b69b..0d56713d 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -1,6 +1,7 @@ """Tools for working with multiframe DICOM images.""" from collections import Counter from contextlib import contextmanager +from copy import deepcopy import logging import sqlite3 from typing import ( @@ -14,6 +15,7 @@ Sequence, Tuple, Union, + cast, ) import numpy as np from pydicom import Dataset @@ -21,14 +23,13 @@ from pydicom.datadict import get_entry, tag_for_keyword from pydicom.multival import MultiValue +from highdicom._module_utils import is_multiframe_image +from highdicom.base import SOPClass, _check_little_endian from highdicom.enum import ( CoordinateSystemNames, ) from highdicom.seg.enum import SpatialLocationsPreservedValues from highdicom.spatial import ( - _create_affine_transformation_matrix, - _translate_affine_matrix, - VOLUME_INDEX_CONVENTION, get_image_coordinate_system, get_volume_positions, ) @@ -66,39 +67,87 @@ logger = logging.getLogger(__name__) -class MultiFrameDBManager: +class MultiFrameImage(SOPClass): """Database manager for frame information in a multiframe image.""" - def __init__( - self, + _coordinate_system: CoordinateSystemNames + _is_tiled_full: bool + _single_source_frame_per_frame: bool + _dim_ind_pointers: List[BaseTag] + _dim_ind_col_names: Dict[int, str] + _locations_preserved: Optional[SpatialLocationsPreservedValues] + _db_con: sqlite3.Connection + _volume_geometry: Optional[VolumeGeometry] + + @classmethod + def from_dataset( + cls, dataset: Dataset, - ): - """ + copy: bool = True, + ) -> 'MultiFrameImage': + """Create a MultiFrameImage from an existing pydicom Dataset. Parameters ---------- dataset: pydicom.Dataset Dataset of a multi-frame image. + copy: bool + If True, the underlying dataset is deep-copied such that the + original dataset remains intact. If False, this operation will + alter the original dataset in place. + + """ + if not isinstance(dataset, Dataset): + raise TypeError( + 'Dataset must be of type pydicom.dataset.Dataset.' + ) + _check_little_endian(dataset) + + # Checks on integrity of input dataset + if not is_multiframe_image(dataset): + raise ValueError('Dataset is not a multiframe image.') + if copy: + im = deepcopy(dataset) + else: + im = dataset + im.__class__ = cls + im = cast(cls, im) + + im._build_luts() + return im + + def _build_luts(self) -> None: + """Build lookup tables for efficient querying. + + Two lookup tables are currently constructed. The first maps the + SOPInstanceUIDs of all datasets referenced in the image to a + tuple containing the StudyInstanceUID, SeriesInstanceUID and + SOPInstanceUID. 
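+        This first table is named ``InstanceUIDs`` in the in-memory SQLite
+        database. As a usage sketch, the public entry point that triggers
+        this method (the file name here is hypothetical)::
+
+            from pydicom import dcmread
+
+            im = MultiFrameImage.from_dataset(dcmread('image.dcm'))
+            im.volume_geometry  # set when the frames form a regular volume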
+ + The second look-up table contains information about each frame of the + segmentation, including the segment it contains, the instance and frame + from which it was derived (if these are unique), and its dimension + index values. """ self._coordinate_system = get_image_coordinate_system( - dataset + self ) - referenced_uids = self._get_ref_instance_uids(dataset) + referenced_uids = self._get_ref_instance_uids() all_referenced_sops = {uids[2] for uids in referenced_uids} self._is_tiled_full = ( - hasattr(dataset, 'DimensionOrganizationType') and - dataset.DimensionOrganizationType == 'TILED_FULL' + hasattr(self, 'DimensionOrganizationType') and + self.DimensionOrganizationType == 'TILED_FULL' ) self._dim_ind_pointers = [ dim_ind.DimensionIndexPointer - for dim_ind in dataset.DimensionIndexSequence + for dim_ind in self.DimensionIndexSequence ] func_grp_pointers = {} - for dim_ind in dataset.DimensionIndexSequence: + for dim_ind in self.DimensionIndexSequence: ptr = dim_ind.DimensionIndexPointer if ptr in self._dim_ind_pointers: grp_ptr = getattr(dim_ind, "FunctionalGroupPointer", None) @@ -120,8 +169,8 @@ def __init__( image_position_tag ] = plane_pos_seq_tag - if hasattr(dataset, 'SharedFunctionalGroupsSequence'): - sfgs = dataset.SharedFunctionalGroupsSequence[0] + if hasattr(self, 'SharedFunctionalGroupsSequence'): + sfgs = self.SharedFunctionalGroupsSequence[0] if hasattr(sfgs, 'PixelMeasuresSequence'): measures = sfgs.PixelMeasuresSequence[0] slice_spacing_hint = measures.get('SpacingBetweenSlices') @@ -129,8 +178,8 @@ def __init__( if slice_spacing_hint is None or shared_pixel_spacing is None: # Get the orientation of the first frame, and in the later loop # check whether it is shared. - if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'): - pfg1 = dataset.PerFrameFunctionalGroupsSequence[0] + if hasattr(self, 'PerFrameFunctionalGroupsSequence'): + pfg1 = self.PerFrameFunctionalGroupsSequence[0] if hasattr(pfg1, 'PixelMeasuresSequence'): slice_spacing_hint = pfg1.PixelMeasuresSequence[0].get( 'SpacingBetweenSlices' @@ -139,7 +188,7 @@ def __init__( dim_ind_positions = { dim_ind.DimensionIndexPointer: i - for i, dim_ind in enumerate(dataset.DimensionIndexSequence) + for i, dim_ind in enumerate(self.DimensionIndexSequence) } dim_indices: Dict[int, List[int]] = { ptr: [] for ptr in self._dim_ind_pointers @@ -154,10 +203,10 @@ def __init__( # Get the shared orientation shared_image_orientation: Optional[List[float]] = None - if hasattr(dataset, 'ImageOrientationSlide'): - shared_image_orientation = dataset.ImageOrientationSlide - if hasattr(dataset, 'SharedFunctionalGroupsSequence'): - sfgs = dataset.SharedFunctionalGroupsSequence[0] + if hasattr(self, 'ImageOrientationSlide'): + shared_image_orientation = self.ImageOrientationSlide + if hasattr(self, 'SharedFunctionalGroupsSequence'): + sfgs = self.SharedFunctionalGroupsSequence[0] if hasattr(sfgs, 'PlaneOrientationSequence'): shared_image_orientation = ( sfgs.PlaneOrientationSequence[0].ImageOrientationPatient @@ -165,8 +214,8 @@ def __init__( if shared_image_orientation is None: # Get the orientation of the first frame, and in the later loop # check whether it is shared. 
- if hasattr(dataset, 'PerFrameFunctionalGroupsSequence'): - pfg1 = dataset.PerFrameFunctionalGroupsSequence[0] + if hasattr(self, 'PerFrameFunctionalGroupsSequence'): + pfg1 = self.PerFrameFunctionalGroupsSequence[0] if hasattr(pfg1, 'PlaneOrientationSequence'): shared_image_orientation = ( pfg1 @@ -202,12 +251,12 @@ def __init__( dim_values[x_tag], dim_values[y_tag], dim_values[z_tag], - ) = zip(*iter_tiled_full_frame_data(dataset)) + ) = zip(*iter_tiled_full_frame_data(self)) - if hasattr(dataset, 'SegmentSequence'): + if hasattr(self, 'SegmentSequence'): segment_tag = tag_for_keyword('ReferencedSegmentNumber') dim_values[segment_tag] = channel_numbers - elif hasattr(dataset, 'OpticalPathSequence'): + elif hasattr(self, 'OpticalPathSequence'): op_tag = tag_for_keyword('OpticalPathIdentifier') dim_values[op_tag] = channel_numbers @@ -233,7 +282,7 @@ def __init__( ] locations_preserved: locations_list_type = [] - for frame_item in dataset.PerFrameFunctionalGroupsSequence: + for frame_item in self.PerFrameFunctionalGroupsSequence: # Get dimension indices for this frame content_seq = frame_item.FrameContentSequence[0] indices = content_seq.DimensionIndexValues @@ -319,7 +368,7 @@ def __init__( 'the source image sequence is not included in the ' 'Referenced Series Sequence or Studies Containing ' 'Other Referenced Instances Sequence. This is an ' - 'error with the integrity of the Segmentation ' + 'error with the integrity of the ' 'object.' ) referenced_instances.append(ref_instance_uid) @@ -343,9 +392,7 @@ def __init__( for v in locations_preserved ): - self._locations_preserved: Optional[ - SpatialLocationsPreservedValues - ] = SpatialLocationsPreservedValues.NO + self._locations_preserved = SpatialLocationsPreservedValues.NO elif all( isinstance(v, SpatialLocationsPreservedValues) and v == SpatialLocationsPreservedValues.YES @@ -359,21 +406,19 @@ def __init__( referenced_instances = None referenced_frames = None - self._db_con: sqlite3.Connection = sqlite3.connect(":memory:") + self._db_con = sqlite3.connect(":memory:") self._create_ref_instance_table(referenced_uids) - self._number_of_frames = dataset.NumberOfFrames - # Construct the columns and values to put into a frame look-up table # table within sqlite. 
There will be one row per frame in the - # segmentation instance + # image col_defs = [] # SQL column definitions col_data = [] # lists of column data # Frame number column col_defs.append('FrameNumber INTEGER PRIMARY KEY') - col_data.append(list(range(1, self._number_of_frames + 1))) + col_data.append(list(range(1, self.NumberOfFrames + 1))) self._dim_ind_col_names = {} for i, t in enumerate(dim_indices.keys()): @@ -435,7 +480,7 @@ def __init__( col_data.append(dim_values[t]) # Volume related information - self.volume_geometry: Optional[VolumeGeometry] = None + self._volume_geometry = None if ( self._coordinate_system == CoordinateSystemNames.PATIENT and shared_image_orientation is not None @@ -457,11 +502,11 @@ def __init__( if volume_positions is not None: origin_slice_index = volume_positions.index(0) number_of_slices = max(volume_positions) + 1 - self.volume_geometry = VolumeGeometry.from_attributes( + self._volume_geometry = VolumeGeometry.from_attributes( image_position=image_positions[origin_slice_index], image_orientation=shared_image_orientation, - rows=dataset.Rows, - columns=dataset.Columns, + rows=self.Rows, + columns=self.Columns, pixel_spacing=shared_pixel_spacing, number_of_frames=number_of_slices, spacing_between_slices=volume_spacing, @@ -498,16 +543,9 @@ def __init__( zip(*col_data), ) - def _get_ref_instance_uids( - self, - dataset: Dataset, - ) -> List[Tuple[str, str, str]]: + def _get_ref_instance_uids(self) -> List[Tuple[str, str, str]]: """List all instances referenced in the image. - Parameters - ---------- - dataset - Returns ------- List[Tuple[str, str, str]] @@ -516,19 +554,19 @@ def _get_ref_instance_uids( """ instance_data = [] - if hasattr(dataset, 'ReferencedSeriesSequence'): - for ref_series in dataset.ReferencedSeriesSequence: + if hasattr(self, 'ReferencedSeriesSequence'): + for ref_series in self.ReferencedSeriesSequence: for ref_ins in ref_series.ReferencedInstanceSequence: instance_data.append( ( - dataset.StudyInstanceUID, + self.StudyInstanceUID, ref_series.SeriesInstanceUID, ref_ins.ReferencedSOPInstanceUID ) ) other_studies_kw = 'StudiesContainingOtherReferencedInstancesSequence' - if hasattr(dataset, other_studies_kw): - for ref_study in getattr(dataset, other_studies_kw): + if hasattr(self, other_studies_kw): + for ref_study in getattr(self, other_studies_kw): for ref_series in ref_study.ReferencedSeriesSequence: for ref_ins in ref_series.ReferencedInstanceSequence: instance_data.append( @@ -552,7 +590,7 @@ def _get_ref_instance_uids( display_str = ', '.join(duplicate_sop_uids) logger.warning( 'Duplicate entries found in the ReferencedSeriesSequence. ' - f"SOP Instance UID: '{dataset.SOPInstanceUID}', " + f"SOP Instance UID: '{self.SOPInstanceUID}', " f'duplicated referenced SOP Instance UID items: {display_str}.' ) @@ -570,7 +608,7 @@ def _check_indexing_with_source_frames( * Spatial locations are not preserved. * The dataset does not specify that spatial locations are preserved and the user has not asserted that they are. - * At least one frame in the segmentation lists multiple + * At least one frame in the image lists multiple source frames. Parameters @@ -584,8 +622,8 @@ def _check_indexing_with_source_frames( # dataset if self._is_tiled_full: raise RuntimeError( - 'Indexing via source frames is not possible when a ' - 'segmentation is stored using the DimensionOrganizationType ' + 'Indexing via source frames is not possible when an ' + 'image is stored using the DimensionOrganizationType ' '"TILED_FULL".' 
) elif self._locations_preserved is None: @@ -593,7 +631,7 @@ def _check_indexing_with_source_frames( raise RuntimeError( 'Indexing via source frames is not permissible since this ' 'image does not specify that spatial locations are ' - 'preserved in the course of deriving the segmentation ' + 'preserved in the course of deriving the image ' 'from the source image. If you are confident that spatial ' 'locations are preserved, or do not require that spatial ' 'locations are preserved, you may override this behavior ' @@ -604,7 +642,7 @@ def _check_indexing_with_source_frames( raise RuntimeError( 'Indexing via source frames is not permissible since this ' 'image specifies that spatial locations are not preserved ' - 'in the course of deriving the segmentation from the ' + 'in the course of deriving the image from the ' 'source image. If you do not require that spatial ' ' locations are preserved you may override this behavior ' "with the 'ignore_spatial_locations' parameter." @@ -612,7 +650,7 @@ def _check_indexing_with_source_frames( if not self._single_source_frame_per_frame: raise RuntimeError( 'Indexing via source frames is not permissible since some ' - 'frames in the segmentation specify multiple source frames.' + 'frames in the image specify multiple source frames.' ) @property @@ -629,13 +667,12 @@ def _create_ref_instance_table( """Create a table of referenced instances. The resulting table (called InstanceUIDs) contains Study, Series and - SOP instance UIDs for each instance referenced by the segmentation - image. + SOP instance UIDs for each instance referenced by the image. Parameters ---------- referenced_uids: List[Tuple[str, str, str]] - List of UIDs for each instance referenced in the segmentation. + List of UIDs for each instance referenced in the image. Each tuple should be in the format (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID). @@ -665,7 +702,7 @@ def are_dimension_indices_unique( For a given list of dimension index pointers, check whether every combination of index values for these pointers identifies a unique - frame image. This is a pre-requisite for indexing using this list of + image frame. This is a pre-requisite for indexing using this list of dimension index pointers. Parameters @@ -687,7 +724,7 @@ def are_dimension_indices_unique( n_unique_combos = cur.execute( f"SELECT COUNT(*) FROM (SELECT 1 FROM FrameLUT GROUP BY {col_str})" ).fetchone()[0] - return n_unique_combos == self._number_of_frames + return n_unique_combos == self.NumberOfFrames def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]: """Get UIDs of source image instances referenced in the image. @@ -696,7 +733,7 @@ def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]: ------- List[Tuple[highdicom.UID, highdicom.UID, highdicom.UID]] (Study Instance UID, Series Instance UID, SOP Instance UID) triplet - for every image instance referenced in the segmentation. + for every image instance referenced in the image. """ cur = self._db_con.cursor() @@ -709,7 +746,7 @@ def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]: (hd_UID(a), hd_UID(b), hd_UID(c)) for a, b, c in res.fetchall() ] - def get_unique_referenced_sop_instance_uids(self) -> Set[str]: + def _get_unique_referenced_sop_instance_uids(self) -> Set[str]: """Get set of unique Referenced SOP Instance UIDs. 
Returns @@ -726,7 +763,7 @@ def get_unique_referenced_sop_instance_uids(self) -> Set[str]: ) } - def get_max_referenced_frame_number(self) -> int: + def _get_max_referenced_frame_number(self) -> int: """Get highest frame number of any referenced frame. Absent access to the referenced dataset itself, being less than this @@ -736,7 +773,7 @@ def get_max_referenced_frame_number(self) -> int: Returns ------- int - Highest frame number referenced in the segmentation image. + Highest frame number referenced in the image. """ cur = self._db_con.cursor() @@ -750,7 +787,7 @@ def is_indexable_as_total_pixel_matrix(self) -> bool: Returns ------- bool: - True if the segmentation may be indexed using row and column + True if the image may be indexed using row and column positions in the total pixel matrix. False otherwise. """ @@ -761,7 +798,7 @@ def is_indexable_as_total_pixel_matrix(self) -> bool: col_pos_kw in self._dim_ind_col_names ) - def get_unique_dim_index_values( + def _get_unique_dim_index_values( self, dimension_index_pointers: Sequence[int], ) -> Set[Tuple[int, ...]]: @@ -790,62 +827,14 @@ def get_unique_dim_index_values( ) } - def get_image_position_at_volume_position( - self, - volume_position: int, - ) -> List[float]: - """Get the image position at a location in the implied volume. - - This requires that the image represents a regularly-spaced 3D volume. - - Parameters - ---------- - volume_position: int - Zero-based index into the slice positions within the implied - volume. Must be an integer between >= 0 and < - ``volume_geometry.spatial_shape[0]``. - - Returns - ------- - List[float]: - Image position (x, y, z) in the frame of reference coordinate - system of the center of the top-left pixel. This definition matches - the standard DICOM definition used in the ImagePositionPatient - attribute. + @property + def volume_geometry(self) -> Optional[VolumeGeometry]: + """Union[highdicom.VolumeGeometry, None]: Geometry of the volume if the + image represents a regularly-spaced 3D volume. ``None`` + otherwise. """ - - if self.volume_geometry is None: - raise RuntimeError( - "This image does not represent a regularly-spaced 3D volume." - ) - - if volume_position < 0: - raise ValueError( - "Argument 'volume_position' should be non-negative." - ) - else: - n_slices = self.volume_geometry.spatial_shape[0] - if volume_position >= n_slices: - raise ValueError( - f"Value of {volume_position} for argument 'volume_position' " - f"is not valid for image with {n_slices} volume positions." 
- ) - - cur = self._db_con.cursor() - - query = ( - 'SELECT ' - 'ImagePositionPatient_0, ' - 'ImagePositionPatient_1, ' - 'ImagePositionPatient_2 ' - 'FROM FrameLUT ' - f'WHERE VolumePosition={volume_position} ' - 'LIMIT 1;' - ) - - image_position = list(list(cur.execute(query))[0]) - return image_position + return self._volume_geometry @contextmanager def _generate_temp_table( diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index b2629c0b..91296b7e 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -165,7 +165,7 @@ def from_dataset( ) ] - return cast(Measurements, measurements) + return cast(cls, measurements) class AnnotationGroup(Dataset): @@ -838,4 +838,4 @@ def from_dataset( for ds in group.PrimaryAnatomicStructureSequence ] - return cast(AnnotationGroup, group) + return cast(cls, group) diff --git a/src/highdicom/ann/sop.py b/src/highdicom/ann/sop.py index a58f7199..1f361b64 100644 --- a/src/highdicom/ann/sop.py +++ b/src/highdicom/ann/sop.py @@ -450,14 +450,14 @@ def from_dataset( ann = deepcopy(dataset) else: ann = dataset - ann.__class__ = MicroscopyBulkSimpleAnnotations + ann.__class__ = cls ann.AnnotationGroupSequence = [ AnnotationGroup.from_dataset(item, copy=copy) for item in ann.AnnotationGroupSequence ] - return cast(MicroscopyBulkSimpleAnnotations, ann) + return cast(cls, ann) def annread( diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 0d7d8698..834ffadd 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -135,8 +135,8 @@ def from_sequence( algo_id_sequence = deepcopy(sequence) else: algo_id_sequence = sequence - algo_id_sequence.__class__ = AlgorithmIdentificationSequence - return cast(AlgorithmIdentificationSequence, algo_id_sequence) + algo_id_sequence.__class__ = cls + return cast(cls, algo_id_sequence) @property def name(self) -> str: @@ -389,8 +389,8 @@ def from_sequence( pixel_measures = deepcopy(sequence) else: pixel_measures = sequence - pixel_measures.__class__ = PixelMeasuresSequence - return cast(PixelMeasuresSequence, pixel_measures) + pixel_measures.__class__ = cls + return cast(cls, pixel_measures) def __eq__(self, other: DataElementSequence) -> bool: """Determine whether two sets of pixel measures are the same. @@ -599,8 +599,8 @@ def from_sequence( plane_position = deepcopy(sequence) else: plane_position = sequence - plane_position.__class__ = PlanePositionSequence - return cast(PlanePositionSequence, plane_position) + plane_position.__class__ = cls + return cast(cls, plane_position) class PlaneOrientationSequence(DataElementSequence): @@ -736,8 +736,8 @@ def from_sequence( plane_orientation = deepcopy(sequence) else: plane_orientation = sequence - plane_orientation.__class__ = PlaneOrientationSequence - return cast(PlaneOrientationSequence, plane_orientation) + plane_orientation.__class__ = cls + return cast(cls, plane_orientation) class IssuerOfIdentifier(Dataset): @@ -835,7 +835,7 @@ def from_dataset( issuer_of_identifier._issuer_of_identifier = issuer_id issuer_of_identifier._issuer_of_identifier_type = issuer_type - return cast(IssuerOfIdentifier, issuer_of_identifier) + return cast(cls, issuer_of_identifier) class SpecimenCollection(ContentSequence): diff --git a/src/highdicom/ko/content.py b/src/highdicom/ko/content.py index 2978ee7a..922c6e99 100644 --- a/src/highdicom/ko/content.py +++ b/src/highdicom/ko/content.py @@ -174,8 +174,8 @@ def from_sequence( 'because it does not have Template Identifier "2010".' 
) instance = ContentSequence.from_sequence(sequence, is_root=True) - instance.__class__ = KeyObjectSelection - return cast(KeyObjectSelection, instance) + instance.__class__ = cls + return cast(cls, instance) def get_observer_contexts( self, diff --git a/src/highdicom/ko/sop.py b/src/highdicom/ko/sop.py index 44c93034..cacc1600 100644 --- a/src/highdicom/ko/sop.py +++ b/src/highdicom/ko/sop.py @@ -228,4 +228,4 @@ def from_dataset(cls, dataset: Dataset) -> 'KeyObjectSelectionDocument': sop_instance_uid ) - return cast(KeyObjectSelectionDocument, sop_instance) + return cast(cls, sop_instance) diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py index 7029dd73..3f2cab8c 100644 --- a/src/highdicom/seg/content.py +++ b/src/highdicom/seg/content.py @@ -216,7 +216,7 @@ def from_dataset( CodedConcept.from_dataset(ds, copy=False) for ds in desc.PrimaryAnatomicStructureSequence ] - return cast(SegmentDescription, desc) + return cast(cls, desc) @property def segment_number(self) -> int: diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 515e25c4..150d084e 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -45,8 +45,8 @@ get_module_usage, is_multiframe_image, ) -from highdicom._multiframe import MultiFrameDBManager -from highdicom.base import SOPClass, _check_little_endian +from highdicom._multiframe import MultiFrameImage +from highdicom.base import _check_little_endian from highdicom.content import ( ContentCreatorIdentificationCodeSequence, PlaneOrientationSequence, @@ -158,858 +158,126 @@ def _check_numpy_value_representation( ) -class _SegDBManager(MultiFrameDBManager): +class Segmentation(MultiFrameImage): - """Database manager for data associated with a segmentation image.""" - - def are_referenced_sop_instances_unique(self) -> bool: - """Check if Referenced SOP Instance UIDs uniquely identify frames. - - This is a pre-requisite for requesting segmentation masks defined by - the SOP Instance UIDs of their source frames, such as using the - Segmentation.get_pixels_by_source_instance() method and - _SegDBManager.iterate_indices_by_source_instance() method. - - Returns - ------- - bool - True if the ReferencedSOPInstanceUID (in combination with the - segment number) uniquely identifies frames of the segmentation - image. - - """ - cur = self._db_con.cursor() - n_unique_combos = cur.execute( - 'SELECT COUNT(*) FROM ' - '(SELECT 1 FROM FrameLUT GROUP BY ReferencedSOPInstanceUID, ' - 'ReferencedSegmentNumber)' - ).fetchone()[0] - return n_unique_combos == self._number_of_frames - - def are_referenced_frames_unique(self) -> bool: - """Check if Referenced Frame Numbers uniquely identify frames. - - Returns - ------- - bool - True if the ReferencedFrameNumber (in combination with the - segment number) uniquely identifies frames of the segmentation - image. - - """ - cur = self._db_con.cursor() - n_unique_combos = cur.execute( - 'SELECT COUNT(*) FROM ' - '(SELECT 1 FROM FrameLUT GROUP BY ReferencedFrameNumber, ' - 'ReferencedSegmentNumber)' - ).fetchone()[0] - return n_unique_combos == self._number_of_frames + """SOP class for the Segmentation IOD.""" - @contextmanager - def _generate_temp_segment_table( + def __init__( self, - segment_numbers: Sequence[int], - combine_segments: bool, - relabel: bool - ) -> Generator[None, None, None]: - """Context manager that handles a temporary table for segments. 
-
-        The temporary table is named "TemporarySegmentNumbers" with columns
-        OutputSegmentNumber and SegmentNumber that are populated with values
-        derived from the input. Control flow then returns to code within the
-        "with" block. After the "with" block has completed, the cleanup of
-        the table is automatically handled.
-
+        source_images: Sequence[Dataset],
+        pixel_array: Union[np.ndarray, Volume],
+        segmentation_type: Union[str, SegmentationTypeValues],
+        segment_descriptions: Sequence[SegmentDescription],
+        series_instance_uid: str,
+        series_number: int,
+        sop_instance_uid: str,
+        instance_number: int,
+        manufacturer: str,
+        manufacturer_model_name: str,
+        software_versions: Union[str, Tuple[str]],
+        device_serial_number: str,
+        fractional_type: Optional[
+            Union[str, SegmentationFractionalTypeValues]
+        ] = SegmentationFractionalTypeValues.PROBABILITY,
+        max_fractional_value: int = 255,
+        content_description: Optional[str] = None,
+        content_creator_name: Optional[Union[str, PersonName]] = None,
+        transfer_syntax_uid: Union[str, UID] = ExplicitVRLittleEndian,
+        pixel_measures: Optional[PixelMeasuresSequence] = None,
+        plane_orientation: Optional[PlaneOrientationSequence] = None,
+        plane_positions: Optional[Sequence[PlanePositionSequence]] = None,
+        omit_empty_frames: bool = True,
+        content_label: Optional[str] = None,
+        content_creator_identification: Optional[
+            ContentCreatorIdentificationCodeSequence
+        ] = None,
+        workers: Union[int, Executor] = 0,
+        dimension_organization_type: Union[
+            DimensionOrganizationTypeValues,
+            str,
+            None,
+        ] = None,
+        tile_pixel_array: bool = False,
+        tile_size: Union[Sequence[int], None] = None,
+        pyramid_uid: Optional[str] = None,
+        pyramid_label: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """
         Parameters
         ----------
-        segment_numbers: Sequence[int]
-            Segment numbers to include, in the order desired.
-        combine_segments: bool
-            Whether the segments will be combined into a label map.
-        relabel: bool
-            Whether the output segment numbers should be relabelled to 1-n
-            (True) or retain their values in the original segmentation object.
+        source_images: Sequence[Dataset]
+            One or more single- or multi-frame images (or metadata of images)
+            from which the segmentation was derived.
+        pixel_array: numpy.ndarray
+            Array of segmentation pixel data of boolean, unsigned integer or
+            floating point data type representing a mask image. The array may
+            be a 2D, 3D or 4D numpy array.

-        Yields
-        ------
-        None:
-            Yields control to the "with" block, with the temporary table
-            created.
+            If it is a 2D numpy array, it represents the segmentation of a
+            single frame image, such as a planar x-ray or single instance from
+            a CT or MR series.

-        """
-        if combine_segments:
-            if relabel:
-                # Output segment numbers are consecutive and start at 1
-                data = enumerate(segment_numbers, 1)
-            else:
-                # Output segment numbers are the same as the input
-                # segment numbers
-                data = zip(segment_numbers, segment_numbers)
-        else:
-            # Output segment numbers are indices along the output
-            # array's segment dimension, so are consecutive starting at
-            # 0
-            data = enumerate(segment_numbers)
+            If it is a 3D array, it represents the segmentation of either a
+            series of source images (such as a series of CT or MR images), a
+            single 3D multi-frame image (such as a multi-frame CT/MR image), or
+            a single 2D tiled image (such as a slide microscopy image).
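As a quick reference for the shape conventions this docstring describes, a sketch with arbitrary example sizes:

```python
import numpy as np

# Arbitrary sizes, illustrating the accepted pixel_array shapes
mask_2d = np.zeros((512, 512), dtype=bool)         # one single-frame image
mask_3d = np.zeros((25, 512, 512), dtype=bool)     # 25 planes, one per source
                                                   # instance (or source frame)
mask_4d = np.zeros((25, 512, 512, 3), dtype=bool)  # 25 planes x 3 segments
```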
-        cmd = (
-            'CREATE TABLE TemporarySegmentNumbers('
-            '    SegmentNumber INTEGER UNIQUE NOT NULL,'
-            '    OutputSegmentNumber INTEGER UNIQUE NOT NULL'
-            ')'
-        )
+            If ``pixel_array`` represents the segmentation of a 3D image, the
+            first dimension represents individual 2D planes. Unless the
+            ``plane_positions`` parameter is provided, the frame in
+            ``pixel_array[i, ...]`` should correspond to either
+            ``source_images[i]`` (if ``source_images`` is a list of single
+            frame instances) or ``source_images[0].pixel_array[i, ...]`` if
+            ``source_images`` is a single multiframe instance.

-        with self._db_con:
-            self._db_con.execute(cmd)
-            self._db_con.executemany(
-                'INSERT INTO '
-                'TemporarySegmentNumbers('
-                '    OutputSegmentNumber, SegmentNumber'
-                ')'
-                'VALUES(?, ?)',
-                data
-            )
+            Similarly, if ``pixel_array`` is a 3D array representing the
+            segmentation of a tiled 2D image, the first dimension represents
+            individual 2D tiles (for one channel and z-stack) and these tiles
+            correspond to the frames in the source image dataset.

-        # Yield execution to "with" block
-        yield
+            If ``pixel_array`` is an unsigned integer or boolean array with
+            binary data (containing only the values ``True`` and ``False`` or
+            ``0`` and ``1``) or a floating-point array, it represents a single
+            segment. In the case of a floating-point array, values must be in
+            the range 0.0 to 1.0.

-        # Clean up table after user code executes
-        with self._db_con:
-            self._db_con.execute('DROP TABLE TemporarySegmentNumbers')
+            Otherwise, if ``pixel_array`` is a 2D or 3D array containing multiple
+            unsigned integer values, each value is treated as a different
+            segment whose segment number is that integer value. This is
+            referred to as a *label map* style segmentation. In this case, all
+            segments from 1 through ``pixel_array.max()`` (inclusive) must be
+            described in `segment_descriptions`, regardless of whether they are
+            present in the image. Note that this is valid for segmentations
+            encoded using the ``"BINARY"`` or ``"FRACTIONAL"`` methods.

-    @contextmanager
-    def iterate_indices_by_source_instance(
-        self,
-        source_sop_instance_uids: Sequence[str],
-        segment_numbers: Sequence[int],
-        combine_segments: bool = False,
-        relabel: bool = False,
-    ) -> Generator[
-        Iterator[
-            Tuple[
-                Tuple[Union[slice, int], ...],
-                Tuple[Union[slice, int], ...],
-                int
-            ]
-        ],
-        None,
-        None,
-    ]:
-        """Iterate over segmentation frame indices for given source image
-        instances.
+            Note that a 2D numpy array and a 3D numpy array with a
+            single frame along the first dimension may be used interchangeably
+            as segmentations of a single frame, regardless of their data type.

-        This is intended for the case of a segmentation image that references
-        multiple single frame sources images (typically a series). In this
-        case, the user supplies a list of SOP Instance UIDs of the source
-        images of interest, and this method returns information about the
-        frames of the segmentation image relevant to these source images.
+            If ``pixel_array`` is a 4D numpy array, the first three dimensions
+            are used in the same way as the 3D case and the fourth dimension
+            represents multiple segments. In this case
+            ``pixel_array[:, :, :, i]`` represents segment number ``i + 1``
+            (since numpy indexing is 0-based but segment numbering is 1-based),
+            and all segments from 1 through ``pixel_array.shape[-1]`` must
+            be described in ``segment_descriptions``.
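The relationship between the *label map* and stacked forms, and the non-overlap property that makes a label map possible, can be made concrete with toy data:

```python
import numpy as np

label_map = np.array([[0, 1], [2, 2]], dtype=np.uint8)  # toy "label map"
n_segments = int(label_map.max())

# Equivalent stacked representation: channel i holds the binary mask of
# segment number i + 1 (a 3D stack here, since the input is a single frame)
stacked = np.stack(
    [label_map == i + 1 for i in range(n_segments)], axis=-1
).astype(np.uint8)

# Mutually exclusive segments never sum above 1 across the segment axis,
# which is what makes the label-map form possible at all
assert not (stacked.sum(axis=-1) > 1).any()
```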
- This yields an iterator to the underlying database result that iterates - over information on the steps required to construct the requested - segmentation mask from the stored frames of the segmentation image. + Furthermore, a 4D array with unsigned integer data type must + contain only binary data (``True`` and ``False`` or ``0`` and + ``1``). In other words, a 4D array is incompatible with the *label + map* style encoding of the segmentation. - This method is intended to be used as a context manager that yields the - requested iterator. The iterator is only valid while the context - manager is active. + Where there are multiple segments that are mutually exclusive (do + not overlap) and binary, they may be passed using either a *label + map* style array or a 4D array. A 4D array is required if either + there are multiple segments and they are not mutually exclusive + (i.e. they overlap) or there are multiple segments and the + segmentation is fractional. - Parameters - ---------- - source_sop_instance_uids: str - SOP Instance UID of the source instances for which segmentation - image frames are requested. - segment_numbers: Sequence[int] - Numbers of segments to include. - combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. - relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to - ``len(segment_numbers)`` (inclusive) according to the position of - the original segment numbers in ``segment_numbers`` parameter. If - ``combine_segments`` is ``False``, this has no effect. - - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. - - """ # noqa: E501 - # Run query to create the iterable of indices needed to construct the - # desired pixel array. The approach here is to create two temporary - # tables in the SQLite database, one for the desired source UIDs, and - # another for the desired segments, then use table joins with the - # referenced UIDs table and the frame LUT at the relevant rows, before - # clearing up the temporary tables. 
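The temporary-table-plus-join approach this comment describes underlies all of these lookup methods (removed here as the segmentation-specific manager is generalized). A self-contained `sqlite3` sketch of the idea, with a deliberately simplified schema rather than the class's real `FrameLUT`:

```python
import sqlite3

con = sqlite3.connect(':memory:')
con.execute(
    'CREATE TABLE FrameLUT ('
    '  FrameNumber INTEGER, ReferencedSOPInstanceUID VARCHAR)'
)
con.executemany('INSERT INTO FrameLUT VALUES (?, ?)', [(1, 'uid.b'), (2, 'uid.a')])

# A temporary table encodes the requested output order; a join then
# recovers the stored frame for each requested instance
con.execute(
    'CREATE TABLE TemporarySOPInstanceUIDs ('
    '  OutputFrameIndex INTEGER, SourceSOPInstanceUID VARCHAR)'
)
con.executemany(
    'INSERT INTO TemporarySOPInstanceUIDs VALUES (?, ?)',
    enumerate(['uid.a', 'uid.b']),
)
rows = con.execute(
    'SELECT T.OutputFrameIndex, L.FrameNumber - 1 '
    'FROM TemporarySOPInstanceUIDs T '
    'INNER JOIN FrameLUT L '
    '  ON T.SourceSOPInstanceUID = L.ReferencedSOPInstanceUID '
    'ORDER BY T.OutputFrameIndex'
).fetchall()
assert rows == [(0, 1), (1, 0)]  # output index -> stored frame index
con.execute('DROP TABLE TemporarySOPInstanceUIDs')  # cleanup after use
```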
- - # Create temporary table of desired frame numbers - table_name = 'TemporarySOPInstanceUIDs' - column_defs = [ - 'OutputFrameIndex INTEGER UNIQUE NOT NULL', - 'SourceSOPInstanceUID VARCHAR UNIQUE NOT NULL' - ] - column_data = enumerate(source_sop_instance_uids) - - # Construct the query The ORDER BY is not logically necessary - # but seems to improve performance of the downstream numpy - # operations, presumably as it is more cache efficient - query = ( - 'SELECT ' - ' T.OutputFrameIndex,' - ' L.FrameNumber - 1,' - ' S.OutputSegmentNumber ' - 'FROM TemporarySOPInstanceUIDs T ' - 'INNER JOIN FrameLUT L' - ' ON T.SourceSOPInstanceUID = L.ReferencedSOPInstanceUID ' - 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' - 'ORDER BY T.OutputFrameIndex' - ) - - with self._generate_temp_table( - table_name=table_name, - column_defs=column_defs, - column_data=column_data, - ): - with self._generate_temp_segment_table( - segment_numbers=segment_numbers, - combine_segments=combine_segments, - relabel=relabel - ): - yield ( - ( - (fo, slice(None), slice(None)), - (fi, slice(None), slice(None)), - seg_no - ) - for (fo, fi, seg_no) in self._db_con.execute(query) - ) - - @contextmanager - def iterate_indices_by_source_frame( - self, - source_sop_instance_uid: str, - source_frame_numbers: Sequence[int], - segment_numbers: Sequence[int], - combine_segments: bool = False, - relabel: bool = False, - ) -> Generator[ - Iterator[ - Tuple[ - Tuple[Union[slice, int], ...], - Tuple[Union[slice, int], ...], - int - ] - ], - None, - None, - ]: - """Iterate over frame indices for given source image frames. - - This is intended for the case of a segmentation image that references a - single multi-frame source image instance. In this case, the user - supplies a list of frames numbers of interest within the single source - instance, and this method returns information about the frames - of the segmentation image relevant to these frames. - - This yields an iterator to the underlying database result that iterates - over information on the steps required to construct the requested - segmentation mask from the stored frames of the segmentation image. - - This method is intended to be used as a context manager that yields the - requested iterator. The iterator is only valid while the context - manager is active. - - Parameters - ---------- - source_sop_instance_uid: str - SOP Instance UID of the source instance that contains the source - frames. - source_frame_numbers: Sequence[int] - A sequence of frame numbers (1-based) within the source instance - for which segmentations are requested. - segment_numbers: Sequence[int] - Sequence containing segment numbers to include. - combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. - relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to - ``len(segment_numbers)`` (inclusive) according to the position of - the original segment numbers in ``segment_numbers`` parameter. If - ``combine_segments`` is ``False``, this has no effect. - - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. 
Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. - - """ # noqa: E501 - # Run query to create the iterable of indices needed to construct the - # desired pixel array. The approach here is to create two temporary - # tables in the SQLite database, one for the desired frame numbers, and - # another for the desired segments, then use table joins with the frame - # LUT to arrive at the relevant rows, before clearing up the temporary - # tables. - - # Create temporary table of desired frame numbers - table_name = 'TemporaryFrameNumbers' - column_defs = [ - 'OutputFrameIndex INTEGER UNIQUE NOT NULL', - 'SourceFrameNumber INTEGER UNIQUE NOT NULL' - ] - column_data = enumerate(source_frame_numbers) - - # Construct the query The ORDER BY is not logically necessary - # but seems to improve performance of the downstream numpy - # operations, presumably as it is more cache efficient - query = ( - 'SELECT ' - ' F.OutputFrameIndex,' - ' L.FrameNumber - 1,' - ' S.OutputSegmentNumber ' - 'FROM TemporaryFrameNumbers F ' - 'INNER JOIN FrameLUT L' - ' ON F.SourceFrameNumber = L.ReferencedFrameNumber ' - 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' - 'ORDER BY F.OutputFrameIndex' - ) - - with self._generate_temp_table( - table_name=table_name, - column_defs=column_defs, - column_data=column_data, - ): - with self._generate_temp_segment_table( - segment_numbers=segment_numbers, - combine_segments=combine_segments, - relabel=relabel - ): - yield ( - ( - (fo, slice(None), slice(None)), - (fi, slice(None), slice(None)), - seg_no - ) - for (fo, fi, seg_no) in self._db_con.execute(query) - ) - - @contextmanager - def iterate_indices_for_volume( - self, - slice_start: int, - slice_end: int, - segment_numbers: Sequence[int], - combine_segments: bool = False, - relabel: bool = False, - ) -> Generator[ - Iterator[ - Tuple[ - Tuple[Union[slice, int], ...], - Tuple[Union[slice, int], ...], - int - ] - ], - None, - None, - ]: - """Iterate over frame indices sorted by volume. - - This yields an iterator to the underlying database result that iterates - over information on the steps required to construct the requested - segmentation mask from the stored frames of the segmentation image. - - This method is intended to be used as a context manager that yields the - requested iterator. The iterator is only valid while the context - manager is active. - - Parameters - ---------- - slice_start: int, optional - Zero-based index of the "volume position" of the first slice of the - returned volume. The "volume position" refers to the position of - slices after sorting spatially, and may correspond to any frame in - the segmentation file, depending on its construction. Must be a - non-negative integer. - slice_end: Union[int, None], optional - Zero-based index of the "volume position" one beyond the last slice - of the returned volume. The "volume position" refers to the - position of slices after sorting spatially, and may correspond to - any frame in the segmentation file, depending on its construction. - Must be a positive integer. - segment_numbers: Sequence[int] - Sequence containing segment numbers to include. 
- combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. - relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to - ``len(segment_numbers)`` (inclusive) according to the position of - the original segment numbers in ``segment_numbers`` parameter. If - ``combine_segments`` is ``False``, this has no effect. - - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. - - """ # noqa: E501 - if self.volume_geometry is None: - raise RuntimeError( - 'This segmentation does not represent a regularly-spaced ' - 'volume.' - ) - - # Construct the query The ORDER BY is not logically necessary - # but seems to improve performance of the downstream numpy - # operations, presumably as it is more cache efficient - query = ( - 'SELECT ' - f' L.VolumePosition - {slice_start},' - ' L.FrameNumber - 1,' - ' S.OutputSegmentNumber ' - 'FROM FrameLUT L ' - 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' - 'WHERE ' - f' L.VolumePosition >= {slice_start} AND ' - f' L.VolumePosition < {slice_end} ' - 'ORDER BY L.VolumePosition' - ) - - with self._generate_temp_segment_table( - segment_numbers=segment_numbers, - combine_segments=combine_segments, - relabel=relabel - ): - yield ( - ( - (fo, slice(None), slice(None)), - (fi, slice(None), slice(None)), - seg_no - ) - for (fo, fi, seg_no) in self._db_con.execute(query) - ) - - @contextmanager - def iterate_indices_by_dimension_index_values( - self, - dimension_index_values: Sequence[Sequence[int]], - dimension_index_pointers: Sequence[int], - segment_numbers: Sequence[int], - combine_segments: bool = False, - relabel: bool = False, - ) -> Generator[ - Iterator[ - Tuple[ - Tuple[Union[slice, int], ...], - Tuple[Union[slice, int], ...], - int - ] - ], - None, - None, - ]: - """Iterate over frame indices for given dimension index values. - - This is intended to be the most flexible and lowest-level (and there - also least convenient) method to request information about - segmentation frames. The user can choose to specify which segmentation - frames are of interest using arbitrary dimension indices and their - associated values. This makes no assumptions about the dimension - organization of the underlying segmentation, except that the given - dimension indices can be used to uniquely identify frames in the - segmentation image. - - This yields an iterator to the underlying database result that iterates - over information on the steps required to construct the requested - segmentation mask from the stored frames of the segmentation image. - - This method is intended to be used as a context manager that yields the - requested iterator. The iterator is only valid while the context - manager is active. 
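The "context manager that yields an iterator" idiom these docstrings describe can be sketched generically; a minimal example of the pattern, not the actual implementation:

```python
import sqlite3
from contextlib import contextmanager
from typing import Generator, Iterator, Tuple


@contextmanager
def iterate_rows(
    con: sqlite3.Connection, query: str
) -> Generator[Iterator[Tuple], None, None]:
    # Resources (temporary tables, cursors) live only while the "with"
    # block runs, so the yielded iterator must be consumed inside it
    cur = con.execute(query)
    try:
        yield iter(cur)
    finally:
        cur.close()
```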
- - Parameters - ---------- - dimension_index_values: Sequence[Sequence[int]] - Dimension index values for the requested frames. - dimension_index_pointers: Sequence[Union[int, pydicom.tag.BaseTag]] - The data element tags that identify the indices used in the - ``dimension_index_values`` parameter. - segment_numbers: Sequence[int] - Sequence containing segment numbers to include. - combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. - relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to - ``len(segment_numbers)`` (inclusive) according to the position of - the original segment numbers in ``segment_numbers`` parameter. If - ``combine_segments`` is ``False``, this has no effect. - - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. - - """ # noqa: E501 - # Create temporary table of desired dimension indices - table_name = 'TemporaryDimensionIndexValues' - - dim_ind_cols = [ - self._dim_ind_col_names[p] for p in dimension_index_pointers - ] - column_defs = ( - ['OutputFrameIndex INTEGER UNIQUE NOT NULL'] + - [f'{col} INTEGER NOT NULL' for col in dim_ind_cols] - ) - column_data = ( - (i, *tuple(row)) - for i, row in enumerate(dimension_index_values) - ) - - # Construct the query The ORDER BY is not logically necessary - # but seems to improve performance of the downstream numpy - # operations, presumably as it is more cache efficient - join_str = ' AND '.join(f'D.{col} = L.{col}' for col in dim_ind_cols) - query = ( - 'SELECT ' - ' D.OutputFrameIndex,' # frame index of the output array - ' L.FrameNumber - 1,' # frame *index* of segmentation image - ' S.OutputSegmentNumber ' # output segment number - 'FROM TemporaryDimensionIndexValues D ' - 'INNER JOIN FrameLUT L' - f' ON {join_str} ' - 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' - 'ORDER BY D.OutputFrameIndex' - ) - - with self._generate_temp_table( - table_name=table_name, - column_defs=column_defs, - column_data=column_data, - ): - with self._generate_temp_segment_table( - segment_numbers=segment_numbers, - combine_segments=combine_segments, - relabel=relabel - ): - yield ( - ( - (fo, slice(None), slice(None)), - (fi, slice(None), slice(None)), - seg_no - ) - for (fo, fi, seg_no) in self._db_con.execute(query) - ) - - @contextmanager - def iterate_indices_for_tiled_region( - self, - row_start: int, - row_end: int, - column_start: int, - column_end: int, - tile_shape: Tuple[int, int], - segment_numbers: Sequence[int], - combine_segments: bool = False, - relabel: bool = False, - ) -> Generator[ - Iterator[ - Tuple[ - Tuple[Union[slice, int], ...], - Tuple[Union[slice, int], ...], - int - ] - ], - None, - None, - ]: - """Iterate over segmentation frame indices for a given 
region of the - segmentation's total pixel matrix. - - This is intended for the case of a segmentation image that is stored as - a tiled representation of total pixel matrix. - - This yields an iterator to the underlying database result that iterates - over information on the steps required to construct the requested - segmentation mask from the stored frames of the segmentation image. - - This method is intended to be used as a context manager that yields the - requested iterator. The iterator is only valid while the context - manager is active. - - Parameters - ---------- - row_start: int - Row index (1-based) in the total pixel matrix of the first row of - the output array. May be negative (last row is -1). - row_end: int - Row index (1-based) in the total pixel matrix one beyond the last - row of the output array. May be negative (last row is -1). - column_start: int - Column index (1-based) in the total pixel matrix of the first - column of the output array. May be negative (last column is -1). - column_end: int - Column index (1-based) in the total pixel matrix one beyond the last - column of the output array. May be negative (last column is -1). - tile_shape: Tuple[int, int] - Shape of each tile (rows, columns). - segment_numbers: Sequence[int] - Numbers of segments to include. - combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. - relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to - ``len(segment_numbers)`` (inclusive) according to the position of - the original segment numbers in ``segment_numbers`` parameter. If - ``combine_segments`` is ``False``, this has no effect. - - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. 
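Whichever method produces them, the yielded triplets are applied in the same way. A simplified sketch of a consumer (argument names are assumptions, and only a label-map-style output path is shown):

```python
from typing import Iterable, Tuple, Union

import numpy as np

Indexer = Tuple[Union[slice, int], ...]


def apply_indices(
    out: np.ndarray,
    seg_frames: np.ndarray,
    indices: Iterable[Tuple[Indexer, Indexer, int]],
) -> None:
    # Each triplet copies one stored frame (or a slice of it) into the
    # output, writing the output segment number wherever the frame is set
    for out_idx, seg_idx, seg_no in indices:
        frame = seg_frames[seg_idx]
        out[out_idx][frame > 0] = seg_no
```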
- - """ # noqa: E501 - th, tw = tile_shape - - oh = row_end - row_start - ow = column_end - column_start - - row_offset_start = row_start - th + 1 - column_offset_start = column_start - tw + 1 - - # Construct the query The ORDER BY is not logically necessary - # but seems to improve performance of the downstream numpy - # operations, presumably as it is more cache efficient - query = ( - 'SELECT ' - ' L.RowPositionInTotalImagePixelMatrix,' - ' L.ColumnPositionInTotalImagePixelMatrix,' - ' L.FrameNumber - 1,' - ' S.OutputSegmentNumber ' - 'FROM FrameLUT L ' - 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' - 'WHERE (' - ' L.RowPositionInTotalImagePixelMatrix >= ' - f' {row_offset_start}' - f' AND L.RowPositionInTotalImagePixelMatrix < {row_end}' - ' AND L.ColumnPositionInTotalImagePixelMatrix >= ' - f' {column_offset_start}' - f' AND L.ColumnPositionInTotalImagePixelMatrix < {column_end}' - ')' - 'ORDER BY ' - ' L.RowPositionInTotalImagePixelMatrix,' - ' L.ColumnPositionInTotalImagePixelMatrix,' - ' S.OutputSegmentNumber' - ) - - with self._generate_temp_segment_table( - segment_numbers=segment_numbers, - combine_segments=combine_segments, - relabel=relabel - ): - yield ( - ( - ( - slice( - max(rp - row_start, 0), - min(rp + th - row_start, oh) - ), - slice( - max(cp - column_start, 0), - min(cp + tw - column_start, ow) - ), - ), - ( - fi, - slice( - max(row_start - rp, 0), - min(row_end - rp, th) - ), - slice( - max(column_start - cp, 0), - min(column_end - cp, tw) - ), - ), - seg_no - ) - for (rp, cp, fi, seg_no) in self._db_con.execute(query) - ) - - -class Segmentation(SOPClass): - - """SOP class for the Segmentation IOD.""" - - def __init__( - self, - source_images: Sequence[Dataset], - pixel_array: Union[np.ndarray, Volume], - segmentation_type: Union[str, SegmentationTypeValues], - segment_descriptions: Sequence[SegmentDescription], - series_instance_uid: str, - series_number: int, - sop_instance_uid: str, - instance_number: int, - manufacturer: str, - manufacturer_model_name: str, - software_versions: Union[str, Tuple[str]], - device_serial_number: str, - fractional_type: Optional[ - Union[str, SegmentationFractionalTypeValues] - ] = SegmentationFractionalTypeValues.PROBABILITY, - max_fractional_value: int = 255, - content_description: Optional[str] = None, - content_creator_name: Optional[Union[str, PersonName]] = None, - transfer_syntax_uid: Union[str, UID] = ExplicitVRLittleEndian, - pixel_measures: Optional[PixelMeasuresSequence] = None, - plane_orientation: Optional[PlaneOrientationSequence] = None, - plane_positions: Optional[Sequence[PlanePositionSequence]] = None, - omit_empty_frames: bool = True, - content_label: Optional[str] = None, - content_creator_identification: Optional[ - ContentCreatorIdentificationCodeSequence - ] = None, - workers: Union[int, Executor] = 0, - dimension_organization_type: Union[ - DimensionOrganizationTypeValues, - str, - None, - ] = None, - tile_pixel_array: bool = False, - tile_size: Union[Sequence[int], None] = None, - pyramid_uid: Optional[str] = None, - pyramid_label: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - Parameters - ---------- - source_images: Sequence[Dataset] - One or more single- or multi-frame images (or metadata of images) - from which the segmentation was derived - pixel_array: numpy.ndarray - Array of segmentation pixel data of boolean, unsigned integer or - floating point data type representing a mask image. The array may - be a 2D, 3D or 4D numpy array. 
- - If it is a 2D numpy array, it represents the segmentation of a - single frame image, such as a planar x-ray or single instance from - a CT or MR series. - - If it is a 3D array, it represents the segmentation of either a - series of source images (such as a series of CT or MR images) a - single 3D multi-frame image (such as a multi-frame CT/MR image), or - a single 2D tiled image (such as a slide microscopy image). - - If ``pixel_array`` represents the segmentation of a 3D image, the - first dimension represents individual 2D planes. Unless the - ``plane_positions`` parameter is provided, the frame in - ``pixel_array[i, ...]`` should correspond to either - ``source_images[i]`` (if ``source_images`` is a list of single - frame instances) or ``source_images[0].pixel_array[i, ...]`` if - ``source_images`` is a single multiframe instance. - - Similarly, if ``pixel_array`` is a 3D array representing the - segmentation of a tiled 2D image, the first dimension represents - individual 2D tiles (for one channel and z-stack) and these tiles - correspond to the frames in the source image dataset. - - If ``pixel_array`` is an unsigned integer or boolean array with - binary data (containing only the values ``True`` and ``False`` or - ``0`` and ``1``) or a floating-point array, it represents a single - segment. In the case of a floating-point array, values must be in - the range 0.0 to 1.0. - - Otherwise, if ``pixel_array`` is a 2D or 3D array containing multiple - unsigned integer values, each value is treated as a different - segment whose segment number is that integer value. This is - referred to as a *label map* style segmentation. In this case, all - segments from 1 through ``pixel_array.max()`` (inclusive) must be - described in `segment_descriptions`, regardless of whether they are - present in the image. Note that this is valid for segmentations - encoded using the ``"BINARY"`` or ``"FRACTIONAL"`` methods. - - Note that that a 2D numpy array and a 3D numpy array with a - single frame along the first dimension may be used interchangeably - as segmentations of a single frame, regardless of their data type. - - If ``pixel_array`` is a 4D numpy array, the first three dimensions - are used in the same way as the 3D case and the fourth dimension - represents multiple segments. In this case - ``pixel_array[:, :, :, i]`` represents segment number ``i + 1`` - (since numpy indexing is 0-based but segment numbering is 1-based), - and all segments from 1 through ``pixel_array.shape[-1] + 1`` must - be described in ``segment_descriptions``. - - Furthermore, a 4D array with unsigned integer data type must - contain only binary data (``True`` and ``False`` or ``0`` and - ``1``). In other words, a 4D array is incompatible with the *label - map* style encoding of the segmentation. - - Where there are multiple segments that are mutually exclusive (do - not overlap) and binary, they may be passed using either a *label - map* style array or a 4D array. A 4D array is required if either - there are multiple segments and they are not mutually exclusive - (i.e. they overlap) or there are multiple segments and the - segmentation is fractional. - - Note that if the segmentation of a single source image with - multiple stacked segments is required, it is necessary to include - the singleton first dimension in order to give a 4D array. 
+ Note that if the segmentation of a single source image with + multiple stacked segments is required, it is necessary to include + the singleton first dimension in order to give a 4D array. For ``"FRACTIONAL"`` segmentations, values either encode the probability of a given pixel belonging to a segment @@ -2389,1509 +1657,2145 @@ def _add_slide_coordinate_metadata( self.ImageCenterPointCoordinatesSequence = [center_item] @staticmethod - def _check_tiled_dimension_organization_type( - dimension_organization_type: Union[ - DimensionOrganizationTypeValues, - str, - None, - ], - is_tiled: bool, - omit_empty_frames: bool, + def _check_tiled_dimension_organization_type( + dimension_organization_type: Union[ + DimensionOrganizationTypeValues, + str, + None, + ], + is_tiled: bool, + omit_empty_frames: bool, + plane_positions: Sequence[PlanePositionSequence], + tile_pixel_array: bool, + rows: int, + columns: int, + ) -> Optional[DimensionOrganizationTypeValues]: + """Checks that the specified Dimension Organization Type is valid. + + Parameters + ---------- + dimension_organization_type: Union[highdicom.enum.DimensionOrganizationTypeValues, str, None] + The specified DimensionOrganizationType for the output Segmentation. + is_tiled: bool + Whether the source image is a tiled image. + omit_empty_frames: bool + Whether it was specified to omit empty frames. + tile_pixel_array: bool + Whether the total pixel matrix was passed. + plane_positions: Sequence[highdicom.PlanePositionSequence] + Plane positions of all frames. + rows: int + Number of rows in each frame of the segmentation image. + columns: int + Number of columns in each frame of the segmentation image. + + Returns + ------- + Optional[highdicom.enum.DimensionOrganizationTypeValues]: + DimensionOrganizationType to use for the output Segmentation. + + """ # noqa: E501 + if is_tiled and dimension_organization_type is None: + dimension_organization_type = \ + DimensionOrganizationTypeValues.TILED_SPARSE + + if dimension_organization_type is not None: + dimension_organization_type = DimensionOrganizationTypeValues( + dimension_organization_type + ) + tiled_dimension_organization_types = [ + DimensionOrganizationTypeValues.TILED_SPARSE, + DimensionOrganizationTypeValues.TILED_FULL + ] + + if ( + dimension_organization_type in + tiled_dimension_organization_types + ): + if not is_tiled: + raise ValueError( + f"A value of {dimension_organization_type.value} " + 'for parameter "dimension_organization_type" is ' + 'only valid if the source images are tiled.' + ) + + if ( + dimension_organization_type == + DimensionOrganizationTypeValues.TILED_FULL + ): + # Need to check positions if they were not generated by us + # when using tile_pixel_array + if ( + not tile_pixel_array and + not are_plane_positions_tiled_full( + plane_positions, + rows, + columns, + ) + ): + raise ValueError( + 'A value of "TILED_FULL" for parameter ' + '"dimension_organization_type" is not permitted because ' + 'the "plane_positions" of the segmentation ' + 'do not follow the relevant requirements. See ' + 'https://dicom.nema.org/medical/dicom/current/output/' + 'chtml/part03/sect_C.7.6.17.3.html#sect_C.7.6.17.3.' + ) + if omit_empty_frames: + raise ValueError( + 'Parameter "omit_empty_frames" should be False if ' + 'using "dimension_organization_type" of "TILED_FULL".' 
+                )
+
+        return dimension_organization_type
+
+    @staticmethod
+    def _check_and_cast_pixel_array(
+        pixel_array: np.ndarray,
+        number_of_segments: int,
+        segmentation_type: SegmentationTypeValues
+    ) -> Tuple[np.ndarray, SegmentsOverlapValues]:
+        """Checks on the shape and data type of the pixel array.
+
+        Also checks for overlapping segments and returns the result.
+
+        Parameters
+        ----------
+        pixel_array: numpy.ndarray
+            The segmentation pixel array.
+        number_of_segments: int
+            The number of segments described in the segment descriptions.
+        segmentation_type: highdicom.seg.SegmentationTypeValues
+            The segmentation_type parameter.
+
+        Returns
+        -------
+        pixel_array: numpy.ndarray
+            Input pixel array with the data type simplified if possible.
+        segments_overlap: highdicom.seg.SegmentsOverlapValues
+            The value for the SegmentsOverlap attribute, inferred from the
+            pixel array.
+
+        """
+        if pixel_array.ndim == 4:
+            # Check that the number of segments in the array matches
+            if pixel_array.shape[-1] != number_of_segments:
+                raise ValueError(
+                    'The number of segments in last dimension of the pixel '
+                    f'array ({pixel_array.shape[-1]}) does not match the '
+                    'number of described segments '
+                    f'({number_of_segments}).'
+                )
+
+        if pixel_array.dtype in (np.bool_, np.uint8, np.uint16):
+            max_pixel = pixel_array.max()
+
+            if pixel_array.ndim == 3:
+                # A label-map style array where pixel values represent
+                # segment associations
+
+                # The pixel values in the pixel array must all belong to
+                # a described segment
+                if max_pixel > number_of_segments:
+                    raise ValueError(
+                        'Pixel array contains segments that lack '
+                        'descriptions.'
+                    )
+
+                # By construction of the pixel array, we know that the segments
+                # cannot overlap
+                segments_overlap = SegmentsOverlapValues.NO
+            else:
+                # Pixel array is 4D where each segment is stacked down
+                # the last dimension
+                # In this case, each segment of the pixel array should be binary
+                if max_pixel > 1:
+                    raise ValueError(
+                        'When passing a 4D stack of segments with an integer '
+                        'pixel type, the pixel array must be binary.'
+                    )
+
+                # Need to check whether or not segments overlap
+                if max_pixel == 0:
+                    # Empty segments can't overlap (this skips an unnecessary
+                    # further test)
+                    segments_overlap = SegmentsOverlapValues.NO
+                elif pixel_array.shape[-1] == 1:
+                    # A single segment does not overlap
+                    segments_overlap = SegmentsOverlapValues.NO
+                else:
+                    sum_over_segments = pixel_array.sum(axis=-1)
+                    if np.any(sum_over_segments > 1):
+                        segments_overlap = SegmentsOverlapValues.YES
+                    else:
+                        segments_overlap = SegmentsOverlapValues.NO
+
+        elif pixel_array.dtype in (np.float_, np.float32, np.float64):
+            unique_values = np.unique(pixel_array)
+            if np.min(unique_values) < 0.0 or np.max(unique_values) > 1.0:
+                raise ValueError(
+                    'Floating point pixel array values must be in the '
+                    'range [0, 1].'
+                )
+            if segmentation_type == SegmentationTypeValues.BINARY:
+                non_boolean_values = np.logical_and(
+                    unique_values > 0.0,
+                    unique_values < 1.0
+                )
+                if np.any(non_boolean_values):
+                    raise ValueError(
+                        'Floating point pixel array values must be either '
+                        '0.0 or 1.0 in case of BINARY segmentation type.'
+ ) + pixel_array = pixel_array.astype(np.uint8) + + # Need to check whether or not segments overlap + if len(unique_values) == 1 and unique_values[0] == 0.0: + # All pixels are zero: there can be no overlap + segments_overlap = SegmentsOverlapValues.NO + elif pixel_array.ndim == 3 or pixel_array.shape[-1] == 1: + # A single segment does not overlap + segments_overlap = SegmentsOverlapValues.NO + elif pixel_array.sum(axis=-1).max() > 1: + segments_overlap = SegmentsOverlapValues.YES + else: + segments_overlap = SegmentsOverlapValues.NO + else: + if (pixel_array.ndim == 3) or (pixel_array.shape[-1] == 1): + # A single segment does not overlap + segments_overlap = SegmentsOverlapValues.NO + else: + # A truly fractional segmentation with multiple segments. + # Unclear how overlap should be interpreted in this case + segments_overlap = SegmentsOverlapValues.UNDEFINED + else: + raise TypeError('Pixel array has an invalid data type.') + + return pixel_array, segments_overlap + + @staticmethod + def _get_nonempty_plane_indices( + pixel_array: np.ndarray + ) -> Tuple[List[int], bool]: + """Get a list of all indices of original planes that are non-empty. + + Empty planes (without any positive pixels in any of the segments) do + not need to be included in the segmentation image. This method finds a + list of indices of the input frames that are non-empty, and therefore + should be included in the segmentation image. + + Parameters + ---------- + pixel_array: numpy.ndarray + Segmentation pixel array + + Returns + ------- + included_plane_indices : List[int] + List giving for each plane position in the resulting segmentation + image the index of the corresponding frame in the original pixel + array. + is_empty: bool + Whether the entire image is empty. If so, empty frames should not + be omitted. + + """ + # This list tracks which source image each non-empty frame came from + source_image_indices = [ + i for i, frm in enumerate(pixel_array) + if np.any(frm) + ] + + if len(source_image_indices) == 0: + logger.warning( + 'Encoding an empty segmentation with "omit_empty_frames" ' + 'set to True. Reverting to encoding all frames since omitting ' + 'all frames is not possible.' + ) + return (list(range(pixel_array.shape[0])), True) + + return (source_image_indices, False) + + @staticmethod + def _get_nonempty_tile_indices( + pixel_array: np.ndarray, plane_positions: Sequence[PlanePositionSequence], - tile_pixel_array: bool, rows: int, columns: int, - ) -> Optional[DimensionOrganizationTypeValues]: - """Checks that the specified Dimension Organization Type is valid. + ) -> Tuple[List[int], bool]: + """Get a list of all indices of tile locations that are non-empty. + + This is similar to _get_nonempty_plane_indices, but works on a total + pixel matrix rather than a set of frames. Empty planes (without any + positive pixels in any of the segments) do not need to be included in + the segmentation image. This method finds a list of indices of the + input frames that are non-empty, and therefore should be included in + the segmentation image. Parameters ---------- - dimension_organization_type: Union[highdicom.enum.DimensionOrganizationTypeValues, str, None] - The specified DimensionOrganizationType for the output Segmentation. - is_tiled: bool - Whether the source image is a tiled image. - omit_empty_frames: bool - Whether it was specified to omit empty frames. - tile_pixel_array: bool - Whether the total pixel matrix was passed. 
+ pixel_array: numpy.ndarray + Segmentation pixel array plane_positions: Sequence[highdicom.PlanePositionSequence] - Plane positions of all frames. + Plane positions of each tile. rows: int - Number of rows in each frame of the segmentation image. + Number of rows in each tile. columns: int - Number of columns in each frame of the segmentation image. + Number of columns in each tile. Returns ------- - Optional[highdicom.enum.DimensionOrganizationTypeValues]: - DimensionOrganizationType to use for the output Segmentation. + included_plane_indices : List[int] + List giving for each plane position in the resulting segmentation + image the index of the corresponding frame in the original pixel + array. + is_empty: bool + Whether the entire image is empty. If so, empty frames should not + be omitted. - """ # noqa: E501 - if is_tiled and dimension_organization_type is None: - dimension_organization_type = \ - DimensionOrganizationTypeValues.TILED_SPARSE + """ + # This list tracks which source image each non-empty frame came from + source_image_indices = [ + i for i, pos in enumerate(plane_positions) + if np.any( + get_tile_array( + pixel_array[0], + row_offset=pos[0].RowPositionInTotalImagePixelMatrix, + column_offset=pos[0].ColumnPositionInTotalImagePixelMatrix, + tile_rows=rows, + tile_columns=columns, + ) + ) + ] - if dimension_organization_type is not None: - dimension_organization_type = DimensionOrganizationTypeValues( - dimension_organization_type + if len(source_image_indices) == 0: + logger.warning( + 'Encoding an empty segmentation with "omit_empty_frames" ' + 'set to True. Reverting to encoding all frames since omitting ' + 'all frames is not possible.' ) - tiled_dimension_organization_types = [ - DimensionOrganizationTypeValues.TILED_SPARSE, - DimensionOrganizationTypeValues.TILED_FULL - ] + return (list(range(len(plane_positions))), True) - if ( - dimension_organization_type in - tiled_dimension_organization_types - ): - if not is_tiled: - raise ValueError( - f"A value of {dimension_organization_type.value} " - 'for parameter "dimension_organization_type" is ' - 'only valid if the source images are tiled.' - ) + return (source_image_indices, False) - if ( - dimension_organization_type == - DimensionOrganizationTypeValues.TILED_FULL - ): - # Need to check positions if they were not generated by us - # when using tile_pixel_array - if ( - not tile_pixel_array and - not are_plane_positions_tiled_full( - plane_positions, - rows, - columns, - ) - ): - raise ValueError( - 'A value of "TILED_FULL" for parameter ' - '"dimension_organization_type" is not permitted because ' - 'the "plane_positions" of the segmentation ' - 'do not follow the relevant requirements. See ' - 'https://dicom.nema.org/medical/dicom/current/output/' - 'chtml/part03/sect_C.7.6.17.3.html#sect_C.7.6.17.3.' - ) - if omit_empty_frames: - raise ValueError( - 'Parameter "omit_empty_frames" should be False if ' - 'using "dimension_organization_type" of "TILED_FULL".' - ) + @staticmethod + def _get_segment_pixel_array( + pixel_array: np.ndarray, + segment_number: int, + number_of_segments: int, + segmentation_type: SegmentationTypeValues, + max_fractional_value: int + ) -> np.ndarray: + """Get pixel data array for a specific segment and plane. + + This is a helper method used during the constructor. Note that the + pixel array is expected to have been processed using the + ``_check_and_cast_pixel_array`` method before being passed to this + method. 
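The binary extraction this helper performs reduces to two indexing cases; a simplified sketch (the fractional rescaling path is omitted):

```python
import numpy as np


def segment_mask(pixel_array: np.ndarray, segment_number: int) -> np.ndarray:
    # Label-map case: select pixels whose value equals the segment number
    if pixel_array.ndim == 2:
        return (pixel_array == segment_number).astype(np.uint8)
    # Stacked case: segment number i + 1 lives at channel i of the last axis
    return pixel_array[:, :, segment_number - 1].astype(np.uint8)
```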
+
+        Parameters
+        ----------
+        pixel_array: numpy.ndarray
+            Segmentation pixel array containing all segments for a single plane.
+            Array is therefore either (Rows x Columns x Segments) or (Rows x
+            Columns) in case of a "label map" style array.
+        segment_number: int
+            The segment of interest.
+        number_of_segments: int
+            Number of segments in the segmentation.
+        segmentation_type: highdicom.seg.SegmentationTypeValues
+            Desired output segmentation type.
+        max_fractional_value: int
+            Value for scaling FRACTIONAL segmentations.
+
+        Returns
+        -------
+        numpy.ndarray:
+            Pixel data array consisting of pixel data for a single segment for
+            a single plane. Output array has dtype np.uint8 and binary values
+            (0 or 1).
+
+        """
+        if pixel_array.dtype in (np.float_, np.float32, np.float64):
+            # Based on the previous checks and casting, if we get here the
+            # output is a FRACTIONAL segmentation. Floating-point numbers must
+            # be mapped to 8-bit integers in the range [0,
+            # max_fractional_value].
+            if pixel_array.ndim == 3:
+                segment_array = pixel_array[:, :, segment_number - 1]
+            else:
+                segment_array = pixel_array
+            segment_array = np.around(
+                segment_array * float(max_fractional_value)
+            )
+            segment_array = segment_array.astype(np.uint8)
+        else:
+            if pixel_array.ndim == 2:
+                # "Label maps" that must be converted to binary masks.
+                if number_of_segments == 1:
+                    # We wish to avoid unnecessary comparison or casting
+                    # operations here, for efficiency reasons. If there is only
+                    # a single segment, the label map pixel array is already
+                    # correct
+                    if pixel_array.dtype != np.uint8:
+                        segment_array = pixel_array.astype(np.uint8)
+                    else:
+                        segment_array = pixel_array
+                else:
+                    segment_array = (
+                        pixel_array == segment_number
+                    ).astype(np.uint8)
+            else:
+                segment_array = pixel_array[:, :, segment_number - 1]
+                if segment_array.dtype != np.uint8:
+                    segment_array = segment_array.astype(np.uint8)
+
+            # It may happen that a binary valued array is passed that should be
+            # stored as a fractional segmentation. In this case, we also need
+            # to stretch pixel values to 8-bit unsigned integer range by
+            # multiplying with the maximum fractional value.
+            if segmentation_type == SegmentationTypeValues.FRACTIONAL:
+                # Avoid an unnecessary multiplication operation if max
+                # fractional value is 1
+                if int(max_fractional_value) != 1:
+                    segment_array *= int(max_fractional_value)
+
+        return segment_array
+
+    @staticmethod
+    def _get_dimension_index_values(
+        unique_dimension_values: List[np.ndarray],
+        plane_position_value: np.ndarray,
+        coordinate_system: Optional[CoordinateSystemNames],
+    ) -> List[int]:
+        """Get Dimension Index Values for a frame.
+
+        The Dimension Index Values are a list of integer indices that describe
+        the position of a frame as indices along each of the dimensions of
+        the Dimension Index Sequence. See
+        :class:`highdicom.seg.DimensionIndexSequence`.
+
+        Parameters
+        ----------
+        unique_dimension_values: List[numpy.ndarray]
+            List of arrays containing, for each dimension in the dimension
+            index sequence (except ReferencedSegment), the sorted unique
+            values of all planes along that dimension. Each array in the list
+            corresponds to one dimension, and has shape (N x m) where N is the
+            number of unique values for that dimension and m is the
+            multiplicity of values for that dimension.
+        plane_position_value: numpy.ndarray
+            Plane position of the plane.
This is a 1D or 2D array containing + each of the raw values for this plane of the attributes listed as + dimension index pointers (except ReferencedSegment). For dimension + indices where the value multiplicity of all attributes is 1, the + array will be 1D. If the value multiplicity of attributes is + greater than 1, these values are stacked along the second + dimension. + coordinate_system: Optional[highdicom.CoordinateSystemNames] + The type of coordinate system used (if any). + + Returns + ------- + dimension_index_values: List[int] + The dimension index values (except the segment number) for the + given plane. + + """ + # Look up the position of the plane relative to the indexed + # dimension. + if ( + coordinate_system == + CoordinateSystemNames.SLIDE + ): + index_values = [ + int( + np.where( + unique_dimension_values[idx] == pos + )[0][0] + 1 + ) + for idx, pos in enumerate(plane_position_value) + ] + else: + # In case of the patient coordinate system, the + # value of the attribute the Dimension Index + # Sequence points to (Image Position Patient) has a + # value multiplicity greater than one. + index_values = [ + int( + np.where( + (unique_dimension_values[idx] == pos).all( + axis=1 + ) + )[0][0] + 1 + ) + for idx, pos in enumerate(plane_position_value) + ] - return dimension_organization_type + return index_values @staticmethod - def _check_and_cast_pixel_array( - pixel_array: np.ndarray, - number_of_segments: int, - segmentation_type: SegmentationTypeValues - ) -> Tuple[np.ndarray, SegmentsOverlapValues]: - """Checks on the shape and data type of the pixel array. + def _get_pffg_item( + segment_number: int, + dimension_index_values: List[int], + plane_position: PlanePositionSequence, + source_images: List[Dataset], + source_image_index: int, + are_spatial_locations_preserved: bool, + has_ref_frame_uid: bool, + coordinate_system: Optional[CoordinateSystemNames], + is_multiframe: bool, + ) -> Dataset: + """Get a single item of the Per Frame Functional Groups Sequence. - Also checks for overlapping segments and returns the result. + This is a helper method used in the constructor. Parameters ---------- - pixel_array: numpy.ndarray - The segmentation pixel array. - number_of_segments: int - The segment numbers from the segment descriptions, in the order - they were passed. 1D array of integers. - segmentation_type: highdicom.seg.SegmentationTypeValues - The segmentation_type parameter. + segment_number: int + Segment number of this segmentation frame. + dimension_index_values: List[int] + Dimension index values (except segment number) for this frame. + plane_position: highdicom.seg.PlanePositionSequence + Plane position of this frame. + source_images: List[Dataset] + Full list of source images. + source_image_index: int + Index of this frame in the original list of source images. + are_spatial_locations_preserved: bool + Whether spatial locations are preserved between the segmentation + and the source images. + has_ref_frame_uid: bool + Whether the sources images have a frame of reference UID. + coordinate_system: Optional[highdicom.CoordinateSystemNames] + Coordinate system used, if any. + is_multiframe: bool + Whether source images are multiframe. Returns ------- - pixel_array: numpyp.ndarray - Input pixel array with the data type simplified if possible. - segments_overlap: highdicom.seg.SegmentationOverlaps - The value for the SegmentationOverlaps attribute, inferred from the - pixel array. 
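The `np.where` lookups in `_get_dimension_index_values` above map a plane's raw attribute value to its 1-based rank among the sorted unique values. A toy sketch of the multi-valued case (positions are made up):

```python
import numpy as np

# Sorted unique plane positions for a multi-valued attribute such as
# ImagePositionPatient (value multiplicity 3)
unique_positions = np.array([[0., 0., 0.], [0., 0., 5.], [0., 0., 10.]])
pos = np.array([0., 0., 5.])

# Compare row-wise, then convert the matching row index to a 1-based value
index_value = int(np.where((unique_positions == pos).all(axis=1))[0][0] + 1)
assert index_value == 2
```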
+ pydicom.Dataset + Dataset representing the item of the + Per Frame Functional Groups Sequence for this segmentation frame. """ - if pixel_array.ndim == 4: - # Check that the number of segments in the array matches - if pixel_array.shape[-1] != number_of_segments: - raise ValueError( - 'The number of segments in last dimension of the pixel ' - f'array ({pixel_array.shape[-1]}) does not match the ' - 'number of described segments ' - f'({number_of_segments}).' - ) - - if pixel_array.dtype in (np.bool_, np.uint8, np.uint16): - max_pixel = pixel_array.max() - - if pixel_array.ndim == 3: - # A label-map style array where pixel values represent - # segment associations + # NB this function is called many times in a loop when there are a + # large number of frames, and has been observed to dominate the + # creation time of some segmentations. Therefore we use low-level + # pydicom primitives to improve performance as much as possible + pffg_item = Dataset() + frame_content_item = Dataset() - # The pixel values in the pixel array must all belong to - # a described segment - if max_pixel > number_of_segments: - raise ValueError( - 'Pixel array contains segments that lack ' - 'descriptions.' + frame_content_item.add( + DataElement( + 0x00209157, # DimensionIndexValues + 'UL', + [int(segment_number)] + dimension_index_values + ) + ) + pffg_item.add( + DataElement( + 0x00209111, # FrameContentSequence + 'SQ', + [frame_content_item] + ) + ) + if has_ref_frame_uid: + if coordinate_system == CoordinateSystemNames.SLIDE: + pffg_item.add( + DataElement( + 0x0048021a, # PlanePositionSlideSequence + 'SQ', + plane_position ) - - # By construction of the pixel array, we know that the segments - # cannot overlap - segments_overlap = SegmentsOverlapValues.NO + ) else: - # Pixel array is 4D where each segment is stacked down - # the last dimension - # In this case, each segment of the pixel array should be binary - if max_pixel > 1: - raise ValueError( - 'When passing a 4D stack of segments with an integer ' - 'pixel type, the pixel array must be binary.' + pffg_item.add( + DataElement( + 0x00209113, # PlanePositionSequence + 'SQ', + plane_position ) - - # Need to check whether or not segments overlap - if max_pixel == 0: - # Empty segments can't overlap (this skips an unnecessary - # further test) - segments_overlap = SegmentsOverlapValues.NO - elif pixel_array.shape[-1] == 1: - # A single segment does not overlap - segments_overlap = SegmentsOverlapValues.NO - else: - sum_over_segments = pixel_array.sum(axis=-1) - if np.any(sum_over_segments > 1): - segments_overlap = SegmentsOverlapValues.YES - else: - segments_overlap = SegmentsOverlapValues.NO - - elif pixel_array.dtype in (np.float_, np.float32, np.float64): - unique_values = np.unique(pixel_array) - if np.min(unique_values) < 0.0 or np.max(unique_values) > 1.0: - raise ValueError( - 'Floating point pixel array values must be in the ' - 'range [0, 1].' ) - if segmentation_type == SegmentationTypeValues.BINARY: - non_boolean_values = np.logical_and( - unique_values > 0.0, - unique_values < 1.0 + + if are_spatial_locations_preserved: + derivation_image_item = Dataset() + derivation_image_item.add( + DataElement( + 0x00089215, # DerivationCodeSequence + 'SQ', + [_DERIVATION_CODE] ) - if np.any(non_boolean_values): - raise ValueError( - 'Floating point pixel array values must be either ' - '0.0 or 1.0 in case of BINARY segmentation type.' 
- ) - pixel_array = pixel_array.astype(np.uint8) + ) - # Need to check whether or not segments overlap - if len(unique_values) == 1 and unique_values[0] == 0.0: - # All pixels are zero: there can be no overlap - segments_overlap = SegmentsOverlapValues.NO - elif pixel_array.ndim == 3 or pixel_array.shape[-1] == 1: - # A single segment does not overlap - segments_overlap = SegmentsOverlapValues.NO - elif pixel_array.sum(axis=-1).max() > 1: - segments_overlap = SegmentsOverlapValues.YES - else: - segments_overlap = SegmentsOverlapValues.NO + derivation_src_img_item = Dataset() + if is_multiframe: + # A single multi-frame source image + src_img_item = source_images[0] + # Frame numbers are one-based + derivation_src_img_item.add( + DataElement( + 0x00081160, # ReferencedFrameNumber + 'IS', + source_image_index + 1 + ) + ) else: - if (pixel_array.ndim == 3) or (pixel_array.shape[-1] == 1): - # A single segment does not overlap - segments_overlap = SegmentsOverlapValues.NO - else: - # A truly fractional segmentation with multiple segments. - # Unclear how overlap should be interpreted in this case - segments_overlap = SegmentsOverlapValues.UNDEFINED + # Multiple single-frame source images + src_img_item = source_images[source_image_index] + derivation_src_img_item.add( + DataElement( + 0x00081150, # ReferencedSOPClassUID + 'UI', + src_img_item[0x00080016].value # SOPClassUID + ) + ) + derivation_src_img_item.add( + DataElement( + 0x00081155, # ReferencedSOPInstanceUID + 'UI', + src_img_item[0x00080018].value # SOPInstanceUID + ) + ) + derivation_src_img_item.add( + DataElement( + 0x0040a170, # PurposeOfReferenceCodeSequence + 'SQ', + [_PURPOSE_CODE] + ) + ) + derivation_src_img_item.add( + DataElement( + 0x0028135a, # SpatialLocationsPreserved + 'CS', + 'YES' + ) + ) + derivation_image_item.add( + DataElement( + 0x00082112, # SourceImageSequence + 'SQ', + [derivation_src_img_item] + ) + ) + pffg_item.add( + DataElement( + 0x00089124, # DerivationImageSequence + 'SQ', + [derivation_image_item] + ) + ) else: - raise TypeError('Pixel array has an invalid data type.') + # Determining the source images that map to the frame is not + # always trivial. Since DerivationImageSequence is a type 2 + # attribute, we leave its value empty. + pffg_item.add( + DataElement( + 0x00089124, # DerivationImageSequence + 'SQ', + [] + ) + ) + logger.debug('spatial locations not preserved') - return pixel_array, segments_overlap + identification = Dataset() + identification.add( + DataElement( + 0x0062000b, # ReferencedSegmentNumber + 'US', + int(segment_number) + ) + ) + pffg_item.add( + DataElement( + 0x0062000a, # SegmentIdentificationSequence + 'SQ', + [identification] + ) + ) - @staticmethod - def _get_nonempty_plane_indices( - pixel_array: np.ndarray - ) -> Tuple[List[int], bool]: - """Get a list of all indices of original planes that are non-empty. + return pffg_item - Empty planes (without any positive pixels in any of the segments) do - not need to be included in the segmentation image. This method finds a - list of indices of the input frames that are non-empty, and therefore - should be included in the segmentation image. + def _encode_pixels_native(self, planes: np.ndarray) -> bytes: + """Encode pixel planes using a native transfer syntax. Parameters ---------- - pixel_array: numpy.ndarray - Segmentation pixel array + planes: numpy.ndarray + Array representing one or more segmentation image planes. If + multiple image planes, planes stacked down the first dimension + (index 0). 
Returns ------- - included_plane_indices : List[int] - List giving for each plane position in the resulting segmentation - image the index of the corresponding frame in the original pixel - array. - is_empty: bool - Whether the entire image is empty. If so, empty frames should not - be omitted. + bytes + Encoded pixels """ - # This list tracks which source image each non-empty frame came from - source_image_indices = [ - i for i, frm in enumerate(pixel_array) - if np.any(frm) - ] - - if len(source_image_indices) == 0: - logger.warning( - 'Encoding an empty segmentation with "omit_empty_frames" ' - 'set to True. Reverting to encoding all frames since omitting ' - 'all frames is not possible.' - ) - return (list(range(pixel_array.shape[0])), True) - - return (source_image_indices, False) - - @staticmethod - def _get_nonempty_tile_indices( - pixel_array: np.ndarray, - plane_positions: Sequence[PlanePositionSequence], - rows: int, - columns: int, - ) -> Tuple[List[int], bool]: - """Get a list of all indices of tile locations that are non-empty. + if self.SegmentationType == SegmentationTypeValues.BINARY.value: + return pack_bits(planes, pad=False) + else: + return planes.tobytes() - This is similar to _get_nonempty_plane_indices, but works on a total - pixel matrix rather than a set of frames. Empty planes (without any - positive pixels in any of the segments) do not need to be included in - the segmentation image. This method finds a list of indices of the - input frames that are non-empty, and therefore should be included in - the segmentation image. + @classmethod + def from_dataset( + cls, + dataset: Dataset, + copy: bool = True, + ) -> 'Segmentation': + """Create instance from an existing dataset. Parameters ---------- - pixel_array: numpy.ndarray - Segmentation pixel array - plane_positions: Sequence[highdicom.PlanePositionSequence] - Plane positions of each tile. - rows: int - Number of rows in each tile. - columns: int - Number of columns in each tile. + dataset: pydicom.dataset.Dataset + Dataset representing a Segmentation image. + copy: bool + If True, the underlying dataset is deep-copied such that the + original dataset remains intact. If False, this operation will + alter the original dataset in place. Returns ------- - included_plane_indices : List[int] - List giving for each plane position in the resulting segmentation - image the index of the corresponding frame in the original pixel - array. - is_empty: bool - Whether the entire image is empty. If so, empty frames should not - be omitted. + highdicom.seg.Segmentation + Representation of the supplied dataset as a highdicom + Segmentation. """ - # This list tracks which source image each non-empty frame came from - source_image_indices = [ - i for i, pos in enumerate(plane_positions) - if np.any( - get_tile_array( - pixel_array[0], - row_offset=pos[0].RowPositionInTotalImagePixelMatrix, - column_offset=pos[0].ColumnPositionInTotalImagePixelMatrix, - tile_rows=rows, - tile_columns=columns, - ) + if not isinstance(dataset, Dataset): + raise TypeError( + 'Dataset must be of type pydicom.dataset.Dataset.' 
            )
+        _check_little_endian(dataset)
+        # Checks on integrity of input dataset
+        if dataset.SOPClassUID != '1.2.840.10008.5.1.4.1.1.66.4':
+            raise ValueError('Dataset is not a Segmentation.')
+        if copy:
+            seg = deepcopy(dataset)
+        else:
+            seg = dataset
+        seg.__class__ = cls
+
+        sf_groups = seg.SharedFunctionalGroupsSequence[0]
+        if hasattr(sf_groups, 'PlaneOrientationSequence'):
+            plane_ori_seq = sf_groups.PlaneOrientationSequence[0]
+            if hasattr(plane_ori_seq, 'ImageOrientationSlide'):
+                seg._coordinate_system = CoordinateSystemNames.SLIDE
+            elif hasattr(plane_ori_seq, 'ImageOrientationPatient'):
+                seg._coordinate_system = CoordinateSystemNames.PATIENT
+            else:
+                seg._coordinate_system = None
+        else:
+            seg._coordinate_system = None
+
+        for i, segment in enumerate(seg.SegmentSequence, 1):
+            if segment.SegmentNumber != i:
+                raise ValueError(
+                    'Segment numbers in the segmentation image must start at '
+                    '1 and increase by 1 with the segments sequence.'
+                )
+
+        # Convert contained items to highdicom types
+        # Segment descriptions
+        seg.SegmentSequence = [
+            SegmentDescription.from_dataset(ds, copy=False)
+            for ds in seg.SegmentSequence
         ]
 
-        if len(source_image_indices) == 0:
-            logger.warning(
-                'Encoding an empty segmentation with "omit_empty_frames" '
-                'set to True. Reverting to encoding all frames since omitting '
-                'all frames is not possible.'
+        # Shared functional group elements
+        if hasattr(sf_groups, 'PlanePositionSequence'):
+            plane_pos = PlanePositionSequence.from_sequence(
+                sf_groups.PlanePositionSequence,
+                copy=False,
             )
-            return (list(range(len(plane_positions))), True)
+            sf_groups.PlanePositionSequence = plane_pos
+        if hasattr(sf_groups, 'PlaneOrientationSequence'):
+            plane_ori = PlaneOrientationSequence.from_sequence(
+                sf_groups.PlaneOrientationSequence,
+                copy=False,
+            )
+            sf_groups.PlaneOrientationSequence = plane_ori
+        if hasattr(sf_groups, 'PixelMeasuresSequence'):
+            pixel_measures = PixelMeasuresSequence.from_sequence(
+                sf_groups.PixelMeasuresSequence,
+                copy=False,
+            )
+            sf_groups.PixelMeasuresSequence = pixel_measures
 
-        return (source_image_indices, False)
+        # Per-frame functional group items
+        if hasattr(seg, 'PerFrameFunctionalGroupsSequence'):
+            for pffg_item in seg.PerFrameFunctionalGroupsSequence:
+                if hasattr(pffg_item, 'PlanePositionSequence'):
+                    plane_pos = PlanePositionSequence.from_sequence(
+                        pffg_item.PlanePositionSequence,
+                        copy=False
+                    )
+                    pffg_item.PlanePositionSequence = plane_pos
+                if hasattr(pffg_item, 'PlaneOrientationSequence'):
+                    plane_ori = PlaneOrientationSequence.from_sequence(
+                        pffg_item.PlaneOrientationSequence,
+                        copy=False,
+                    )
+                    pffg_item.PlaneOrientationSequence = plane_ori
+                if hasattr(pffg_item, 'PixelMeasuresSequence'):
+                    pixel_measures = PixelMeasuresSequence.from_sequence(
+                        pffg_item.PixelMeasuresSequence,
+                        copy=False,
+                    )
+                    pffg_item.PixelMeasuresSequence = pixel_measures
+
+        seg = super().from_dataset(seg, copy=False)
+
+        return cast(cls, seg)
+
+    @property
+    def segmentation_type(self) -> SegmentationTypeValues:
+        """highdicom.seg.SegmentationTypeValues: Segmentation type."""
+        return SegmentationTypeValues(self.SegmentationType)
+
+    @property
+    def segmentation_fractional_type(
+        self
+    ) -> Union[SegmentationFractionalTypeValues, None]:
+        """
+        highdicom.seg.SegmentationFractionalTypeValues:
+            Segmentation fractional type.
+ + """ + if not hasattr(self, 'SegmentationFractionalType'): + return None + return SegmentationFractionalTypeValues( + self.SegmentationFractionalType + ) + + def iter_segments(self): + """Iterates over segments in this segmentation image. + + Returns + ------- + Iterator[Tuple[numpy.ndarray, Tuple[pydicom.dataset.Dataset, ...], pydicom.dataset.Dataset]] + For each segment in the Segmentation image instance, provides the + Pixel Data frames representing the segment, items of the Per-Frame + Functional Groups Sequence describing the individual frames, and + the item of the Segment Sequence describing the segment + + """ # noqa + return iter_segments(self) + + @property + def number_of_segments(self) -> int: + """int: The number of segments in this SEG image.""" + return len(self.SegmentSequence) - @staticmethod - def _get_segment_pixel_array( - pixel_array: np.ndarray, - segment_number: int, - number_of_segments: int, - segmentation_type: SegmentationTypeValues, - max_fractional_value: int - ) -> np.ndarray: - """Get pixel data array for a specific segment and plane. + @property + def segment_numbers(self) -> range: + """range: The segment numbers present in the SEG image as a range.""" + return range(1, self.number_of_segments + 1) - This is a helper method used during the constructor. Note that the - pixel array is expected to have been processed using the - ``_check_and_cast_pixel_array`` method before being passed to this - method. + def get_segment_description( + self, + segment_number: int + ) -> SegmentDescription: + """Get segment description for a segment. Parameters ---------- - pixel_array: numpy.ndarray - Segmentation pixel array containing all segments for a single plane. - Array is therefore either (Rows x Columns x Segments) or (Rows x - Columns) in case of a "label map" style array. segment_number: int - The segment of interest. - number_of_segments: int - Number of segments in the the segmentation. - segmentation_type: highdicom.seg.SegmentationTypeValues - Desired output segmentation type. - max_fractional_value: int - Value for scaling FRACTIONAL segmentations. + Segment number for the segment, as a 1-based index. Returns ------- - numpy.ndarray: - Pixel data array consisting of pixel data for a single segment for - a single plane. Output array has dtype np.uint8 and binary values - (0 or 1). + highdicom.seg.SegmentDescription + Description of the given segment. """ - if pixel_array.dtype in (np.float_, np.float32, np.float64): - # Based on the previous checks and casting, if we get here the - # output is a FRACTIONAL segmentation Floating-point numbers must - # be mapped to 8-bit integers in the range [0, - # max_fractional_value]. - if pixel_array.ndim == 3: - segment_array = pixel_array[:, :, segment_number - 1] - else: - segment_array = pixel_array - segment_array = np.around( - segment_array * float(max_fractional_value) + if segment_number < 1 or segment_number > self.number_of_segments: + raise IndexError( + f'{segment_number} is an invalid segment number for this ' + 'dataset.' ) - segment_array = segment_array.astype(np.uint8) - else: - if pixel_array.ndim == 2: - # "Label maps" that must be converted to binary masks. - if number_of_segments == 1: - # We wish to avoid unnecessary comparison or casting - # operations here, for efficiency reasons. 
If there is only
-                    # a single segment, the label map pixel array is already
-                    # correct
-                    if pixel_array.dtype != np.uint8:
-                        segment_array = pixel_array.astype(np.uint8)
-                    else:
-                        segment_array = pixel_array
-                else:
-                    segment_array = (
-                        pixel_array == segment_number
-                    ).astype(np.uint8)
-            else:
-                segment_array = pixel_array[:, :, segment_number - 1]
-                if segment_array.dtype != np.uint8:
-                    segment_array = segment_array.astype(np.uint8)
-
-            # It may happen that a binary valued array is passed that should be
-            # stored as a fractional segmentation. In this case, we also need
-            # to stretch pixel values to 8-bit unsigned integer range by
-            # multiplying with the maximum fractional value.
-            if segmentation_type == SegmentationTypeValues.FRACTIONAL:
-                # Avoid an unnecessary multiplication operation if max
-                # fractional value is 1
-                if int(max_fractional_value) != 1:
-                    segment_array *= int(max_fractional_value)
-
-            return segment_array
+        if segment_number < 1 or segment_number > self.number_of_segments:
+            raise IndexError(
+                f'{segment_number} is an invalid segment number for this '
+                'dataset.'
+            )
+        return self.SegmentSequence[segment_number - 1]
 
-    @staticmethod
-    def _get_dimension_index_values(
-        unique_dimension_values: List[np.ndarray],
-        plane_position_value: np.ndarray,
-        coordinate_system: Optional[CoordinateSystemNames],
+    def get_segment_numbers(
+        self,
+        segment_label: Optional[str] = None,
+        segmented_property_category: Optional[Union[Code, CodedConcept]] = None,
+        segmented_property_type: Optional[Union[Code, CodedConcept]] = None,
+        algorithm_type: Optional[Union[SegmentAlgorithmTypeValues, str]] = None,
+        tracking_uid: Optional[str] = None,
+        tracking_id: Optional[str] = None,
     ) -> List[int]:
-        """Get Dimension Index Values for a frame.
+        """Get a list of segment numbers matching provided criteria.
 
-        The Dimension Index Values are a list of integer indices that describe
-        the position of a frame as indices along each of the dimensions of
-        the Dimension Index Sequence. See
-        :class:`highdicom.seg.DimensionIndexSequence`.
+        Any number of optional filters may be provided. A segment must match
+        all provided filters to be included in the returned list.
 
         Parameters
         ----------
-        unique_dimension_values: List[numpy.ndarray]
-            List of arrays containing, for each dimension in the dimension
-            index sequence (except ReferencedSegment), the sorted unique
-            values of all planes along that dimension. Each array in the list
-            corresponds to one dimension, and has shape (N x m) where N is the
-            number of unique values for that dimension and m is the
-            multiplicity of values for that dimension.
-        plane_position_value: numpy.ndarray
-            Plane position of the plane. This is a 1D or 2D array containing
-            each of the raw values for this plane of the attributes listed as
-            dimension index pointers (except ReferencedSegment). For dimension
-            indices where the value multiplicity of all attributes is 1, the
-            array will be 1D. If the value multiplicity of attributes is
-            greater than 1, these values are stacked along the second
-            dimension.
-        coordinate_system: Optional[highdicom.CoordinateSystemNames]
-            The type of coordinate system used (if any).
+        segment_label: Union[str, None], optional
+            Segment label filter to apply.
+        segmented_property_category: Union[Code, CodedConcept, None], optional
+            Segmented property category filter to apply.
+        segmented_property_type: Union[Code, CodedConcept, None], optional
+            Segmented property type filter to apply.
+        algorithm_type: Union[SegmentAlgorithmTypeValues, str, None], optional
+            Algorithm type filter to apply.
+        tracking_uid: Union[str, None], optional
+            Tracking unique identifier filter to apply.
+        tracking_id: Union[str, None], optional
+            Tracking identifier filter to apply.
 
         Returns
         -------
-        dimension_index_values: List[int]
-            The dimension index values (except the segment number) for the
-            given plane.
+        List[int]
+            List of all segment numbers matching the provided criteria.
 
-        """
-        # Look up the position of the plane relative to the indexed
-        # dimension.
-        if (
-            coordinate_system ==
-            CoordinateSystemNames.SLIDE
-        ):
-            index_values = [
-                int(
-                    np.where(
-                        unique_dimension_values[idx] == pos
-                    )[0][0] + 1
-                )
-                for idx, pos in enumerate(plane_position_value)
-            ]
-        else:
-            # In case of the patient coordinate system, the
-            # value of the attribute the Dimension Index
-            # Sequence points to (Image Position Patient) has a
-            # value multiplicity greater than one.
-            index_values = [
-                int(
-                    np.where(
-                        (unique_dimension_values[idx] == pos).all(
-                            axis=1
-                        )
-                    )[0][0] + 1
-                )
-                for idx, pos in enumerate(plane_position_value)
-            ]
+        Examples
+        --------
+
+        Get segment numbers of all segments that both represent tumors and were
+        generated by an automatic algorithm from a segmentation object ``seg``:
+
+        >>> from pydicom.sr.codedict import codes
+        >>> from highdicom.seg import SegmentAlgorithmTypeValues, Segmentation
+        >>> from pydicom import dcmread
+        >>> ds = dcmread('data/test_files/seg_image_sm_control.dcm')
+        >>> seg = Segmentation.from_dataset(ds)
+        >>> segment_numbers = seg.get_segment_numbers(
+        ...     segmented_property_type=codes.SCT.ConnectiveTissue,
+        ...     algorithm_type=SegmentAlgorithmTypeValues.AUTOMATIC
+        ... )
+        >>> segment_numbers
+        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
+
+        Get segment numbers of all segments identified by a given
+        institution-specific tracking ID:
+
+        >>> segment_numbers = seg.get_segment_numbers(
+        ...     tracking_id='Segment #4'
+        ... )
+        >>> segment_numbers
+        [4]
+
+        Get segment numbers of all segments identified by a globally unique
+        tracking UID:
+
+        >>> uid = '1.2.826.0.1.3680043.8.498.42540123542017542395135803252098380233'
+        >>> segment_numbers = seg.get_segment_numbers(tracking_uid=uid)
+        >>> segment_numbers
+        [13]
+
+        """  # noqa: E501
+        filter_funcs = []
+        if segment_label is not None:
+            filter_funcs.append(
+                lambda desc: desc.segment_label == segment_label
+            )
+        if segmented_property_category is not None:
+            filter_funcs.append(
+                lambda desc:
+                desc.segmented_property_category == segmented_property_category
+            )
+        if segmented_property_type is not None:
+            filter_funcs.append(
+                lambda desc:
+                desc.segmented_property_type == segmented_property_type
+            )
+        if algorithm_type is not None:
+            algo_type = SegmentAlgorithmTypeValues(algorithm_type)
+            filter_funcs.append(
+                lambda desc:
+                SegmentAlgorithmTypeValues(desc.algorithm_type) == algo_type
+            )
+        if tracking_uid is not None:
+            filter_funcs.append(
+                lambda desc: desc.tracking_uid == tracking_uid
+            )
+        if tracking_id is not None:
+            filter_funcs.append(
+                lambda desc: desc.tracking_id == tracking_id
+            )
+
+        return [
+            desc.segment_number
+            for desc in self.SegmentSequence
+            if all(f(desc) for f in filter_funcs)
+        ]
+
+    def get_tracking_ids(
+        self,
+        segmented_property_category: Optional[Union[Code, CodedConcept]] = None,
+        segmented_property_type: Optional[Union[Code, CodedConcept]] = None,
+        algorithm_type: Optional[Union[SegmentAlgorithmTypeValues, str]] = None
+    ) -> List[Tuple[str, UID]]:
+        """Get all unique tracking identifiers in this SEG image.
 
-        return index_values
+        Any number of optional filters may be provided. A segment must match
+        all provided filters to be included in the returned list.
 
-    @staticmethod
-    def _get_pffg_item(
-        segment_number: int,
-        dimension_index_values: List[int],
-        plane_position: PlanePositionSequence,
-        source_images: List[Dataset],
-        source_image_index: int,
-        are_spatial_locations_preserved: bool,
-        has_ref_frame_uid: bool,
-        coordinate_system: Optional[CoordinateSystemNames],
-        is_multiframe: bool,
-    ) -> Dataset:
-        """Get a single item of the Per Frame Functional Groups Sequence.
+        The tracking IDs and the accompanying tracking UIDs are returned
+        in a list of tuples.
 
-        This is a helper method used in the constructor.
+        Note that the order of the returned list is not significant and will
+        not in general match the order of segments.
 
         Parameters
         ----------
-        segment_number: int
-            Segment number of this segmentation frame.
-        dimension_index_values: List[int]
-            Dimension index values (except segment number) for this frame.
-        plane_position: highdicom.seg.PlanePositionSequence
-            Plane position of this frame.
-        source_images: List[Dataset]
-            Full list of source images.
-        source_image_index: int
-            Index of this frame in the original list of source images.
-        are_spatial_locations_preserved: bool
-            Whether spatial locations are preserved between the segmentation
-            and the source images.
-        has_ref_frame_uid: bool
-            Whether the sources images have a frame of reference UID.
-        coordinate_system: Optional[highdicom.CoordinateSystemNames]
-            Coordinate system used, if any.
-        is_multiframe: bool
-            Whether source images are multiframe.
+        segmented_property_category: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept, None], optional
+            Segmented property category filter to apply.
+        segmented_property_type: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept, None], optional
+            Segmented property type filter to apply.
+        algorithm_type: Union[highdicom.seg.SegmentAlgorithmTypeValues, str, None], optional
+            Algorithm type filter to apply.
 
         Returns
         -------
-        pydicom.Dataset
-            Dataset representing the item of the
-            Per Frame Functional Groups Sequence for this segmentation frame.
+        List[Tuple[str, pydicom.uid.UID]]
+            List of all unique (Tracking Identifier, Unique Tracking Identifier)
+            tuples that are referenced in segment descriptions in this
+            Segmentation image that match all provided filters.
 
-        """
-        # NB this function is called many times in a loop when there are a
-        # large number of frames, and has been observed to dominate the
-        # creation time of some segmentations.
Therefore we use low-level - # pydicom primitives to improve performance as much as possible - pffg_item = Dataset() - frame_content_item = Dataset() + Examples + -------- - frame_content_item.add( - DataElement( - 0x00209157, # DimensionIndexValues - 'UL', - [int(segment_number)] + dimension_index_values - ) - ) - pffg_item.add( - DataElement( - 0x00209111, # FrameContentSequence - 'SQ', - [frame_content_item] - ) - ) - if has_ref_frame_uid: - if coordinate_system == CoordinateSystemNames.SLIDE: - pffg_item.add( - DataElement( - 0x0048021a, # PlanePositionSlideSequence - 'SQ', - plane_position - ) - ) - else: - pffg_item.add( - DataElement( - 0x00209113, # PlanePositionSequence - 'SQ', - plane_position - ) - ) + Read in an example segmentation image in the highdicom test data: - if are_spatial_locations_preserved: - derivation_image_item = Dataset() - derivation_image_item.add( - DataElement( - 0x00089215, # DerivationCodeSequence - 'SQ', - [_DERIVATION_CODE] - ) - ) + >>> import highdicom as hd + >>> from pydicom.sr.codedict import codes + >>> + >>> seg = hd.seg.segread('data/test_files/seg_image_ct_binary_overlap.dcm') - derivation_src_img_item = Dataset() - if is_multiframe: - # A single multi-frame source image - src_img_item = source_images[0] - # Frame numbers are one-based - derivation_src_img_item.add( - DataElement( - 0x00081160, # ReferencedFrameNumber - 'IS', - source_image_index + 1 - ) - ) - else: - # Multiple single-frame source images - src_img_item = source_images[source_image_index] - derivation_src_img_item.add( - DataElement( - 0x00081150, # ReferencedSOPClassUID - 'UI', - src_img_item[0x00080016].value # SOPClassUID - ) - ) - derivation_src_img_item.add( - DataElement( - 0x00081155, # ReferencedSOPInstanceUID - 'UI', - src_img_item[0x00080018].value # SOPInstanceUID - ) - ) - derivation_src_img_item.add( - DataElement( - 0x0040a170, # PurposeOfReferenceCodeSequence - 'SQ', - [_PURPOSE_CODE] - ) - ) - derivation_src_img_item.add( - DataElement( - 0x0028135a, # SpatialLocationsPreserved - 'CS', - 'YES' - ) - ) - derivation_image_item.add( - DataElement( - 0x00082112, # SourceImageSequence - 'SQ', - [derivation_src_img_item] - ) + List the tracking IDs and UIDs present in the segmentation image: + + >>> sorted(seg.get_tracking_ids(), reverse=True) # otherwise its a random order + [('Spine', '1.2.826.0.1.3680043.10.511.3.10042414969629429693880339016394772'), ('Bone', '1.2.826.0.1.3680043.10.511.3.83271046815894549094043330632275067')] + + >>> for seg_num in seg.segment_numbers: + ... desc = seg.get_segment_description(seg_num) + ... print(desc.segmented_property_type.meaning) + Bone + Spine + + List tracking IDs only for those segments with a segmented property + category of 'Spine': + + >>> seg.get_tracking_ids(segmented_property_type=codes.SCT.Spine) + [('Spine', '1.2.826.0.1.3680043.10.511.3.10042414969629429693880339016394772')] + + """ # noqa: E501 + filter_funcs = [] + if segmented_property_category is not None: + filter_funcs.append( + lambda desc: + desc.segmented_property_category == segmented_property_category ) - pffg_item.add( - DataElement( - 0x00089124, # DerivationImageSequence - 'SQ', - [derivation_image_item] - ) + if segmented_property_type is not None: + filter_funcs.append( + lambda desc: + desc.segmented_property_type == segmented_property_type ) - else: - # Determining the source images that map to the frame is not - # always trivial. Since DerivationImageSequence is a type 2 - # attribute, we leave its value empty. 
-            pffg_item.add(
-                DataElement(
-                    0x00089124,  # DerivationImageSequence
-                    'SQ',
-                    []
-                )
+        if algorithm_type is not None:
+            algo_type = SegmentAlgorithmTypeValues(algorithm_type)
+            filter_funcs.append(
+                lambda desc:
+                SegmentAlgorithmTypeValues(desc.algorithm_type) == algo_type
             )
-            logger.debug('spatial locations not preserved')
 
-        identification = Dataset()
-        identification.add(
-            DataElement(
-                0x0062000b,  # ReferencedSegmentNumber
-                'US',
-                int(segment_number)
-            )
-        )
-        pffg_item.add(
-            DataElement(
-                0x0062000a,  # SegmentIdentificationSequence
-                'SQ',
-                [identification]
-            )
-        )
+        return list({
+            (desc.tracking_id, UID(desc.tracking_uid))
+            for desc in self.SegmentSequence
+            if desc.tracking_id is not None and
+            desc.tracking_uid is not None and
+            all(f(desc) for f in filter_funcs)
+        })
 
-        return pffg_item
+    @property
+    def segmented_property_categories(self) -> List[CodedConcept]:
+        """Get all unique segmented property categories in this SEG image.
 
-    def _encode_pixels_native(self, planes: np.ndarray) -> bytes:
-        """Encode pixel planes using a native transfer syntax.
+        Returns
+        -------
+        List[CodedConcept]
+            All unique segmented property categories referenced in segment
+            descriptions in this SEG image.
 
-        Parameters
-        ----------
-        planes: numpy.ndarray
-            Array representing one or more segmentation image planes. If
-            multiple image planes, planes stacked down the first dimension
-            (index 0).
+        """
+        categories = []
+        for desc in self.SegmentSequence:
+            if desc.segmented_property_category not in categories:
+                categories.append(desc.segmented_property_category)
+
+        return categories
+
+    @property
+    def segmented_property_types(self) -> List[CodedConcept]:
+        """Get all unique segmented property types in this SEG image.
 
         Returns
         -------
-        bytes
-            Encoded pixels
+        List[CodedConcept]
+            All unique segmented property types referenced in segment
+            descriptions in this SEG image.
 
         """
-        if self.SegmentationType == SegmentationTypeValues.BINARY.value:
-            return pack_bits(planes, pad=False)
-        else:
-            return planes.tobytes()
+        types = []
+        for desc in self.SegmentSequence:
+            if desc.segmented_property_type not in types:
+                types.append(desc.segmented_property_type)
 
-    @classmethod
-    def from_dataset(
-        cls,
-        dataset: Dataset,
-        copy: bool = True,
-    ) -> 'Segmentation':
-        """Create instance from an existing dataset.
+        return types
+
+    def _get_pixels_by_seg_frame(
+        self,
+        output_shape: Union[int, Tuple[int, int]],
+        indices_iterator: Iterator[
+            Tuple[
+                Tuple[Union[slice, int], ...],
+                Tuple[Union[slice, int], ...],
+                int
+            ]
+        ],
+        segment_numbers: np.ndarray,
+        combine_segments: bool = False,
+        relabel: bool = False,
+        rescale_fractional: bool = True,
+        skip_overlap_checks: bool = False,
+        dtype: Union[type, str, np.dtype, None] = None,
+    ) -> np.ndarray:
+        """Construct a segmentation array given an array of frame numbers.
+
+        The output array is either 4D (combine_segments=False) or 3D
+        (combine_segments=True), where dimensions are frames x rows x columns x
+        segments.
 
         Parameters
         ----------
-        dataset: pydicom.dataset.Dataset
-            Dataset representing a Segmentation image.
-        copy: bool
-            If True, the underlying dataset is deep-copied such that the
-            original dataset remains intact. If False, this operation will
-            alter the original dataset in place.
+        output_shape: Union[int, Tuple[int, int]]
+            Shape of the output array. If an integer is passed, this is the
+            number of frames in the output array and the number of rows and
+            columns are taken to match those of each segmentation frame. If a
+            tuple of integers, it contains the number of (rows, columns) in the
+            output array and there is no frame dimension (this is the tiled
+            case). Note in either case, the segments dimension (if relevant) is
+            omitted.
+        indices_iterator: Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int ]]
+            An iterable object that yields tuples of (output_indexer,
+            segmentation_indexer, output_segment_number) that describe how to
+            construct the desired output pixel array from the segmentation
+            image's pixel array. 'output_indexer' is a tuple that may be used
+            directly to index the output array to place a single frame's pixels
+            into the output array. Similarly 'segmentation_indexer' is a tuple
+            that may be used directly to index the segmentation pixel array
+            to retrieve the pixels to place into the output array with segment
+            number 'output_segment_number'. Note that in both
+            cases the indexers access the frame, row and column dimensions of
+            the relevant array, but not the segment dimension (if relevant).
+        segment_numbers: np.ndarray
+            One dimensional numpy array containing segment numbers
+            corresponding to the columns of the seg frames matrix.
+        combine_segments: bool
+            If True, combine the different segments into a single label
+            map in which the value of a pixel represents its segment.
+            If False (the default), segments are binary and stacked down the
+            last dimension of the output array.
+        relabel: bool
+            If True and ``combine_segments`` is ``True``, the pixel values in
+            the output array are relabelled into the range ``0`` to
+            ``len(segment_numbers)`` (inclusive) according to the position of
+            the original segment numbers in ``segment_numbers`` parameter. If
+            ``combine_segments`` is ``False``, this has no effect.
+        rescale_fractional: bool
+            If this is a FRACTIONAL segmentation and ``rescale_fractional`` is
+            True, the raw integer-valued array stored in the segmentation image
+            output will be rescaled by the MaximumFractionalValue such that
+            each pixel lies in the range 0.0 to 1.0. If False, the raw integer
+            values are returned. If the segmentation has BINARY type, this
+            parameter has no effect.
+        skip_overlap_checks: bool
+            If True, skip checks for overlap between different segments. By
+            default, checks are performed to ensure that the segments do not
+            overlap. However, this reduces performance. If checks are skipped
+            and multiple segments do overlap, the segment with the highest
+            segment number (after relabelling, if applicable) will be placed
+            into the output array.
+        dtype: Union[type, str, np.dtype, None]
+            Data type of the returned array. If None, an appropriate type will
+            be chosen automatically. If the returned values are rescaled
+            fractional values, this will be numpy.float32. Otherwise, the
+            smallest unsigned integer type that accommodates all of the output
+            values will be chosen.
 
         Returns
         -------
-        highdicom.seg.Segmentation
-            Representation of the supplied dataset as a highdicom
-            Segmentation.
+        pixel_array: np.ndarray
+            Segmentation pixel array
 
-        """
-        if not isinstance(dataset, Dataset):
-            raise TypeError(
-                'Dataset must be of type pydicom.dataset.Dataset.'
+        """  # noqa: E501
+        if (
+            segment_numbers.min() < 1 or
+            segment_numbers.max() > self.number_of_segments
+        ):
+            raise ValueError(
+                'Segment numbers array contains invalid values.'
+ ) + + # Determine output type + if combine_segments: + max_output_val = ( + segment_numbers.shape[0] if relabel else segment_numbers.max() ) - _check_little_endian(dataset) - # Checks on integrity of input dataset - if dataset.SOPClassUID != '1.2.840.10008.5.1.4.1.1.66.4': - raise ValueError('Dataset is not a Segmentation.') - if copy: - seg = deepcopy(dataset) else: - seg = dataset - seg.__class__ = Segmentation + max_output_val = 1 - sf_groups = seg.SharedFunctionalGroupsSequence[0] - if hasattr(seg, 'PlaneOrientationSequence'): - plane_ori_seq = sf_groups.PlaneOrientationSequence[0] - if hasattr(plane_ori_seq, 'ImageOrientationSlide'): - seg._coordinate_system = CoordinateSystemNames.SLIDE - elif hasattr(plane_ori_seq, 'ImageOrientationPatient'): - seg._coordinate_system = CoordinateSystemNames.PATIENT + will_be_rescaled = ( + rescale_fractional and + self.segmentation_type == SegmentationTypeValues.FRACTIONAL and + not combine_segments + ) + if dtype is None: + if will_be_rescaled: + dtype = np.float32 else: - seg._coordinate_system = None - else: - seg._coordinate_system = None + dtype = _get_unsigned_dtype(max_output_val) + dtype = np.dtype(dtype) - for i, segment in enumerate(seg.SegmentSequence, 1): - if segment.SegmentNumber != i: - raise AttributeError( - 'Segments are expected to start at 1 and be consecutive ' - 'integers.' - ) + # Check dtype is suitable + if dtype.kind not in ('u', 'i', 'f'): + raise ValueError( + f'Data type "{dtype}" is not suitable.' + ) - for i, s in enumerate(seg.SegmentSequence, 1): - if s.SegmentNumber != i: + if will_be_rescaled: + intermediate_dtype = np.uint8 + if dtype.kind != 'f': raise ValueError( - 'Segment numbers in the segmentation image must start at ' - '1 and increase by 1 with the segments sequence.' + 'If rescaling a fractional segmentation, the output dtype ' + 'must be a floating-point type.' 
) + else: + intermediate_dtype = dtype + _check_numpy_value_representation(max_output_val, dtype) - # Convert contained items to highdicom types - # Segment descriptions - seg.SegmentSequence = [ - SegmentDescription.from_dataset(ds, copy=False) - for ds in seg.SegmentSequence - ] - - # Shared functional group elements - if hasattr(sf_groups, 'PlanePositionSequence'): - plane_pos = PlanePositionSequence.from_sequence( - sf_groups.PlanePositionSequence, - copy=False, - ) - sf_groups.PlanePositionSequence = plane_pos - if hasattr(sf_groups, 'PlaneOrientationSequence'): - plane_ori = PlaneOrientationSequence.from_sequence( - sf_groups.PlaneOrientationSequence, - copy=False, - ) - sf_groups.PlaneOrientationSequence = plane_ori - if hasattr(sf_groups, 'PixelMeasuresSequence'): - pixel_measures = PixelMeasuresSequence.from_sequence( - sf_groups.PixelMeasuresSequence, - copy=False, - ) - sf_groups.PixelMeasuresSequence = pixel_measures + num_segments = len(segment_numbers) + if self.pixel_array.ndim == 2: + h, w = self.pixel_array.shape + else: + _, h, w = self.pixel_array.shape - # Per-frame functional group items - if hasattr(seg, 'PerFrameFunctionalGroupsSequence'): - for pffg_item in seg.PerFrameFunctionalGroupsSequence: - if hasattr(pffg_item, 'PlanePositionSequence'): - plane_pos = PlanePositionSequence.from_sequence( - pffg_item.PlanePositionSequence, - copy=False - ) - pffg_item.PlanePositionSequence = plane_pos - if hasattr(pffg_item, 'PlaneOrientationSequence'): - plane_ori = PlaneOrientationSequence.from_sequence( - pffg_item.PlaneOrientationSequence, - copy=False, + if combine_segments: + # Check whether segmentation is binary, or fractional with only + # binary values + if self.segmentation_type == SegmentationTypeValues.FRACTIONAL: + if not rescale_fractional: + raise ValueError( + 'In order to combine segments of a FRACTIONAL ' + 'segmentation image, argument "rescale_fractional" ' + 'must be set to True.' ) - pffg_item.PlaneOrientationSequence = plane_ori - if hasattr(pffg_item, 'PixelMeasuresSequence'): - pixel_measures = PixelMeasuresSequence.from_sequence( - pffg_item.PixelMeasuresSequence, - copy=False, + # Combining fractional segs is only possible if there are + # two unique values in the array: 0 and MaximumFractionalValue + is_binary = np.isin( + np.unique(self.pixel_array), + np.array([0, self.MaximumFractionalValue]), + assume_unique=True + ).all() + if not is_binary: + raise ValueError( + 'Combining segments of a FRACTIONAL segmentation is ' + 'only possible if the pixel array contains only 0s ' + 'and the specified MaximumFractionalValue ' + f'({self.MaximumFractionalValue}).' ) - pffg_item.PixelMeasuresSequence = pixel_measures - - seg._build_luts() + pixel_array = self.pixel_array // self.MaximumFractionalValue + pixel_array = pixel_array.astype(np.uint8) + else: + pixel_array = self.pixel_array - return cast(Segmentation, seg) + if pixel_array.ndim == 2: + pixel_array = pixel_array[None, :, :] - def _get_ref_instance_uids(self) -> List[Tuple[str, str, str]]: - """List all instances referenced in the segmentation. + # Initialize empty pixel array + full_output_shape = ( + output_shape + if isinstance(output_shape, tuple) + else (output_shape, h, w) + ) + out_array = np.zeros( + full_output_shape, + dtype=intermediate_dtype + ) - Returns - ------- - List[Tuple[str, str, str]] - List of all instances referenced in the segmentation in the format - (StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID). 
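+            # To make the indexing scheme concrete, here is a hypothetical
+            # triplet of the kind the iterator yields (values invented for
+            # illustration only):
+            #
+            #     output_indexer = (0, slice(None), slice(None))
+            #     seg_indexer = (4, slice(None), slice(None))
+            #     seg_n = 2
+            #
+            # This instructs that stored frame 4 of the segmentation be copied
+            # into frame 0 of the output array with output segment number 2;
+            # the np.maximum call below merges it with any segment values
+            # already written to those pixels.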
+            # Loop over the supplied iterable
+            for (output_indexer, seg_indexer, seg_n) in indices_iterator:
+                pix_value = intermediate_dtype.type(seg_n)
 
-        """
-        instance_data = []
-        if hasattr(self, 'ReferencedSeriesSequence'):
-            for ref_series in self.ReferencedSeriesSequence:
-                for ref_ins in ref_series.ReferencedInstanceSequence:
-                    instance_data.append(
-                        (
-                            self.StudyInstanceUID,
-                            ref_series.SeriesInstanceUID,
-                            ref_ins.ReferencedSOPInstanceUID
+                if not skip_overlap_checks:
+                    if np.any(
+                        np.logical_and(
+                            pixel_array[seg_indexer] > 0,
+                            out_array[output_indexer] > 0
                         )
-                    )
-        other_studies_kw = 'StudiesContainingOtherReferencedInstancesSequence'
-        if hasattr(self, other_studies_kw):
-            for ref_study in getattr(self, other_studies_kw):
-                for ref_series in ref_study.ReferencedSeriesSequence:
-                    for ref_ins in ref_series.ReferencedInstanceSequence:
-                        instance_data.append(
-                            (
-                                ref_study.StudyInstanceUID,
-                                ref_series.SeriesInstanceUID,
-                                ref_ins.ReferencedSOPInstanceUID,
-                            )
+                    ):
+                        raise RuntimeError(
+                            "Cannot combine segments because segments "
+                            "overlap."
                         )
+                out_array[output_indexer] = np.maximum(
+                    pixel_array[seg_indexer] * pix_value,
+                    out_array[output_indexer]
+                )
 
-        # There shouldn't be duplicates here, but there's no explicit rule
-        # preventing it.
-        # Since dictionary ordering is preserved, this trick deduplicates
-        # the list without changing the order
-        unique_instance_data = list(dict.fromkeys(instance_data))
-        if len(unique_instance_data) != len(instance_data):
-            counts = Counter(instance_data)
-            duplicate_sop_uids = [
-                f"'{key[2]}'" for key, value in counts.items() if value > 1
-            ]
-            display_str = ', '.join(duplicate_sop_uids)
-            logger.warning(
-                'Duplicate entries found in the ReferencedSeriesSequence. '
-                f"Segmentation SOP Instance UID: '{self.SOPInstanceUID}', "
-                f'duplicated referenced SOP Instance UID items: {display_str}.'
+        else:
+            # Initialize empty pixel array
+            full_output_shape = (
+                (*output_shape, num_segments)
+                if isinstance(output_shape, tuple)
+                else (output_shape, h, w, num_segments)
+            )
+            out_array = np.zeros(
+                full_output_shape,
+                dtype=intermediate_dtype
             )
 
-        return unique_instance_data
-
-    def _build_luts(self) -> None:
-        """Build lookup tables for efficient querying.
+            # loop through output frames
+            for (output_indexer, seg_indexer, seg_n) in indices_iterator:
 
-        Two lookup tables are currently constructed. The first maps the
-        SOPInstanceUIDs of all datasets referenced in the segmentation to a
-        tuple containing the StudyInstanceUID, SeriesInstanceUID and
-        SOPInstanceUID.
+                # Output indexer needs segment index
+                output_indexer = (*output_indexer, seg_n)
 
-        The second look-up table contains information about each frame of the
-        segmentation, including the segment it contains, the instance and frame
-        from which it was derived (if these are unique), and its dimension
-        index values.
+                # Copy data to the output array
+                if self.pixel_array.ndim == 2:
+                    # Special case with a single segmentation frame
+                    out_array[output_indexer] = \
+                        self.pixel_array.copy()
+                else:
+                    out_array[output_indexer] = \
+                        self.pixel_array[seg_indexer].copy()
 
-        """
-        self._db_man = _SegDBManager(self)
+        if rescale_fractional:
+            if self.segmentation_type == SegmentationTypeValues.FRACTIONAL:
+                if out_array.max() > self.MaximumFractionalValue:
+                    raise RuntimeError(
+                        'Segmentation image contains values greater than '
+                        'the MaximumFractionalValue recorded in the '
+                        'dataset.'
+ ) + max_val = self.MaximumFractionalValue + out_array = out_array.astype(dtype) / max_val - @property - def segmentation_type(self) -> SegmentationTypeValues: - """highdicom.seg.SegmentationTypeValues: Segmentation type.""" - return SegmentationTypeValues(self.SegmentationType) + return out_array - @property - def segmentation_fractional_type( + def get_default_dimension_index_pointers( self - ) -> Union[SegmentationFractionalTypeValues, None]: - """ - highdicom.seg.SegmentationFractionalTypeValues: - Segmentation fractional type. - - """ - if not hasattr(self, 'SegmentationFractionalType'): - return None - return SegmentationFractionalTypeValues( - self.SegmentationFractionalType - ) - - def iter_segments(self): - """Iterates over segments in this segmentation image. - - Returns - ------- - Iterator[Tuple[numpy.ndarray, Tuple[pydicom.dataset.Dataset, ...], pydicom.dataset.Dataset]] - For each segment in the Segmentation image instance, provides the - Pixel Data frames representing the segment, items of the Per-Frame - Functional Groups Sequence describing the individual frames, and - the item of the Segment Sequence describing the segment - - """ # noqa - return iter_segments(self) - - @property - def number_of_segments(self) -> int: - """int: The number of segments in this SEG image.""" - return len(self.SegmentSequence) - - @property - def segment_numbers(self) -> range: - """range: The segment numbers present in the SEG image as a range.""" - return range(1, self.number_of_segments + 1) - - def get_segment_description( - self, - segment_number: int - ) -> SegmentDescription: - """Get segment description for a segment. + ) -> List[BaseTag]: + """Get the default list of tags used to index frames. - Parameters - ---------- - segment_number: int - Segment number for the segment, as a 1-based index. + The list of tags used to index dimensions depends upon how the + segmentation image was constructed, and is stored in the + DimensionIndexPointer attribute within the DimensionIndexSequence. The + list returned by this method matches the order of items in the + DimensionIndexSequence, but omits the ReferencedSegmentNumber + attribute, since this is handled differently to other tags when + indexing frames in highdicom. Returns ------- - highdicom.seg.SegmentDescription - Description of the given segment. + List[pydicom.tag.BaseTag] + List of tags used as the default dimension index pointers. """ - if segment_number < 1 or segment_number > self.number_of_segments: - raise IndexError( - f'{segment_number} is an invalid segment number for this ' - 'dataset.' - ) - return self.SegmentSequence[segment_number - 1] + referenced_segment_number = tag_for_keyword('ReferencedSegmentNumber') + return [ + t for t in self.dimension_index_pointers + if t != referenced_segment_number + ] - def get_segment_numbers( + def are_dimension_indices_unique( self, - segment_label: Optional[str] = None, - segmented_property_category: Optional[Union[Code, CodedConcept]] = None, - segmented_property_type: Optional[Union[Code, CodedConcept]] = None, - algorithm_type: Optional[Union[SegmentAlgorithmTypeValues, str]] = None, - tracking_uid: Optional[str] = None, - tracking_id: Optional[str] = None, - ) -> List[int]: - """Get a list of segment numbers matching provided criteria. + dimension_index_pointers: Sequence[Union[int, BaseTag]] + ) -> bool: + """Check if a list of index pointers uniquely identifies frames. - Any number of optional filters may be provided. 
A segment must match - all provided filters to be included in the returned list. + For a given list of dimension index pointers, check whether every + combination of index values for these pointers identifies a unique + frame per segment in the segmentation image. This is a pre-requisite + for indexing using this list of dimension index pointers in the + :meth:`Segmentation.get_pixels_by_dimension_index_values()` method. Parameters ---------- - segment_label: Union[str, None], optional - Segment label filter to apply. - segmented_property_category: Union[Code, CodedConcept, None], optional - Segmented property category filter to apply. - segmented_property_type: Union[Code, CodedConcept, None], optional - Segmented property type filter to apply. - algorithm_type: Union[SegmentAlgorithmTypeValues, str, None], optional - Segmented property type filter to apply. - tracking_uid: Union[str, None], optional - Tracking unique identifier filter to apply. - tracking_id: Union[str, None], optional - Tracking identifier filter to apply. + dimension_index_pointers: Sequence[Union[int, pydicom.tag.BaseTag]] + Sequence of tags serving as dimension index pointers. Returns ------- - List[int] - List of all segment numbers matching the provided criteria. - - Examples - -------- - - Get segment numbers of all segments that both represent tumors and were - generated by an automatic algorithm from a segmentation object ``seg``: - - >>> from pydicom.sr.codedict import codes - >>> from highdicom.seg import SegmentAlgorithmTypeValues, Segmentation - >>> from pydicom import dcmread - >>> ds = dcmread('data/test_files/seg_image_sm_control.dcm') - >>> seg = Segmentation.from_dataset(ds) - >>> segment_numbers = seg.get_segment_numbers( - ... segmented_property_type=codes.SCT.ConnectiveTissue, - ... algorithm_type=SegmentAlgorithmTypeValues.AUTOMATIC - ... ) - >>> segment_numbers - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] - - Get segment numbers of all segments identified by a given - institution-specific tracking ID: - - >>> segment_numbers = seg.get_segment_numbers( - ... tracking_id='Segment #4' - ... ) - >>> segment_numbers - [4] - - Get segment numbers of all segments identified a globally unique - tracking UID: + bool + True if the specified list of dimension index pointers uniquely + identifies frames in the segmentation image. False otherwise. - >>> uid = '1.2.826.0.1.3680043.8.498.42540123542017542395135803252098380233' - >>> segment_numbers = seg.get_segment_numbers(tracking_uid=uid) - >>> segment_numbers - [13] + Raises + ------ + KeyError + If any of the elements of the ``dimension_index_pointers`` are not + valid dimension index pointers in this segmentation image. 
- """ # noqa: E501 - filter_funcs = [] - if segment_label is not None: - filter_funcs.append( - lambda desc: desc.segment_label == segment_label - ) - if segmented_property_category is not None: - filter_funcs.append( - lambda desc: - desc.segmented_property_category == segmented_property_category - ) - if segmented_property_type is not None: - filter_funcs.append( - lambda desc: - desc.segmented_property_type == segmented_property_type - ) - if algorithm_type is not None: - algo_type = SegmentAlgorithmTypeValues(algorithm_type) - filter_funcs.append( - lambda desc: - SegmentAlgorithmTypeValues(desc.algorithm_type) == algo_type - ) - if tracking_uid is not None: - filter_funcs.append( - lambda desc: desc.tracking_uid == tracking_uid - ) - if tracking_id is not None: - filter_funcs.append( - lambda desc: desc.tracking_id == tracking_id + """ + if len(dimension_index_pointers) == 0: + raise ValueError( + 'Argument "dimension_index_pointers" may not be empty.' ) + dimension_index_pointers = list(dimension_index_pointers) + for ptr in dimension_index_pointers: + if ptr not in self.dimension_index_pointers: + kw = keyword_for_tag(ptr) + if kw == '': + kw = '' + raise KeyError( + f'Tag {ptr} ({kw}) is not used as a dimension index ' + 'in this image.' + ) - return [ - desc.segment_number - for desc in self.SegmentSequence - if all(f(desc) for f in filter_funcs) - ] + dimension_index_pointers.append( + tag_for_keyword('ReferencedSegmentNumber') + ) + return super().are_dimension_indices_unique( + dimension_index_pointers + ) - def get_tracking_ids( - self, - segmented_property_category: Optional[Union[Code, CodedConcept]] = None, - segmented_property_type: Optional[Union[Code, CodedConcept]] = None, - algorithm_type: Optional[Union[SegmentAlgorithmTypeValues, str]] = None - ) -> List[Tuple[str, UID]]: - """Get all unique tracking identifiers in this SEG image. + def _are_referenced_sop_instances_unique(self) -> bool: + """Check if Referenced SOP Instance UIDs uniquely identify frames. - Any number of optional filters may be provided. A segment must match - all provided filters to be included in the returned list. + This is a pre-requisite for requesting segmentation masks defined by + the SOP Instance UIDs of their source frames, such as using the + Segmentation.get_pixels_by_source_instance() method and + Segmentation._iterate_indices_by_source_instance() method. - The tracking IDs and the accompanying tracking UIDs are returned - in a list of tuples. + Returns + ------- + bool + True if the ReferencedSOPInstanceUID (in combination with the + segment number) uniquely identifies frames of the segmentation + image. - Note that the order of the returned list is not significant and will - not in general match the order of segments. + """ + cur = self._db_con.cursor() + n_unique_combos = cur.execute( + 'SELECT COUNT(*) FROM ' + '(SELECT 1 FROM FrameLUT GROUP BY ReferencedSOPInstanceUID, ' + 'ReferencedSegmentNumber)' + ).fetchone()[0] + return n_unique_combos == self.NumberOfFrames - Parameters - ---------- - segmented_property_category: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept, None], optional - Segmented property category filter to apply. - segmented_property_type: Union[pydicom.sr.coding.Code, highdicom.sr.CodedConcept, None], optional - Segmented property type filter to apply. - algorithm_type: Union[highdicom.seg.SegmentAlgorithmTypeValues, str, None], optional - Segmented property type filter to apply. 
+ def _are_referenced_frames_unique(self) -> bool: + """Check if Referenced Frame Numbers uniquely identify frames. Returns ------- - List[Tuple[str, pydicom.uid.UID]] - List of all unique (Tracking Identifier, Unique Tracking Identifier) - tuples that are referenced in segment descriptions in this - Segmentation image that match all provided filters. - - Examples - -------- + bool + True if the ReferencedFrameNumber (in combination with the + segment number) uniquely identifies frames of the segmentation + image. - Read in an example segmentation image in the highdicom test data: + """ + cur = self._db_con.cursor() + n_unique_combos = cur.execute( + 'SELECT COUNT(*) FROM ' + '(SELECT 1 FROM FrameLUT GROUP BY ReferencedFrameNumber, ' + 'ReferencedSegmentNumber)' + ).fetchone()[0] + return n_unique_combos == self.NumberOfFrames - >>> import highdicom as hd - >>> from pydicom.sr.codedict import codes - >>> - >>> seg = hd.seg.segread('data/test_files/seg_image_ct_binary_overlap.dcm') + @contextmanager + def _generate_temp_segment_table( + self, + segment_numbers: Sequence[int], + combine_segments: bool, + relabel: bool + ) -> Generator[None, None, None]: + """Context manager that handles a temporary table for segments. - List the tracking IDs and UIDs present in the segmentation image: + The temporary table is named "TemporarySegmentNumbers" with columns + OutputSegmentNumber and SegmentNumber that are populated with values + derived from the input. Control flow then returns to code within the + "with" block. After the "with" block has completed, the cleanup of + the table is automatically handled. - >>> sorted(seg.get_tracking_ids(), reverse=True) # otherwise its a random order - [('Spine', '1.2.826.0.1.3680043.10.511.3.10042414969629429693880339016394772'), ('Bone', '1.2.826.0.1.3680043.10.511.3.83271046815894549094043330632275067')] + Parameters + ---------- + segment_numbers: Sequence[int] + Segment numbers to include, in the order desired. + combine_segments: bool + Whether the segments will be combined into a label map. + relabel: bool + Whether the output segment numbers should be relabelled to 1-n + (True) or retain their values in the original segmentation object. - >>> for seg_num in seg.segment_numbers: - ... desc = seg.get_segment_description(seg_num) - ... print(desc.segmented_property_type.meaning) - Bone - Spine + Yields + ------ + None: + Yields control to the "with" block, with the temporary table + created. 
-        List tracking IDs only for those segments with a segmented property
-        category of 'Spine':
+        """
+        if combine_segments:
+            if relabel:
+                # Output segment numbers are consecutive and start at 1
+                data = enumerate(segment_numbers, 1)
+            else:
+                # Output segment numbers are the same as the input
+                # segment numbers
+                data = zip(segment_numbers, segment_numbers)
+        else:
+            # Output segment numbers are indices along the output
+            # array's segment dimension, so are consecutive starting at
+            # 0
+            data = enumerate(segment_numbers)

-        >>> seg.get_tracking_ids(segmented_property_type=codes.SCT.Spine)
-        [('Spine', '1.2.826.0.1.3680043.10.511.3.10042414969629429693880339016394772')]
+        cmd = (
+            'CREATE TABLE TemporarySegmentNumbers('
+            '    SegmentNumber INTEGER UNIQUE NOT NULL,'
+            '    OutputSegmentNumber INTEGER UNIQUE NOT NULL'
+            ')'
+        )

-        """  # noqa: E501
-        filter_funcs = []
-        if segmented_property_category is not None:
-            filter_funcs.append(
-                lambda desc:
-                desc.segmented_property_category == segmented_property_category
-            )
-        if segmented_property_type is not None:
-            filter_funcs.append(
-                lambda desc:
-                desc.segmented_property_type == segmented_property_type
-            )
-        if algorithm_type is not None:
-            algo_type = SegmentAlgorithmTypeValues(algorithm_type)
-            filter_funcs.append(
-                lambda desc:
-                SegmentAlgorithmTypeValues(desc.algorithm_type) == algo_type
+        with self._db_con:
+            self._db_con.execute(cmd)
+            self._db_con.executemany(
+                'INSERT INTO '
+                'TemporarySegmentNumbers('
+                '    OutputSegmentNumber, SegmentNumber'
+                ')'
+                'VALUES(?, ?)',
+                data
             )

-        return list({
-            (desc.tracking_id, UID(desc.tracking_uid))
-            for desc in self.SegmentSequence
-            if desc.tracking_id is not None and
-            desc.tracking_uid is not None and
-            all(f(desc) for f in filter_funcs)
-        })
+        # Yield execution to "with" block
+        yield

-    @property
-    def segmented_property_categories(self) -> List[CodedConcept]:
-        """Get all unique segmented property categories in this SEG image.
+        # Clean up table after user code executes
+        with self._db_con:
+            self._db_con.execute('DROP TABLE TemporarySegmentNumbers')

-        Returns
-        -------
-        List[CodedConcept]
-            All unique segmented property categories referenced in segment
-            descriptions in this SEG image.
+    @contextmanager
+    def _iterate_indices_by_source_instance(
+        self,
+        source_sop_instance_uids: Sequence[str],
+        segment_numbers: Sequence[int],
+        combine_segments: bool = False,
+        relabel: bool = False,
+    ) -> Generator[
+        Iterator[
+            Tuple[
+                Tuple[Union[slice, int], ...],
+                Tuple[Union[slice, int], ...],
+                int
+            ]
+        ],
+        None,
+        None,
+    ]:
+        """Iterate over segmentation frame indices for given source image
+        instances.

-        """
-        categories = []
-        for desc in self.SegmentSequence:
-            if desc.segmented_property_category not in categories:
-                categories.append(desc.segmented_property_category)
+        This is intended for the case of a segmentation image that references
+        multiple single-frame source images (typically a series). In this
+        case, the user supplies a list of SOP Instance UIDs of the source
+        images of interest, and this method returns information about the
+        frames of the segmentation image relevant to these source images.

-        return categories
+        This yields an iterator to the underlying database result that iterates
+        over information on the steps required to construct the requested
+        segmentation mask from the stored frames of the segmentation image.

-    @property
-    def segmented_property_types(self) -> List[CodedConcept]:
-        """Get all unique segmented property types in this SEG image.
+        This method is intended to be used as a context manager that yields the
+        requested iterator. The iterator is only valid while the context
+        manager is active.

-        Returns
-        -------
-        List[CodedConcept]
-            All unique segmented property types referenced in segment
-            descriptions in this SEG image.
+        Parameters
+        ----------
+        source_sop_instance_uids: Sequence[str]
+            SOP Instance UIDs of the source instances for which segmentation
+            image frames are requested.
+        segment_numbers: Sequence[int]
+            Numbers of segments to include.
+        combine_segments: bool, optional
+            If True, produce indices to combine the different segments into a
+            single label map in which the value of a pixel represents its
+            segment. If False (the default), segments are binary and stacked
+            down the last dimension of the output array.
+        relabel: bool, optional
+            If True and ``combine_segments`` is ``True``, the output segment
+            numbers are relabelled into the range ``0`` to
+            ``len(segment_numbers)`` (inclusive) according to the position of
+            the original segment numbers in ``segment_numbers`` parameter. If
+            ``combine_segments`` is ``False``, this has no effect.
+
+        Yields
+        ------
+        Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]:
+            Indices required to construct the requested mask. Each
+            triplet denotes the (output indexer, segmentation indexer,
+            output segment number) representing a list of "instructions" to
+            create the requested output array by copying frames from the
+            segmentation dataset and inserting them into the output array with
+            a given segment value. Output indexer and segmentation indexer are
+            tuples that can be used to index the output and segmentations
+            numpy arrays directly.

-        """
-        types = []
-        for desc in self.SegmentSequence:
-            if desc.segmented_property_type not in types:
-                types.append(desc.segmented_property_type)
+        """  # noqa: E501
+        # Run query to create the iterable of indices needed to construct the
+        # desired pixel array. The approach here is to create two temporary
+        # tables in the SQLite database, one for the desired source UIDs, and
+        # another for the desired segments, then use table joins with the
+        # referenced UIDs table and the frame LUT at the relevant rows, before
+        # clearing up the temporary tables.

-        return types
+        # Create temporary table of desired source SOP Instance UIDs
+        table_name = 'TemporarySOPInstanceUIDs'
+        column_defs = [
+            'OutputFrameIndex INTEGER UNIQUE NOT NULL',
+            'SourceSOPInstanceUID VARCHAR UNIQUE NOT NULL'
+        ]
+        column_data = enumerate(source_sop_instance_uids)

-    @property
-    def volume_geometry(self) -> Optional[VolumeGeometry]:
-        """Union[highdicom.VolumeGeometry, None]: Geometry of the volume if the
-        segmentation represents a regularly-spaced 3D volume. ``None``
-        otherwise.
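A caller consumes the yielded triplets by indexing the output and stored arrays directly, roughly as in this sketch; the shapes and hard-coded triplets here are hypothetical stand-ins for the database results (the real consumer is ``_get_pixels_by_seg_frame``):

    import numpy as np

    seg_pixels = np.zeros((10, 4, 4), dtype=np.uint8)  # stored frames
    out = np.zeros((3, 4, 4), dtype=np.uint8)          # requested mask

    triplets = [
        ((0, slice(None), slice(None)), (7, slice(None), slice(None)), 1),
        ((2, slice(None), slice(None)), (3, slice(None), slice(None)), 2),
    ]
    for output_indexer, seg_indexer, seg_number in triplets:
        # Copy one stored frame into the output with its segment value
        out[output_indexer] = np.maximum(
            seg_pixels[seg_indexer] * seg_number, out[output_indexer]
        )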
+ # Construct the query The ORDER BY is not logically necessary + # but seems to improve performance of the downstream numpy + # operations, presumably as it is more cache efficient + query = ( + 'SELECT ' + ' T.OutputFrameIndex,' + ' L.FrameNumber - 1,' + ' S.OutputSegmentNumber ' + 'FROM TemporarySOPInstanceUIDs T ' + 'INNER JOIN FrameLUT L' + ' ON T.SourceSOPInstanceUID = L.ReferencedSOPInstanceUID ' + 'INNER JOIN TemporarySegmentNumbers S' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' + 'ORDER BY T.OutputFrameIndex' + ) - """ - return self._db_man.volume_geometry + with self._generate_temp_table( + table_name=table_name, + column_defs=column_defs, + column_data=column_data, + ): + with self._generate_temp_segment_table( + segment_numbers=segment_numbers, + combine_segments=combine_segments, + relabel=relabel + ): + yield ( + ( + (fo, slice(None), slice(None)), + (fi, slice(None), slice(None)), + seg_no + ) + for (fo, fi, seg_no) in self._db_con.execute(query) + ) - def _get_pixels_by_seg_frame( + @contextmanager + def _iterate_indices_by_source_frame( self, - output_shape: Union[int, Tuple[int, int]], - indices_iterator: Iterator[ - Tuple[ - Tuple[Union[slice, int], ...], - Tuple[Union[slice, int], ...], - int - ] - ], - segment_numbers: np.ndarray, + source_sop_instance_uid: str, + source_frame_numbers: Sequence[int], + segment_numbers: Sequence[int], combine_segments: bool = False, relabel: bool = False, - rescale_fractional: bool = True, - skip_overlap_checks: bool = False, - dtype: Union[type, str, np.dtype, None] = None, - ) -> np.ndarray: - """Construct a segmentation array given an array of frame numbers. + ) -> Generator[ + Iterator[ + Tuple[ + Tuple[Union[slice, int], ...], + Tuple[Union[slice, int], ...], + int + ] + ], + None, + None, + ]: + """Iterate over frame indices for given source image frames. - The output array is either 4D (combine_segments=False) or 3D - (combine_segments=True), where dimensions are frames x rows x columns x - segments. + This is intended for the case of a segmentation image that references a + single multi-frame source image instance. In this case, the user + supplies a list of frames numbers of interest within the single source + instance, and this method returns information about the frames + of the segmentation image relevant to these frames. + + This yields an iterator to the underlying database result that iterates + over information on the steps required to construct the requested + segmentation mask from the stored frames of the segmentation image. + + This method is intended to be used as a context manager that yields the + requested iterator. The iterator is only valid while the context + manager is active. Parameters ---------- - output_shape: Union[int, Tuple[int, int]] - Shape of the output array. If an integer is False, this is the - number of frames in the output array and the number of rows and - columns are taken to match those of each segmentation frame. If a - tuple of integers, it contains the number of (rows, columns) in the - output array and there is no frame dimension (this is the tiled - case). Note in either case, the segments dimension (if relevant) is - omitted. - indices_iterator: Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int ]] - An iterable object that yields tuples of (output_indexer, - segmentation_indexer, output_segment_number) that describes how to - construct the desired output pixel array from the segmentation - image's pixel array. 
'output_indexer' is a tuple that may be used - directly to index the output array to place a single frame's pixels - into the output array. Similarly 'segmentation_indexer' is a tuple - that may be used directly to index the segmentation pixel array - to retrieve the pixels to place into the output array. - with as segment number 'output_segment_number'. Note that in both - cases the indexers access the frame, row and column dimensions of - the relevant array, but not the segment dimension (if relevant). - segment_numbers: np.ndarray - One dimensional numpy array containing segment numbers - corresponding to the columns of the seg frames matrix. - combine_segments: bool - If True, combine the different segments into a single label - map in which the value of a pixel represents its segment. - If False (the default), segments are binary and stacked down the - last dimension of the output array. - relabel: bool - If True and ``combine_segments`` is ``True``, the pixel values in - the output array are relabelled into the range ``0`` to + source_sop_instance_uid: str + SOP Instance UID of the source instance that contains the source + frames. + source_frame_numbers: Sequence[int] + A sequence of frame numbers (1-based) within the source instance + for which segmentations are requested. + segment_numbers: Sequence[int] + Sequence containing segment numbers to include. + combine_segments: bool, optional + If True, produce indices to combine the different segments into a + single label map in which the value of a pixel represents its + segment. If False (the default), segments are binary and stacked + down the last dimension of the output array. + relabel: bool, optional + If True and ``combine_segments`` is ``True``, the output segment + numbers are relabelled into the range ``0`` to ``len(segment_numbers)`` (inclusive) according to the position of the original segment numbers in ``segment_numbers`` parameter. If ``combine_segments`` is ``False``, this has no effect. - rescale_fractional: bool - If this is a FRACTIONAL segmentation and ``rescale_fractional`` is - True, the raw integer-valued array stored in the segmentation image - output will be rescaled by the MaximumFractionalValue such that - each pixel lies in the range 0.0 to 1.0. If False, the raw integer - values are returned. If the segmentation has BINARY type, this - parameter has no effect. - skip_overlap_checks: bool - If True, skip checks for overlap between different segments. By - default, checks are performed to ensure that the segments do not - overlap. However, this reduces performance. If checks are skipped - and multiple segments do overlap, the segment with the highest - segment number (after relabelling, if applicable) will be placed - into the output array. - dtype: Union[type, str, np.dtype, None] - Data type of the returned array. If None, an appropriate type will - be chosen automatically. If the returned values are rescaled - fractional values, this will be numpy.float32. Otherwise, the - smallest unsigned integer type that accommodates all of the output - values will be chosen. - Returns - ------- - pixel_array: np.ndarray - Segmentation pixel array + Yields + ------ + Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: + Indices required to construct the requested mask. 
Each + triplet denotes the (output indexer, segmentation indexer, + output segment number) representing a list of "instructions" to + create the requested output array by copying frames from the + segmentation dataset and inserting them into the output array with + a given segment value. Output indexer and segmentation indexer are + tuples that can be used to index the output and segmentations + numpy arrays directly. """ # noqa: E501 - if ( - segment_numbers.min() < 1 or - segment_numbers.max() > self.number_of_segments - ): - raise ValueError( - 'Segment numbers array contains invalid values.' - ) + # Run query to create the iterable of indices needed to construct the + # desired pixel array. The approach here is to create two temporary + # tables in the SQLite database, one for the desired frame numbers, and + # another for the desired segments, then use table joins with the frame + # LUT to arrive at the relevant rows, before clearing up the temporary + # tables. - # Determine output type - if combine_segments: - max_output_val = ( - segment_numbers.shape[0] if relabel else segment_numbers.max() - ) - else: - max_output_val = 1 + # Create temporary table of desired frame numbers + table_name = 'TemporaryFrameNumbers' + column_defs = [ + 'OutputFrameIndex INTEGER UNIQUE NOT NULL', + 'SourceFrameNumber INTEGER UNIQUE NOT NULL' + ] + column_data = enumerate(source_frame_numbers) - will_be_rescaled = ( - rescale_fractional and - self.segmentation_type == SegmentationTypeValues.FRACTIONAL and - not combine_segments + # Construct the query The ORDER BY is not logically necessary + # but seems to improve performance of the downstream numpy + # operations, presumably as it is more cache efficient + query = ( + 'SELECT ' + ' F.OutputFrameIndex,' + ' L.FrameNumber - 1,' + ' S.OutputSegmentNumber ' + 'FROM TemporaryFrameNumbers F ' + 'INNER JOIN FrameLUT L' + ' ON F.SourceFrameNumber = L.ReferencedFrameNumber ' + 'INNER JOIN TemporarySegmentNumbers S' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' + 'ORDER BY F.OutputFrameIndex' ) - if dtype is None: - if will_be_rescaled: - dtype = np.float32 - else: - dtype = _get_unsigned_dtype(max_output_val) - dtype = np.dtype(dtype) - - # Check dtype is suitable - if dtype.kind not in ('u', 'i', 'f'): - raise ValueError( - f'Data type "{dtype}" is not suitable.' - ) - if will_be_rescaled: - intermediate_dtype = np.uint8 - if dtype.kind != 'f': - raise ValueError( - 'If rescaling a fractional segmentation, the output dtype ' - 'must be a floating-point type.' 
+ with self._generate_temp_table( + table_name=table_name, + column_defs=column_defs, + column_data=column_data, + ): + with self._generate_temp_segment_table( + segment_numbers=segment_numbers, + combine_segments=combine_segments, + relabel=relabel + ): + yield ( + ( + (fo, slice(None), slice(None)), + (fi, slice(None), slice(None)), + seg_no + ) + for (fo, fi, seg_no) in self._db_con.execute(query) ) - else: - intermediate_dtype = dtype - _check_numpy_value_representation(max_output_val, dtype) - num_segments = len(segment_numbers) - if self.pixel_array.ndim == 2: - h, w = self.pixel_array.shape - else: - _, h, w = self.pixel_array.shape + @contextmanager + def _iterate_indices_for_tiled_region( + self, + row_start: int, + row_end: int, + column_start: int, + column_end: int, + tile_shape: Tuple[int, int], + segment_numbers: Sequence[int], + combine_segments: bool = False, + relabel: bool = False, + ) -> Generator[ + Iterator[ + Tuple[ + Tuple[Union[slice, int], ...], + Tuple[Union[slice, int], ...], + int + ] + ], + None, + None, + ]: + """Iterate over segmentation frame indices for a given region of the + segmentation's total pixel matrix. - if combine_segments: - # Check whether segmentation is binary, or fractional with only - # binary values - if self.segmentation_type == SegmentationTypeValues.FRACTIONAL: - if not rescale_fractional: - raise ValueError( - 'In order to combine segments of a FRACTIONAL ' - 'segmentation image, argument "rescale_fractional" ' - 'must be set to True.' - ) - # Combining fractional segs is only possible if there are - # two unique values in the array: 0 and MaximumFractionalValue - is_binary = np.isin( - np.unique(self.pixel_array), - np.array([0, self.MaximumFractionalValue]), - assume_unique=True - ).all() - if not is_binary: - raise ValueError( - 'Combining segments of a FRACTIONAL segmentation is ' - 'only possible if the pixel array contains only 0s ' - 'and the specified MaximumFractionalValue ' - f'({self.MaximumFractionalValue}).' - ) - pixel_array = self.pixel_array // self.MaximumFractionalValue - pixel_array = pixel_array.astype(np.uint8) - else: - pixel_array = self.pixel_array + This is intended for the case of a segmentation image that is stored as + a tiled representation of total pixel matrix. - if pixel_array.ndim == 2: - pixel_array = pixel_array[None, :, :] + This yields an iterator to the underlying database result that iterates + over information on the steps required to construct the requested + segmentation mask from the stored frames of the segmentation image. - # Initialize empty pixel array - full_output_shape = ( - output_shape - if isinstance(output_shape, tuple) - else (output_shape, h, w) - ) - out_array = np.zeros( - full_output_shape, - dtype=intermediate_dtype - ) + This method is intended to be used as a context manager that yields the + requested iterator. The iterator is only valid while the context + manager is active. - # Loop over the supplied iterable - for (output_indexer, seg_indexer, seg_n) in indices_iterator: - pix_value = intermediate_dtype.type(seg_n) + Parameters + ---------- + row_start: int + Row index (1-based) in the total pixel matrix of the first row of + the output array. May be negative (last row is -1). + row_end: int + Row index (1-based) in the total pixel matrix one beyond the last + row of the output array. May be negative (last row is -1). + column_start: int + Column index (1-based) in the total pixel matrix of the first + column of the output array. May be negative (last column is -1). 
+ column_end: int + Column index (1-based) in the total pixel matrix one beyond the last + column of the output array. May be negative (last column is -1). + tile_shape: Tuple[int, int] + Shape of each tile (rows, columns). + segment_numbers: Sequence[int] + Numbers of segments to include. + combine_segments: bool, optional + If True, produce indices to combine the different segments into a + single label map in which the value of a pixel represents its + segment. If False (the default), segments are binary and stacked + down the last dimension of the output array. + relabel: bool, optional + If True and ``combine_segments`` is ``True``, the output segment + numbers are relabelled into the range ``0`` to + ``len(segment_numbers)`` (inclusive) according to the position of + the original segment numbers in ``segment_numbers`` parameter. If + ``combine_segments`` is ``False``, this has no effect. - if not skip_overlap_checks: - if np.any( - np.logical_and( - pixel_array[seg_indexer] > 0, - out_array[output_indexer] > 0 - ) - ): - raise RuntimeError( - "Cannot combine segments because segments " - "overlap." - ) - out_array[output_indexer] = np.maximum( - pixel_array[seg_indexer] * pix_value, - out_array[output_indexer] - ) + Yields + ------ + Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: + Indices required to construct the requested mask. Each + triplet denotes the (output indexer, segmentation indexer, + output segment number) representing a list of "instructions" to + create the requested output array by copying frames from the + segmentation dataset and inserting them into the output array with + a given segment value. Output indexer and segmentation indexer are + tuples that can be used to index the output and segmentations + numpy arrays directly. 
- else: - # Initialize empty pixel array - full_output_shape = ( - (*output_shape, num_segments) - if isinstance(output_shape, tuple) - else (output_shape, h, w, num_segments) - ) - out_array = np.zeros( - full_output_shape, - dtype=intermediate_dtype - ) + """ # noqa: E501 + th, tw = tile_shape - # loop through output frames - for (output_indexer, seg_indexer, seg_n) in indices_iterator: + oh = row_end - row_start + ow = column_end - column_start - # Output indexer needs segment index - output_indexer = (*output_indexer, seg_n) + row_offset_start = row_start - th + 1 + column_offset_start = column_start - tw + 1 - # Copy data to to output array - if self.pixel_array.ndim == 2: - # Special case with a single segmentation frame - out_array[output_indexer] = \ - self.pixel_array.copy() - else: - out_array[output_indexer] = \ - self.pixel_array[seg_indexer].copy() + # Construct the query The ORDER BY is not logically necessary + # but seems to improve performance of the downstream numpy + # operations, presumably as it is more cache efficient + query = ( + 'SELECT ' + ' L.RowPositionInTotalImagePixelMatrix,' + ' L.ColumnPositionInTotalImagePixelMatrix,' + ' L.FrameNumber - 1,' + ' S.OutputSegmentNumber ' + 'FROM FrameLUT L ' + 'INNER JOIN TemporarySegmentNumbers S' + ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' + 'WHERE (' + ' L.RowPositionInTotalImagePixelMatrix >= ' + f' {row_offset_start}' + f' AND L.RowPositionInTotalImagePixelMatrix < {row_end}' + ' AND L.ColumnPositionInTotalImagePixelMatrix >= ' + f' {column_offset_start}' + f' AND L.ColumnPositionInTotalImagePixelMatrix < {column_end}' + ')' + 'ORDER BY ' + ' L.RowPositionInTotalImagePixelMatrix,' + ' L.ColumnPositionInTotalImagePixelMatrix,' + ' S.OutputSegmentNumber' + ) - if rescale_fractional: - if self.segmentation_type == SegmentationTypeValues.FRACTIONAL: - if out_array.max() > self.MaximumFractionalValue: - raise RuntimeError( - 'Segmentation image contains values greater than ' - 'the MaximumFractionalValue recorded in the ' - 'dataset.' - ) - max_val = self.MaximumFractionalValue - out_array = out_array.astype(dtype) / max_val + with self._generate_temp_segment_table( + segment_numbers=segment_numbers, + combine_segments=combine_segments, + relabel=relabel + ): + yield ( + ( + ( + slice( + max(rp - row_start, 0), + min(rp + th - row_start, oh) + ), + slice( + max(cp - column_start, 0), + min(cp + tw - column_start, ow) + ), + ), + ( + fi, + slice( + max(row_start - rp, 0), + min(row_end - rp, th) + ), + slice( + max(column_start - cp, 0), + min(column_end - cp, tw) + ), + ), + seg_no + ) + for (rp, cp, fi, seg_no) in self._db_con.execute(query) + ) - return out_array + @contextmanager + def _iterate_indices_for_volume( + self, + slice_start: int, + slice_end: int, + segment_numbers: Sequence[int], + combine_segments: bool = False, + relabel: bool = False, + ) -> Generator[ + Iterator[ + Tuple[ + Tuple[Union[slice, int], ...], + Tuple[Union[slice, int], ...], + int + ] + ], + None, + None, + ]: + """Iterate over frame indices sorted by volume. - def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]: - """Get UIDs for all source SOP instances referenced in the dataset. + This yields an iterator to the underlying database result that iterates + over information on the steps required to construct the requested + segmentation mask from the stored frames of the segmentation image. 
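The slice arithmetic in the tiled-region generator above clips each tile to the requested region. A worked example with hypothetical numbers, for a 16 x 16 tile whose top-left corner sits at row position ``rp = 9`` (1-based) when the requested region starts at ``row_start = 17`` with output height ``oh = 32``:

    rp, th = 9, 16
    row_start, oh = 17, 32
    row_end = row_start + oh

    out_rows = slice(max(rp - row_start, 0), min(rp + th - row_start, oh))
    tile_rows = slice(max(row_start - rp, 0), min(row_end - rp, th))

    assert out_rows == slice(0, 8)    # first 8 output rows...
    assert tile_rows == slice(8, 16)  # ...come from the last 8 tile rows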
- Returns - ------- - List[Tuple[highdicom.UID, highdicom.UID, highdicom.UID]] - List of tuples containing Study Instance UID, Series Instance UID - and SOP Instance UID for every SOP Instance referenced in the - dataset. + This method is intended to be used as a context manager that yields the + requested iterator. The iterator is only valid while the context + manager is active. - """ - return self._db_man.get_source_image_uids() + Parameters + ---------- + slice_start: int, optional + Zero-based index of the "volume position" of the first slice of the + returned volume. The "volume position" refers to the position of + slices after sorting spatially, and may correspond to any frame in + the segmentation file, depending on its construction. Must be a + non-negative integer. + slice_end: Union[int, None], optional + Zero-based index of the "volume position" one beyond the last slice + of the returned volume. The "volume position" refers to the + position of slices after sorting spatially, and may correspond to + any frame in the segmentation file, depending on its construction. + Must be a positive integer. + segment_numbers: Sequence[int] + Sequence containing segment numbers to include. + combine_segments: bool, optional + If True, produce indices to combine the different segments into a + single label map in which the value of a pixel represents its + segment. If False (the default), segments are binary and stacked + down the last dimension of the output array. + relabel: bool, optional + If True and ``combine_segments`` is ``True``, the output segment + numbers are relabelled into the range ``0`` to + ``len(segment_numbers)`` (inclusive) according to the position of + the original segment numbers in ``segment_numbers`` parameter. If + ``combine_segments`` is ``False``, this has no effect. - def get_default_dimension_index_pointers( - self - ) -> List[BaseTag]: - """Get the default list of tags used to index frames. + Yields + ------ + Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: + Indices required to construct the requested mask. Each + triplet denotes the (output indexer, segmentation indexer, + output segment number) representing a list of "instructions" to + create the requested output array by copying frames from the + segmentation dataset and inserting them into the output array with + a given segment value. Output indexer and segmentation indexer are + tuples that can be used to index the output and segmentations + numpy arrays directly. - The list of tags used to index dimensions depends upon how the - segmentation image was constructed, and is stored in the - DimensionIndexPointer attribute within the DimensionIndexSequence. The - list returned by this method matches the order of items in the - DimensionIndexSequence, but omits the ReferencedSegmentNumber - attribute, since this is handled differently to other tags when - indexing frames in highdicom. + """ # noqa: E501 + if self.volume_geometry is None: + raise RuntimeError( + 'This segmentation does not represent a regularly-spaced ' + 'volume.' + ) - Returns - ------- - List[pydicom.tag.BaseTag] - List of tags used as the default dimension index pointers. 
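For orientation, the public wrapper built on this iterator (``get_volume``, whose signature is updated later in this diff) exposes the same volume-position semantics. A hypothetical call requesting volume positions 2, 3 and 4 (the file path is illustrative):

    import highdicom as hd

    seg = hd.seg.segread('seg.dcm')  # hypothetical path
    if seg.volume_geometry is not None:
        subvolume = seg.get_volume(slice_start=2, slice_end=5)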
+        # Construct the query The ORDER BY is not logically necessary
+        # but seems to improve performance of the downstream numpy
+        # operations, presumably as it is more cache efficient
+        query = (
+            'SELECT '
+            f'    L.VolumePosition - {slice_start},'
+            '    L.FrameNumber - 1,'
+            '    S.OutputSegmentNumber '
+            'FROM FrameLUT L '
+            'INNER JOIN TemporarySegmentNumbers S'
+            '    ON L.ReferencedSegmentNumber = S.SegmentNumber '
+            'WHERE '
+            f'    L.VolumePosition >= {slice_start} AND '
+            f'    L.VolumePosition < {slice_end} '
+            'ORDER BY L.VolumePosition'
+        )

-        """
-        referenced_segment_number = tag_for_keyword('ReferencedSegmentNumber')
-        return [
-            t for t in self._db_man.dimension_index_pointers[:]
-            if t != referenced_segment_number
-        ]
+        with self._generate_temp_segment_table(
+            segment_numbers=segment_numbers,
+            combine_segments=combine_segments,
+            relabel=relabel
+        ):
+            yield (
+                (
+                    (fo, slice(None), slice(None)),
+                    (fi, slice(None), slice(None)),
+                    seg_no
+                )
+                for (fo, fi, seg_no) in self._db_con.execute(query)
+            )

-    def are_dimension_indices_unique(
+    @contextmanager
+    def _iterate_indices_by_dimension_index_values(
         self,
-        dimension_index_pointers: Sequence[Union[int, BaseTag]]
-    ) -> bool:
-        """Check if a list of index pointers uniquely identifies frames.
+        dimension_index_values: Sequence[Sequence[int]],
+        dimension_index_pointers: Sequence[int],
+        segment_numbers: Sequence[int],
+        combine_segments: bool = False,
+        relabel: bool = False,
+    ) -> Generator[
+        Iterator[
+            Tuple[
+                Tuple[Union[slice, int], ...],
+                Tuple[Union[slice, int], ...],
+                int
+            ]
+        ],
+        None,
+        None,
+    ]:
+        """Iterate over frame indices for given dimension index values.

-        For a given list of dimension index pointers, check whether every
-        combination of index values for these pointers identifies a unique
-        frame per segment in the segmentation image. This is a pre-requisite
-        for indexing using this list of dimension index pointers in the
-        :meth:`Segmentation.get_pixels_by_dimension_index_values()` method.
+        This is intended to be the most flexible and lowest-level (and
+        therefore also least convenient) method to request information about
+        segmentation frames. The user can choose to specify which segmentation
+        frames are of interest using arbitrary dimension indices and their
+        associated values. This makes no assumptions about the dimension
+        organization of the underlying segmentation, except that the given
+        dimension indices can be used to uniquely identify frames in the
+        segmentation image.
+
+        This yields an iterator to the underlying database result that iterates
+        over information on the steps required to construct the requested
+        segmentation mask from the stored frames of the segmentation image.
+
+        This method is intended to be used as a context manager that yields the
+        requested iterator. The iterator is only valid while the context
+        manager is active.

         Parameters
         ----------
+        dimension_index_values: Sequence[Sequence[int]]
+            Dimension index values for the requested frames.
         dimension_index_pointers: Sequence[Union[int, pydicom.tag.BaseTag]]
-            Sequence of tags serving as dimension index pointers.
-
-        Returns
-        -------
-        bool
-            True if the specified list of dimension index pointers uniquely
-            identifies frames in the segmentation image. False otherwise.
+            The data element tags that identify the indices used in the
+            ``dimension_index_values`` parameter.
+        segment_numbers: Sequence[int]
+            Sequence containing segment numbers to include.
+        combine_segments: bool, optional
+            If True, produce indices to combine the different segments into a
+            single label map in which the value of a pixel represents its
+            segment. If False (the default), segments are binary and stacked
+            down the last dimension of the output array.
+        relabel: bool, optional
+            If True and ``combine_segments`` is ``True``, the output segment
+            numbers are relabelled into the range ``0`` to
+            ``len(segment_numbers)`` (inclusive) according to the position of
+            the original segment numbers in ``segment_numbers`` parameter. If
+            ``combine_segments`` is ``False``, this has no effect.

-        Raises
+        Yields
         ------
-        KeyError
-            If any of the elements of the ``dimension_index_pointers`` are not
-            valid dimension index pointers in this segmentation image.
+        Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]:
+            Indices required to construct the requested mask. Each
+            triplet denotes the (output indexer, segmentation indexer,
+            output segment number) representing a list of "instructions" to
+            create the requested output array by copying frames from the
+            segmentation dataset and inserting them into the output array with
+            a given segment value. Output indexer and segmentation indexer are
+            tuples that can be used to index the output and segmentations
+            numpy arrays directly.

-        """
-        if len(dimension_index_pointers) == 0:
-            raise ValueError(
-                'Argument "dimension_index_pointers" may not be empty.'
-            )
-        dimension_index_pointers = list(dimension_index_pointers)
-        for ptr in dimension_index_pointers:
-            if ptr not in self._db_man.dimension_index_pointers:
-                kw = keyword_for_tag(ptr)
-                if kw == '':
-                    kw = '<no keyword>'
-                raise KeyError(
-                    f'Tag {ptr} ({kw}) is not used as a dimension index '
-                    'in this image.'
-                )
+        """  # noqa: E501
+        # Create temporary table of desired dimension indices
+        table_name = 'TemporaryDimensionIndexValues'

-        dimension_index_pointers.append(
-            tag_for_keyword('ReferencedSegmentNumber')
+        dim_ind_cols = [
+            self._dim_ind_col_names[p] for p in dimension_index_pointers
+        ]
+        column_defs = (
+            ['OutputFrameIndex INTEGER UNIQUE NOT NULL'] +
+            [f'{col} INTEGER NOT NULL' for col in dim_ind_cols]
         )
-        return self._db_man.are_dimension_indices_unique(
-            dimension_index_pointers
+        column_data = (
+            (i, *tuple(row))
+            for i, row in enumerate(dimension_index_values)
+        )
+
+        # Construct the query The ORDER BY is not logically necessary
+        # but seems to improve performance of the downstream numpy
+        # operations, presumably as it is more cache efficient
+        join_str = ' AND '.join(f'D.{col} = L.{col}' for col in dim_ind_cols)
+        query = (
+            'SELECT '
+            '    D.OutputFrameIndex,'  # frame index of the output array
+            '    L.FrameNumber - 1,'  # frame *index* of segmentation image
+            '    S.OutputSegmentNumber '  # output segment number
+            'FROM TemporaryDimensionIndexValues D '
+            'INNER JOIN FrameLUT L'
+            f'    ON {join_str} '
+            'INNER JOIN TemporarySegmentNumbers S'
+            '    ON L.ReferencedSegmentNumber = S.SegmentNumber '
+            'ORDER BY D.OutputFrameIndex'
         )
+        with self._generate_temp_table(
+            table_name=table_name,
+            column_defs=column_defs,
+            column_data=column_data,
+        ):
+            with self._generate_temp_segment_table(
+                segment_numbers=segment_numbers,
+                combine_segments=combine_segments,
+                relabel=relabel
+            ):
+                yield (
+                    (
+                        (fo, slice(None), slice(None)),
+                        (fi, slice(None), slice(None)),
+                        seg_no
+                    )
+                    for (fo, fi, seg_no) in self._db_con.execute(query)
+                )

     def get_pixels_by_source_instance(
         self,
         source_sop_instance_uids: Sequence[str],
@@ -4047,7 +3951,7 @@ def
get_pixels_by_source_instance( """ # Check that indexing in this way is possible - self._db_man._check_indexing_with_source_frames( + self._check_indexing_with_source_frames( ignore_spatial_locations ) @@ -4070,7 +3974,7 @@ def get_pixels_by_source_instance( # Check that the combination of source instances and segment numbers # uniquely identify segmentation frames - if not self._db_man.are_referenced_sop_instances_unique(): + if not self._are_referenced_sop_instances_unique(): raise RuntimeError( 'Source SOP instance UIDs and segment numbers do not ' 'uniquely identify frames of the segmentation image.' @@ -4079,7 +3983,7 @@ def get_pixels_by_source_instance( # Check that all frame numbers requested actually exist if not assert_missing_frames_are_empty: unique_uids = ( - self._db_man.get_unique_referenced_sop_instance_uids() + self._get_unique_referenced_sop_instance_uids() ) missing_uids = set(source_sop_instance_uids) - unique_uids if len(missing_uids) > 0: @@ -4091,7 +3995,7 @@ def get_pixels_by_source_instance( ) raise KeyError(msg) - with self._db_man.iterate_indices_by_source_instance( + with self._iterate_indices_by_source_instance( source_sop_instance_uids=source_sop_instance_uids, segment_numbers=segment_numbers, combine_segments=combine_segments, @@ -4303,7 +4207,7 @@ def get_pixels_by_source_frame( """ # Check that indexing in this way is possible - self._db_man._check_indexing_with_source_frames( + self._check_indexing_with_source_frames( ignore_spatial_locations ) @@ -4326,7 +4230,7 @@ def get_pixels_by_source_frame( # Check that the combination of frame numbers and segment numbers # uniquely identify segmentation frames - if not self._db_man.are_referenced_frames_unique(): + if not self._are_referenced_frames_unique(): raise RuntimeError( 'Source frame numbers and segment numbers do not ' 'uniquely identify frames of the segmentation image.' @@ -4335,7 +4239,7 @@ def get_pixels_by_source_frame( # Check that all frame numbers requested actually exist if not assert_missing_frames_are_empty: max_frame_number = ( - self._db_man.get_max_referenced_frame_number() + self._get_max_referenced_frame_number() ) for f in source_frame_numbers: if f > max_frame_number: @@ -4348,7 +4252,7 @@ def get_pixels_by_source_frame( ) raise ValueError(msg) - with self._db_man.iterate_indices_by_source_frame( + with self._iterate_indices_by_source_frame( source_sop_instance_uid=source_sop_instance_uid, source_frame_numbers=source_frame_numbers, segment_numbers=segment_numbers, @@ -4369,6 +4273,7 @@ def get_pixels_by_source_frame( def get_volume( self, + *, slice_start: int = 0, slice_end: Optional[int] = None, segment_numbers: Optional[Sequence[int]] = None, @@ -4501,7 +4406,7 @@ def get_volume( "empty volume." ) - with self._db_man.iterate_indices_for_volume( + with self._iterate_indices_for_volume( slice_start=slice_start, slice_end=cast(int, slice_end), segment_numbers=segment_numbers, @@ -4520,7 +4425,7 @@ def get_volume( dtype=dtype, ) - affine = self._db_man.volume_geometry[slice_start].affine + affine = self._volume_geometry[slice_start].affine return Volume( array=array, @@ -4710,7 +4615,7 @@ def get_pixels_by_dimension_index_values( ) if dimension_index_pointers is None: dimension_index_pointers = [ - t for t in self._db_man.dimension_index_pointers + t for t in self.dimension_index_pointers if t != referenced_segment_number_tag ] else: @@ -4724,7 +4629,7 @@ def get_pixels_by_dimension_index_values( "Do not include the ReferencedSegmentNumber in the " "argument 'dimension_index_pointers'." 
             )
-                if ptr not in self._db_man.dimension_index_pointers:
+                if ptr not in self.dimension_index_pointers:
                     kw = keyword_for_tag(ptr)
                     if kw == '':
                         kw = '<no keyword>'
@@ -4754,7 +4659,7 @@ def get_pixels_by_dimension_index_values(

         # Check that all frame numbers requested actually exist
         if not assert_missing_frames_are_empty:
-            unique_dim_ind_vals = self._db_man.get_unique_dim_index_values(
+            unique_dim_ind_vals = self._get_unique_dim_index_values(
                 dimension_index_pointers
             )
             queried_dim_inds = set(tuple(r) for r in dimension_index_values)
@@ -4769,7 +4674,7 @@ def get_pixels_by_dimension_index_values(
             )
             raise ValueError(msg)

-        with self._db_man.iterate_indices_by_dimension_index_values(
+        with self._iterate_indices_by_dimension_index_values(
            dimension_index_values=dimension_index_values,
            dimension_index_pointers=dimension_index_pointers,
            segment_numbers=segment_numbers,
@@ -4924,7 +4829,7 @@ def get_total_pixel_matrix(
         # Check whether this segmentation is appropriate for tile-based indexing
         if not is_tiled_image(self):
             raise RuntimeError("Segmentation is not a tiled image.")
-        if not self._db_man.is_indexable_as_total_pixel_matrix():
+        if not self.is_indexable_as_total_pixel_matrix():
             raise RuntimeError(
                 "Segmentation does not have appropriate dimension indices "
                 "to be indexed as a total pixel matrix."
@@ -4983,7 +4888,7 @@ def get_total_pixel_matrix(
             column_end - column_start,
         )

-        with self._db_man.iterate_indices_for_tiled_region(
+        with self._iterate_indices_for_tiled_region(
             row_start=row_start,
             row_end=row_end,
             column_start=column_start,
diff --git a/src/highdicom/sr/content.py b/src/highdicom/sr/content.py
index dc4d5922..bb3ab2e6 100644
--- a/src/highdicom/sr/content.py
+++ b/src/highdicom/sr/content.py
@@ -181,7 +181,7 @@ def from_dataset(
         else:
             dataset_copy = dataset
         item = super()._from_dataset_base(dataset_copy)
-        return cast(LongitudinalTemporalOffsetFromEvent, item)
+        return cast(cls, item)


 class SourceImageForMeasurementGroup(ImageContentItem):
@@ -293,7 +293,7 @@ def from_dataset(
         else:
             dataset_copy = dataset
         item = super()._from_dataset_base(dataset_copy)
-        return cast(SourceImageForMeasurementGroup, item)
+        return cast(cls, item)


 class SourceImageForMeasurement(ImageContentItem):
@@ -405,7 +405,7 @@ def from_dataset(
         else:
             dataset_copy = dataset
         item = super()._from_dataset_base(dataset_copy)
-        return cast(SourceImageForMeasurement, item)
+        return cast(cls, item)


 class SourceImageForRegion(ImageContentItem):
@@ -514,7 +514,7 @@ def from_dataset(
         """
         dataset_copy = deepcopy(dataset)
         item = super()._from_dataset_base(dataset_copy)
-        return cast(SourceImageForRegion, item)
+        return cast(cls, item)


 class SourceImageForSegmentation(ImageContentItem):
@@ -626,7 +626,7 @@ def from_dataset(
         else:
             dataset_copy = dataset
         item = super()._from_dataset_base(dataset_copy)
-        return cast(SourceImageForSegmentation, item)
+        return cast(cls, item)


 class SourceSeriesForSegmentation(UIDRefContentItem):
@@ -705,7 +705,7 @@ def from_dataset(
         else:
             dataset_copy = dataset
         item = super()._from_dataset_base(dataset_copy)
-        return cast(SourceSeriesForSegmentation, item)
+        return cast(cls, item)


 class ImageRegion(ScoordContentItem):
@@ -806,7 +806,7 @@ def from_dataset(
         else:
             dataset_copy = dataset
         item = super()._from_dataset_base(dataset_copy)
-        return cast(ImageRegion, item)
+        return cast(cls, item)


 class ImageRegion3D(Scoord3DContentItem):
@@ -882,7 +882,7 @@ def from_dataset(
         else:
             dataset_copy = dataset
         item = super()._from_dataset_base(dataset_copy)
-        return cast(ImageRegion3D, item)
+        return cast(cls, item)
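The repeated edit in this file follows one pattern: returning ``cast(cls, ...)`` rather than casting to a concrete class name, so that subclasses inheriting ``from_dataset`` are typed as themselves instead of as the parent. A minimal illustration with made-up class names (at runtime ``typing.cast`` simply returns its second argument):

    from typing import cast

    class ContentItemBase:
        @classmethod
        def from_value(cls, value: str) -> 'ContentItemBase':
            item = cls()
            item._value = value  # illustrative attribute
            return cast(cls, item)

    class SpecializedItem(ContentItemBase):
        pass

    assert type(SpecializedItem.from_value('x')) is SpecializedItem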
class VolumeSurface(ContentSequence): @@ -1270,7 +1270,7 @@ def from_dataset( else: dataset_copy = dataset item = super()._from_dataset_base(dataset_copy) - return cast(RealWorldValueMap, item) + return cast(cls, item) class FindingSite(CodeContentItem): @@ -1394,7 +1394,7 @@ def from_dataset( else: dataset_copy = dataset item = super()._from_dataset_base(dataset_copy) - return cast(FindingSite, item) + return cast(cls, item) class ReferencedSegmentationFrame(ContentSequence): @@ -1516,7 +1516,7 @@ def from_sequence( new_seq = ContentSequence([seg_frame_items[0], source_image_items[0]]) new_seq.__class__ = cls - return cast(ReferencedSegmentationFrame, new_seq) + return cast(cls, new_seq) @classmethod def from_segmentation( @@ -1902,7 +1902,7 @@ def from_sequence( ) new_seq.__class__ = cls - return cast(ReferencedSegment, new_seq) + return cast(cls, new_seq) @classmethod def from_segmentation( diff --git a/src/highdicom/sr/sop.py b/src/highdicom/sr/sop.py index 44ed97a2..7fb5fa73 100644 --- a/src/highdicom/sr/sop.py +++ b/src/highdicom/sr/sop.py @@ -643,8 +643,8 @@ def from_dataset( if dataset.SOPClassUID != ComprehensiveSRStorage: raise ValueError('Dataset is not a Comprehensive SR document.') sop_instance = super().from_dataset(dataset, copy=copy) - sop_instance.__class__ = ComprehensiveSR - return cast(ComprehensiveSR, sop_instance) + sop_instance.__class__ = cls + return cast(cls, sop_instance) class Comprehensive3DSR(_SR): @@ -794,8 +794,8 @@ def from_dataset( if dataset.SOPClassUID != Comprehensive3DSRStorage: raise ValueError('Dataset is not a Comprehensive 3D SR document.') sop_instance = super().from_dataset(dataset, copy=copy) - sop_instance.__class__ = Comprehensive3DSR - return cast(Comprehensive3DSR, sop_instance) + sop_instance.__class__ = cls + return cast(cls, sop_instance) def srread( diff --git a/src/highdicom/sr/templates.py b/src/highdicom/sr/templates.py index f102d357..37f82ab7 100644 --- a/src/highdicom/sr/templates.py +++ b/src/highdicom/sr/templates.py @@ -3487,8 +3487,8 @@ def from_sequence( """ instance = super().from_sequence(sequence) - instance.__class__ = PlanarROIMeasurementsAndQualitativeEvaluations - return cast(PlanarROIMeasurementsAndQualitativeEvaluations, instance) + instance.__class__ = cls + return cast(cls, instance) class VolumetricROIMeasurementsAndQualitativeEvaluations( @@ -3762,11 +3762,8 @@ def from_sequence( """ instance = super().from_sequence(sequence) - instance.__class__ = VolumetricROIMeasurementsAndQualitativeEvaluations - return cast( - VolumetricROIMeasurementsAndQualitativeEvaluations, - instance - ) + instance.__class__ = cls + return cast(cls, instance) class ImageLibraryEntryDescriptors(Template): @@ -4221,8 +4218,8 @@ def from_sequence( is_root=True, copy=copy ) - instance.__class__ = MeasurementReport - return cast(MeasurementReport, instance) + instance.__class__ = cls + return cast(cls, instance) def get_observer_contexts( self, diff --git a/src/highdicom/sr/value_types.py b/src/highdicom/sr/value_types.py index 04a6421b..152a12e8 100644 --- a/src/highdicom/sr/value_types.py +++ b/src/highdicom/sr/value_types.py @@ -226,7 +226,7 @@ def _from_dataset_base(cls, dataset: Dataset) -> 'ContentItem': copy=False ) ] - return cast(ContentItem, item) + return cast(cls, item) @property def name(self) -> CodedConcept: @@ -715,7 +715,7 @@ def from_dataset( item.ConceptCodeSequence = DataElementSequence([ CodedConcept.from_dataset(item.ConceptCodeSequence[0], copy=False) ]) - return cast(CodeContentItem, item) + return cast(cls, item) 
class PnameContentItem(ContentItem): @@ -779,7 +779,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.PNAME) item = super()._from_dataset_base(dataset_copy) - return cast(PnameContentItem, item) + return cast(cls, item) class TextContentItem(ContentItem): @@ -842,7 +842,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.TEXT) item = super()._from_dataset_base(dataset_copy) - return cast(TextContentItem, item) + return cast(cls, item) class TimeContentItem(ContentItem): @@ -914,7 +914,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.TIME) item = super()._from_dataset_base(dataset_copy) - return cast(TimeContentItem, item) + return cast(cls, item) class DateContentItem(ContentItem): @@ -986,7 +986,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.DATE) item = super()._from_dataset_base(dataset_copy) - return cast(DateContentItem, item) + return cast(cls, item) class DateTimeContentItem(ContentItem): @@ -1058,7 +1058,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.DATETIME) item = super()._from_dataset_base(dataset_copy) - return cast(DateTimeContentItem, item) + return cast(cls, item) class UIDRefContentItem(ContentItem): @@ -1121,7 +1121,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.UIDREF) item = super()._from_dataset_base(dataset_copy) - return cast(UIDRefContentItem, item) + return cast(cls, item) class NumContentItem(ContentItem): @@ -1253,7 +1253,7 @@ def from_dataset( item.NumericValueQualifierCodeSequence = DataElementSequence([ CodedConcept.from_dataset(qualifier_item, copy=False) ]) - return cast(NumContentItem, item) + return cast(cls, item) class ContainerContentItem(ContentItem): @@ -1332,7 +1332,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.CONTAINER) item = super()._from_dataset_base(dataset_copy) - return cast(ContainerContentItem, item) + return cast(cls, item) class CompositeContentItem(ContentItem): @@ -1417,7 +1417,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.COMPOSITE) item = super()._from_dataset_base(dataset_copy) - return cast(CompositeContentItem, item) + return cast(cls, item) class ImageContentItem(ContentItem): @@ -1548,7 +1548,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.IMAGE) item = super()._from_dataset_base(dataset_copy) - return cast(ImageContentItem, item) + return cast(cls, item) class ScoordContentItem(ContentItem): @@ -1678,7 +1678,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.SCOORD) item = super()._from_dataset_base(dataset_copy) - return cast(ScoordContentItem, item) + return cast(cls, item) class Scoord3DContentItem(ContentItem): @@ -1823,7 +1823,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.SCOORD3D) item = super()._from_dataset_base(dataset_copy) - return cast(Scoord3DContentItem, item) + return cast(cls, item) class TcoordContentItem(ContentItem): @@ -1928,7 +1928,7 @@ def from_dataset( dataset_copy = dataset _assert_value_type(dataset_copy, ValueTypeValues.TCOORD) item = super()._from_dataset_base(dataset_copy) - return cast(TcoordContentItem, item) + return cast(cls, item) class WaveformContentItem(ContentItem): @@ -2047,4 +2047,4 
@@ def from_dataset(
         dataset_copy = dataset
         _assert_value_type(dataset_copy, ValueTypeValues.IMAGE)
         item = super()._from_dataset_base(dataset_copy)
-        return cast(WaveformContentItem, item)
+        return cast(cls, item)

From 282e004c8f9edb40f4437cb16fb2050704022656 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Tue, 20 Aug 2024 13:26:51 -0400
Subject: [PATCH 61/93] Add random flip and permute

---
 src/highdicom/seg/sop.py |   2 +-
 src/highdicom/spatial.py |  17 +++---
 src/highdicom/volume.py  | 118 ++++++++++++++++++++++++++++++++++++---
 tests/test_multiframe.py |  14 ++---
 4 files changed, 127 insertions(+), 24 deletions(-)

diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index 150d084e..4ab9f150 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -4425,7 +4425,7 @@ def get_volume(
             dtype=dtype,
         )

-        affine = self._volume_geometry[slice_start].affine
+        affine = self.volume_geometry[slice_start].affine

         return Volume(
             array=array,
diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py
index c46403fe..a6615d53 100644
--- a/src/highdicom/spatial.py
+++ b/src/highdicom/spatial.py
@@ -3052,9 +3052,9 @@ def get_series_volume_positions(
         Allow for slices missing from the volume. If True, the smallest
         distance between two consecutive slices is found and returned as the
         slice spacing, provided all other spacings are an integer multiple of
-        this value (within tolerance). Alternatively, if ``spacing_hint`` is
-        used, that value will be used instead of the minimum consecutive
-        spacing. If False, any gaps will result in failure.
+        this value (within tolerance). Alternatively, if a SpacingBetweenSlices
+        value is found in the datasets, that value will be used instead of the
+        minimum consecutive spacing. If False, any gaps will result in failure.
     allow_duplicates: bool, optional
         Allow multiple slices to map to the same position within the volume.
         If False, duplicated image positions will result in failure.
@@ -3254,10 +3254,13 @@ def get_volume_positions(
             "Argument 'allow_missing' requires 'sort'."
         )

-    if spacing_hint is not None and spacing_hint <= 0.0:
-        raise ValueError(
-            "Argument 'spacing_hint' should be a positive value."
-        )
+    if spacing_hint is not None:
+        if spacing_hint < 0.0:
+            # There are some edge cases of the standard where this is valid
+            spacing_hint = abs(spacing_hint)
+        if spacing_hint == 0.0:
+            raise ValueError("Argument 'spacing_hint' cannot be 0.")
+

     image_positions_arr = np.array(image_positions)

     if image_positions_arr.ndim != 2 or image_positions_arr.shape[1] != 3:
diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py
index 8daed5ca..dc8ef863 100644
--- a/src/highdicom/volume.py
+++ b/src/highdicom/volume.py
@@ -1,5 +1,4 @@
 from abc import ABC, abstractmethod
-from copy import deepcopy
 from os import PathLike
 from pathlib import Path
 from typing import List, Optional, Sequence, Union, Tuple, cast
@@ -46,15 +45,19 @@
 )


-# TODO add basic arithmetric operations
 # TODO add pixel value transformations
 # TODO should methods copy arrays?
-# TODO random crop, random flip, random permute
 # TODO trim non-zero
 # TODO support slide coordinate system
 # TODO volread and metadata
 # TODO constructors for geometry, do they make sense for volume?
 # TODO ordering of frames in seg, setting 3D dimension organization
+# TODO get_volume to multiframe image
+# TODO lazy loading for multiframe
+# TODO pickalble sqlite
+# TODO get volume from legacy series
+# TODO make multiframe public
+# TODO figure out type hinting for _VolumeBase


 class _VolumeBase(ABC):
@@ -770,6 +773,49 @@ def permute_axes(self, indices: Sequence[int]) -> '_VolumeBase':

         """
         pass

+    def random_permute_axes(
+        self,
+        axes: Sequence[int] = (0, 1, 2)
+    ) -> '_VolumeBase':
+        """Create a new geometry by randomly permuting the spatial axes.
+
+        Parameters
+        ----------
+        axes: Sequence[int]
+            Sequence of three integers containing the values 0, 1 and 2 in some
+            order. The sequence must contain 2 or 3 elements. This subset of
+            axes will be included when generating indices for
+            permutation. Any axis not in this sequence will remain in its
+            original position.
+
+        Returns
+        -------
+        highdicom.volume._VolumeBase:
+            New geometry with spatial axes permuted randomly.
+
+        """
+        if len(axes) < 2 or len(axes) > 3:
+            raise ValueError(
+                "Argument 'axes' must contain 2 or 3 items."
+            )
+
+        if len(set(axes)) != len(axes):
+            raise ValueError(
+                "Argument 'axes' should contain unique values."
+            )
+
+        if not set(axes) <= {0, 1, 2}:
+            raise ValueError(
+                "Argument 'axes' should contain only 0, 1, and 2."
+            )
+
+        indices = np.random.permutation(axes).tolist()
+        if len(indices) == 2:
+            missing_index = ({0, 1, 2} - set(indices)).pop()
+            indices.insert(missing_index, missing_index)
+
+        return self.permute_axes(indices)
+
     def get_closest_patient_orientation(self) -> Tuple[
         PatientOrientationValuesBiped,
         PatientOrientationValuesBiped,
@@ -870,7 +916,7 @@ def swap_axes(self, axis_1: int, axis_2: int) -> '_VolumeBase':

         return self.permute_axes(permutation)

-    def flip(self, axis: Union[int, Sequence[int]]) -> '_VolumeBase':
+    def flip(self, axes: Union[int, Sequence[int]]) -> '_VolumeBase':
         """Flip the spatial axes of the array.

         Note that this flips the array and updates the affine to reflect the
@@ -878,7 +924,7 @@ def flip(self, axis: Union[int, Sequence[int]]) -> '_VolumeBase':

         Parameters
         ----------
-        axis: Union[int, Sequence[int]]
+        axes: Union[int, Sequence[int]]
             Axis or list of axis indices that should be flipped. These should
             include only the spatial axes (0, 1, and/or 2).

@@ -888,10 +934,10 @@ def flip(self, axis: Union[int, Sequence[int]]) -> '_VolumeBase':
             New volume with spatial axes flipped as requested.

         """
-        if isinstance(axis, int):
-            axis = [axis]
+        if isinstance(axes, int):
+            axes = [axes]

-        if len(axis) > 3 or len(set(axis) - {0, 1, 2}) > 0:
+        if len(axes) > 3 or len(set(axes) - {0, 1, 2}) > 0:
             raise ValueError(
                 'Argument "axis" must contain only values 0, 1, and/or 2.'
             )
@@ -900,13 +946,59 @@ def flip(self, axis: Union[int, Sequence[int]]) -> '_VolumeBase':
         # this logic figured out already
         index = []
         for d in range(3):
-            if d in axis:
+            if d in axes:
                 index.append(slice(-1, None, -1))
             else:
                 index.append(slice(None))

         return self[tuple(index)]

+    def random_flip(self, axes: Sequence[int] = (0, 1, 2)) -> '_VolumeBase':
+        """Randomly flip the spatial axes of the array.
+
+        Note that this flips the array and updates the affine to reflect the
+        flip.
+
+        Parameters
+        ----------
+        axes: Union[int, Sequence[int]]
+            Axis or list of axis indices that may be flipped. These should
+            include only the spatial axes (0, 1, and/or 2). Each axis in this
+            list is flipped in the output volume with probability 0.5.
+
+        Returns
+        -------
+        highdicom.volume.Volume:
+            New volume with selected spatial axes randomly flipped.
+
+        """
+        if len(axes) < 2 or len(axes) > 3:
+            raise ValueError(
+                "Argument 'axes' must contain 2 or 3 items."
+            )
+
+        if len(set(axes)) != len(axes):
+            raise ValueError(
+                "Argument 'axes' should contain unique values."
+            )
+
+        if not set(axes) <= {0, 1, 2}:
+            raise ValueError(
+                "Argument 'axes' should contain only 0, 1, and 2."
+            )
+
+        slices = []
+        for d in range(3):
+            if d in axes:
+                if np.random.randint(2) == 1:
+                    slices.append(slice(None, None, -1))
+                else:
+                    slices.append(slice(None))
+            else:
+                slices.append(slice(None))
+
+        return self[tuple(slices)]
+
     @property
     def handedness(self) -> AxisHandedness:
         """highdicom.AxisHandedness: Axis handedness of the volume."""
@@ -1388,6 +1480,14 @@ def match_geometry(

 class VolumeGeometry(_VolumeBase):

+    """Class encapsulating the geometry of a volume.
+
+    Unlike the similar :class:`highdicom.volume.Volume`, items of this class do
+    not contain voxel data for the underlying volume, just a description of the
+    geometry.
+
+    """
+
     def __init__(
         self,
         affine: np.ndarray,
diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py
index 5e4ed690..7803bf99 100644
--- a/tests/test_multiframe.py
+++ b/tests/test_multiframe.py
@@ -3,25 +3,25 @@
 from pydicom import dcmread
 from pydicom.data import get_testdata_file, get_testdata_files

-from highdicom._multiframe import MultiFrameDBManager
+from highdicom._multiframe import MultiFrameImage


 def test_slice_spacing():
     ct_multiframe = dcmread(
         get_testdata_file('eCT_Supplemental.dcm')
     )
-    db = MultiFrameDBManager(ct_multiframe)
+    image = MultiFrameImage.from_dataset(ct_multiframe)

     expected_affine = np.array(
         [
             [0.0, 0.0, -0.388672, 99.5],
             [0.0, 0.388672, 0.0, -301.5],
-            [-10.0, 0.0, 0.0, -149],
+            [10.0, 0.0, 0.0, -159],
             [0.0, 0.0, 0.0, 1.0],
         ]
     )
-    assert db.volume_geometry.spatial_shape[0] == 2
-    assert np.array_equal(db.volume_geometry.affine, expected_affine)
+    assert image.volume_geometry.spatial_shape[0] == 2
+    assert np.array_equal(image.volume_geometry.affine, expected_affine)


 def test_slice_spacing_irregular():
@@ -33,6 +33,6 @@ def test_slice_spacing_irregular():
     ct_multiframe.PerFrameFunctionalGroupsSequence[0].\
         PlanePositionSequence[0].ImagePositionPatient = [1.0, 0.0, 0.0]

-    db = MultiFrameDBManager(ct_multiframe)
+    image = MultiFrameImage.from_dataset(ct_multiframe)

-    assert db.volume_geometry is None
+    assert image.volume_geometry is None
From 1e6ce19e982859a3c65886256d1b27b9f143a895 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Tue, 20 Aug 2024 13:37:38 -0400
Subject: [PATCH 62/93] Add volume to docs

---
 docs/package.rst        |  9 +++++++++
 src/highdicom/volume.py | 14 --------------
 2 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/docs/package.rst b/docs/package.rst
index 8a6b787c..0683c41c 100644
--- a/docs/package.rst
+++ b/docs/package.rst
@@ -75,6 +75,15 @@ highdicom.utils module
    :undoc-members:
    :show-inheritance:

+highdicom.volume module
++++++++++++++++++++++++
+
+.. automodule:: highdicom.volume
+   :members:
+   :inherited-members: pydicom.dataset.Dataset,pydicom.sequence.Sequence,Dataset,Sequence,list,str,DataElementSequence,enum.Enum,Enum,
+   :special-members: __call__
+   :undoc-members:
+   :show-inheritance:

 ..
_highdicom-legacy-subpackage: diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index dc8ef863..efc63f4f 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -1774,20 +1774,6 @@ class Volume(_VolumeBase): geometry, however this can be constructed from or transformed to other conventions with appropriate optional parameters to its methods: - * The pixel indices are ordered (slice index, row index, column index). - * Pixel indices are zero-based and represent the center of the pixel. - * Column indices are ordered top to bottom, row indices are ordered left to - right. The interpretation of the slice indices direction is not defined. - * The x, y, z coordinates of frame-of-reference coordinate system follow - the "LPS" convention used in DICOM (see - :dcm:`Part 3 Section C.7.6.2.1.1 `). - I.e. - * The first coordinate (``x``) increases from the patient's right to left - * The second coordinate (``y``) increases from the patient's anterior to - posterior. - * The third coordinate (``z``) increases from the patient's caudal - direction (inferior) to cranial direction (superior). - Note ---- The ordering of pixel indices used by this class (slice, row, column) From 82d79d2618024e749982ac5a96bf944c610ad37f Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 20 Aug 2024 20:48:31 -0400 Subject: [PATCH 63/93] Handle pickling of multiframe images --- src/highdicom/_multiframe.py | 57 ++++++++++++++++++++++++++++++++++++ src/highdicom/volume.py | 1 - tests/test_multiframe.py | 26 ++++++++++++++++ 3 files changed, 83 insertions(+), 1 deletion(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 0d56713d..e64d3c0b 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -117,6 +117,63 @@ def from_dataset( im._build_luts() return im + def __getstate__(self) -> Dict[str, Any]: + """Get the state for pickling. + + This is required to work around the fact that a sqlite3 + Connection object cannot be pickled. + + Returns + ------- + Dict[str, Any]: + State of the object. + + """ + state = super().__getstate__().copy() + + db_data = self._serialize_db() + + del state['_db_con'] + state['db_data'] = db_data + + return state + + def __setstate__(self, state: Dict[str, Any]) -> None: + """Set the state of the object. + + This is required to work around the fact that a sqlite3 + Connection object cannot be pickled. + + Parameters + ---------- + state: Dict[str, Any] + State of the object. + + """ + self._db_con = sqlite3.connect(':memory:') + with self._db_con: + self._db_con.executescript(state['db_data'].decode('utf-8')) + + del state['db_data'] + + super().__setstate__(state) + + def _serialize_db(self) -> bytes: + """Get a serialized copy of the internal database. + + Returns + ------- + bytes: + Serialized copy of the internal database. + + """ + return b''.join( + [ + line.encode('utf-8') + for line in self._db_con.iterdump() + ] + ) + def _build_luts(self) -> None: """Build lookup tables for efficient querying. 
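# A minimal standalone sketch of the pickling pattern used in the hunk above,
# assuming only the standard library: an in-memory sqlite3 connection cannot
# be pickled directly, so the database is dumped to a SQL script on
# __getstate__ and replayed on __setstate__. The DBHolder class here is
# hypothetical, for illustration only:

import pickle
import sqlite3

class DBHolder:
    def __init__(self):
        self._db_con = sqlite3.connect(':memory:')
        with self._db_con:
            self._db_con.execute('CREATE TABLE t(x INTEGER)')
            self._db_con.execute('INSERT INTO t VALUES (1)')

    def __getstate__(self):
        state = self.__dict__.copy()
        # Serialize the database as a SQL script and drop the raw connection
        state['db_data'] = '\n'.join(self._db_con.iterdump())
        del state['_db_con']
        return state

    def __setstate__(self, state):
        # Recreate the in-memory database by replaying the dumped script
        self._db_con = sqlite3.connect(':memory:')
        self._db_con.executescript(state.pop('db_data'))
        self.__dict__.update(state)

restored = pickle.loads(pickle.dumps(DBHolder()))
assert restored._db_con.execute('SELECT x FROM t').fetchone() == (1,)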
diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index efc63f4f..d5f74349 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -54,7 +54,6 @@ # TODO ordering of frames in seg, setting 3D dimension organization # TODO get_volume to multiframe image # TODO lazy loading for multiframe -# TODO pickalble sqlite # TODO get volume from legacy series # TODO make multiframe public # TODO figure out type hinting for _VolumeBase diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py index 7803bf99..576e7bce 100644 --- a/tests/test_multiframe.py +++ b/tests/test_multiframe.py @@ -1,4 +1,5 @@ """Tests for the highdicom._multiframe module.""" +import pickle import numpy as np from pydicom import dcmread from pydicom.data import get_testdata_file, get_testdata_files @@ -20,6 +21,7 @@ def test_slice_spacing(): [0.0, 0.0, 0.0, 1.0], ] ) + assert image.volume_geometry is not None assert image.volume_geometry.spatial_shape[0] == 2 assert np.array_equal(image.volume_geometry.affine, expected_affine) @@ -36,3 +38,27 @@ def test_slice_spacing_irregular(): image = MultiFrameImage.from_dataset(ct_multiframe) assert image.volume_geometry is None + + +def test_pickle(): + # Check that the database is successfully serialized and deserialized + ct_multiframe = dcmread( + get_testdata_file('eCT_Supplemental.dcm') + ) + image = MultiFrameImage.from_dataset(ct_multiframe) + + ptr = image.dimension_index_pointers[0] + + pickled = pickle.dumps(image) + + # Check that the pickling process has not damaged the db on the existing + # instance + # This is just an example operation that requires the db + assert not image.are_dimension_indices_unique([ptr]) + + unpickled = pickle.loads(pickled) + assert isinstance(unpickled, MultiFrameImage) + + # Check that the database has been successfully restored in the + # deserialization process + assert not unpickled.are_dimension_indices_unique([ptr]) From c4632d0f5a6007382a7b0803f67d3ba8c3c2acb1 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 10 Sep 2024 21:43:43 -0400 Subject: [PATCH 64/93] Work in progress pixel transforms --- src/highdicom/io.py | 72 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/src/highdicom/io.py b/src/highdicom/io.py index 67417f8c..6f268f68 100644 --- a/src/highdicom/io.py +++ b/src/highdicom/io.py @@ -633,6 +633,78 @@ def read_frame(self, index: int, correct_color: bool = True) -> np.ndarray: return frame_array + def read_frame_transformed( + self, + index: int, + correct_color: bool = True, + apply_modality_transform: bool = True, + apply_voi_transform: bool = False, + voi_transform_index: int = 0, + apply_palette_color_lut: bool = True, + apply_icc_transform: bool = True, + ) -> np.ndarray: + """Return a frame with pixel transformations applied. + + Parameters + ---------- + apply_modality_transform: bool, optional + Whether to apply the modality transform (either a rescale intercept + and slope or modality LUT) to the pixel values, if present in the + datasets. + apply_voi_transform: bool, optional + Whether to apply the value of interest (VOI) transform (either a + windowing operation or VOI LUT) to the pixel values, if present in + the datasets. + voi_transform_index: int, optional + Index of the VOI transform to apply if multiple are included in the + datasets. Ignored if ``apply_voi_transform`` is ``False`` or no VOI + transform is included in the datasets. 
+        apply_palette_color_lut: bool, optional
+            Whether to apply the palette color LUT if a dataset has photometric
+            interpretation ``'PALETTE_COLOR'``.
+        apply_icc_transform: bool, optional
+            Whether to apply an ICC color profile, if present in the datasets.
+        standardize_color_space: bool, optional
+            Whether to convert the color space to a standardized space. If
+            True, images with photometric interpretation ``MONOCHROME1`` are
+            inverted to mimic ``MONOCHROME2``, and images with photometric
+            interpretation ``YBR_FULL`` or ``YBR_FULL_422`` are converted to
+            ``RGB``.
+
+        Returns
+        -------
+        numpy.ndarray:
+            Numpy array of frame with requested transformations applied.
+
+        """
+        frame = self.read_frame(index, correct_color=False)
+        ds = self.metadata
+
+        # TODO fix this from here
+        if apply_modality_transform:
+            frame = apply_modality_lut(frame, ds)
+        if apply_voi_transform:
+            frame = apply_voi_lut(frame, ds, voi_transform_index)
+        if (
+            apply_palette_color_lut and
+            ds.PhotometricInterpretation == 'PALETTE_COLOR'
+        ):
+            frame = apply_color_lut(frame, ds)
+        if apply_icc_transform and 'ICCProfile' in ds:
+            manager = ColorManager(ds.ICCProfile)
+            frame = manager.transform_frame(frame)
+        if standardize_color_space:
+            if ds.PhotometricInterpretation == 'MONOCHROME1':
+                # TODO what if a VOI_LUT has been applied
+                max_value = 2 ** ds.BitsStored - 1
+                frame = max_value - frame
+            elif ds.PhotometricInterpretation in (
+                'YBR_FULL', 'YBR_FULL_422'
+            ):
+                frame = convert_color_space(
+                    frame,
+                    current=ds.PhotometricInterpretation,
+                    desired='RGB'
+                )
+
+        return frame
+
     @property
     def number_of_frames(self) -> int:
         """int: Number of frames"""

From ad63ad28d7395db79e67fde86cd28745779087b4 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Fri, 4 Oct 2024 20:43:54 -0400
Subject: [PATCH 65/93] Fix serialization

---
 src/highdicom/_multiframe.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py
index c6ba4a84..d9a46c49 100644
--- a/src/highdicom/_multiframe.py
+++ b/src/highdicom/_multiframe.py
@@ -129,7 +129,7 @@ def __getstate__(self) -> Dict[str, Any]:
             State of the object.
""" - state = super().__getstate__().copy() + state = super().__dict__.copy() db_data = self._serialize_db() From 6b9df123575a9eb52e2dd383700e27549d8c9d38 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Fri, 4 Oct 2024 20:56:04 -0400 Subject: [PATCH 66/93] Add tests for labelmap volumes --- tests/test_seg.py | 86 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/tests/test_seg.py b/tests/test_seg.py index fc32d0d8..037da42b 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -1843,6 +1843,92 @@ def test_construction_volume(self): pp[0].ImagePositionPatient ) + def test_construction_volume_fractional(self): + # Segmentation instance from a series of single-frame CT images + # with empty frames kept in + instance = Segmentation( + [self._ct_image], + self._ct_seg_volume, + SegmentationTypeValues.FRACTIONAL.value, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + max_fractional_value=1, + omit_empty_frames=False + ) + assert np.array_equal( + np.flip(instance.pixel_array, axis=0), + self._ct_seg_volume.array, + ) + + assert instance.DimensionOrganizationType == '3D' + shared_item = instance.SharedFunctionalGroupsSequence[0] + assert len(shared_item.PixelMeasuresSequence) == 1 + pm_item = shared_item.PixelMeasuresSequence[0] + assert pm_item.PixelSpacing == self._ct_volume_pixel_spacing + assert not hasattr(pm_item, 'SliceThickness') + assert len(shared_item.PlaneOrientationSequence) == 1 + po_item = shared_item.PlaneOrientationSequence[0] + assert po_item.ImageOrientationPatient == \ + self._ct_volume_orientation + for plane_item, pp in zip( + instance.PerFrameFunctionalGroupsSequence, + self._ct_seg_volume.get_plane_positions()[::-1], + ): + assert ( + plane_item.PlanePositionSequence[0].ImagePositionPatient == + pp[0].ImagePositionPatient + ) + + def test_construction_volume_labelmap(self): + # Segmentation instance from a series of single-frame CT images + # with empty frames kept in + instance = Segmentation( + [self._ct_image], + self._ct_seg_volume, + SegmentationTypeValues.LABELMAP, + self._segment_descriptions, + self._series_instance_uid, + self._series_number, + self._sop_instance_uid, + self._instance_number, + self._manufacturer, + self._manufacturer_model_name, + self._software_versions, + self._device_serial_number, + max_fractional_value=1, + omit_empty_frames=False + ) + assert np.array_equal( + np.flip(instance.pixel_array, axis=0), + self._ct_seg_volume.array, + ) + + assert instance.DimensionOrganizationType == '3D' + shared_item = instance.SharedFunctionalGroupsSequence[0] + assert len(shared_item.PixelMeasuresSequence) == 1 + pm_item = shared_item.PixelMeasuresSequence[0] + assert pm_item.PixelSpacing == self._ct_volume_pixel_spacing + assert not hasattr(pm_item, 'SliceThickness') + assert len(shared_item.PlaneOrientationSequence) == 1 + po_item = shared_item.PlaneOrientationSequence[0] + assert po_item.ImageOrientationPatient == \ + self._ct_volume_orientation + for plane_item, pp in zip( + instance.PerFrameFunctionalGroupsSequence, + self._ct_seg_volume.get_plane_positions()[::-1], + ): + assert ( + plane_item.PlanePositionSequence[0].ImagePositionPatient == + pp[0].ImagePositionPatient + ) + def test_construction_3d_multiframe(self): # The CT multiframe image is already a volume, but the frames are # ordered the wrong way 
From 5a2bdd6755b9fb6f6aa53adbccc34a730af66013 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sat, 12 Oct 2024 01:56:38 +0200 Subject: [PATCH 67/93] Implement generalized parsing for multiframe images --- src/highdicom/_multiframe.py | 897 ++++++++++++++++++++++++++++++++-- src/highdicom/seg/sop.py | 908 +++++++---------------------------- src/highdicom/volume.py | 4 + tests/test_seg.py | 24 +- 4 files changed, 1044 insertions(+), 789 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index d9a46c49..5fad2b55 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -7,6 +7,7 @@ from typing import ( Any, Iterable, + Iterator, Dict, Generator, List, @@ -20,7 +21,10 @@ import numpy as np from pydicom import Dataset from pydicom.tag import BaseTag -from pydicom.datadict import get_entry, tag_for_keyword +from pydicom.datadict import ( + get_entry, + tag_for_keyword, +) from pydicom.multival import MultiValue from highdicom._module_utils import is_multiframe_image @@ -42,25 +46,45 @@ # Dictionary mapping DCM VRs to appropriate SQLite types _DCM_SQL_TYPE_MAP = { - 'CS': 'VARCHAR', - 'DS': 'REAL', - 'FD': 'REAL', - 'FL': 'REAL', - 'IS': 'INTEGER', - 'LO': 'TEXT', - 'LT': 'TEXT', - 'PN': 'TEXT', - 'SH': 'TEXT', - 'SL': 'INTEGER', - 'SS': 'INTEGER', - 'ST': 'TEXT', - 'UI': 'TEXT', - 'UL': 'INTEGER', - 'UR': 'TEXT', - 'US or SS': 'INTEGER', - 'US': 'INTEGER', - 'UT': 'TEXT', - } + 'CS': 'VARCHAR', + 'DS': 'REAL', + 'FD': 'REAL', + 'FL': 'REAL', + 'IS': 'INTEGER', + 'LO': 'TEXT', + 'LT': 'TEXT', + 'PN': 'TEXT', + 'SH': 'TEXT', + 'SL': 'INTEGER', + 'SS': 'INTEGER', + 'ST': 'TEXT', + 'UI': 'TEXT', + 'UL': 'INTEGER', + 'UR': 'TEXT', + 'US or SS': 'INTEGER', + 'US': 'INTEGER', + 'UT': 'TEXT', +} +_DCM_PYTHON_TYPE_MAP = { + 'CS': str, + 'DS': float, + 'FD': float, + 'FL': float, + 'IS': int, + 'LO': str, + 'LT': str, + 'PN': str, + 'SH': str, + 'SL': int, + 'SS': int, + 'ST': str, + 'UI': str, + 'UL': int, + 'UR': str, + 'US or SS': int, + 'US': int, + 'UT': str, +} _NO_FRAME_REF_VALUE = -1 @@ -75,7 +99,8 @@ class MultiFrameImage(SOPClass): _is_tiled_full: bool _single_source_frame_per_frame: bool _dim_ind_pointers: List[BaseTag] - _dim_ind_col_names: Dict[int, str] + # Mapping of tag value to (index column name, val column name(s)) + _dim_ind_col_names: Dict[int, Tuple[str, Union[str, Tuple[str, ...], None]]] _locations_preserved: Optional[SpatialLocationsPreservedValues] _db_con: sqlite3.Connection _volume_geometry: Optional[VolumeGeometry] @@ -210,7 +235,7 @@ def _build_luts(self) -> None: grp_ptr = getattr(dim_ind, "FunctionalGroupPointer", None) func_grp_pointers[ptr] = grp_ptr - # We mav want to gather additional information that is not one of the + # We may want to gather additional information that is not one of the # indices extra_collection_pointers = [] extra_collection_func_pointers = {} @@ -475,9 +500,11 @@ def _build_luts(self) -> None: # image col_defs = [] # SQL column definitions col_data = [] # lists of column data + self._col_types = {} # dictionary from column name to SQL type # Frame number column col_defs.append('FrameNumber INTEGER PRIMARY KEY') + self._col_types['FrameNumber'] = 'INTEGER' col_data.append(list(range(1, self.NumberOfFrames + 1))) self._dim_ind_col_names = {} @@ -486,10 +513,10 @@ def _build_luts(self) -> None: if kw == '': kw = f'UnknownDimensionIndex{i}' ind_col_name = kw + '_DimensionIndexValues' - self._dim_ind_col_names[t] = ind_col_name # Add column for dimension index col_defs.append(f'{ind_col_name} 
INTEGER NOT NULL')
+            self._col_types[ind_col_name] = 'INTEGER'
             col_data.append(dim_indices[t])

             # Add column for dimension value
@@ -503,21 +530,31 @@
             try:
                 vm = int(vm_str)
             except ValueError:
+                self._dim_ind_col_names[t] = (ind_col_name, None)
                 continue
             try:
                 sql_type = _DCM_SQL_TYPE_MAP[vr]
             except KeyError:
+                self._dim_ind_col_names[t] = (ind_col_name, None)
                 continue

             if vm > 1:
+                val_col_names = []
                 for d in range(vm):
                     data = [el[d] for el in dim_values[t]]
-                    col_defs.append(f'{kw}_{d} {sql_type} NOT NULL')
+                    col_name = f'{kw}_{d}'
+                    col_defs.append(f'{col_name} {sql_type} NOT NULL')
+                    self._col_types[col_name] = sql_type
                     col_data.append(data)
+                    val_col_names.append(col_name)
+
+                self._dim_ind_col_names[t] = (ind_col_name, tuple(val_col_names))
             else:
                 # Single column
                 col_defs.append(f'{kw} {sql_type} NOT NULL')
+                self._col_types[kw] = sql_type
                 col_data.append(dim_values[t])
+                self._dim_ind_col_names[t] = (ind_col_name, kw)

         for i, t in enumerate(extra_collection_pointers):
             vr, vm_str, _, _, kw = get_entry(t)
@@ -532,11 +569,14 @@
             if vm > 1:
                 for d in range(vm):
                     data = [el[d] for el in extra_collection_values[t]]
-                    col_defs.append(f'{kw}_{d} {sql_type} NOT NULL')
+                    col_name = f'{kw}_{d}'
+                    col_defs.append(f'{col_name} {sql_type} NOT NULL')
+                    self._col_types[col_name] = sql_type
                     col_data.append(data)
             else:
                 # Single column
                 col_defs.append(f'{kw} {sql_type} NOT NULL')
+                self._col_types[kw] = sql_type
                 col_data.append(extra_collection_values[t])

         # Volume related information
@@ -572,6 +612,7 @@
             spacing_between_slices=volume_spacing,
         )
         col_defs.append('VolumePosition INTEGER NOT NULL')
+        self._col_types['VolumePosition'] = 'INTEGER'
         col_data.append(volume_positions)

         # Columns related to source frames, if they are usable for indexing
@@ -582,7 +623,9 @@
         )
         if referenced_instances is not None:
             col_defs.append('ReferencedFrameNumber INTEGER')
+            self._col_types['ReferencedFrameNumber'] = 'INTEGER'
             col_defs.append('ReferencedSOPInstanceUID VARCHAR NOT NULL')
+            self._col_types['ReferencedSOPInstanceUID'] = 'VARCHAR'
             col_defs.append(
                 'FOREIGN KEY(ReferencedSOPInstanceUID) '
                 'REFERENCES InstanceUIDs(SOPInstanceUID)'
@@ -739,13 +782,11 @@ def _create_ref_instance_table(
         """
         with self._db_con:
             self._db_con.execute(
-                """
-                CREATE TABLE InstanceUIDs(
-                    StudyInstanceUID VARCHAR NOT NULL,
-                    SeriesInstanceUID VARCHAR NOT NULL,
-                    SOPInstanceUID VARCHAR PRIMARY KEY
-                )
-                """
+                "CREATE TABLE InstanceUIDs("
+                "StudyInstanceUID VARCHAR NOT NULL, "
+                "SeriesInstanceUID VARCHAR NOT NULL, "
+                "SOPInstanceUID VARCHAR PRIMARY KEY"
+                ")"
             )
             self._db_con.executemany(
                 "INSERT INTO InstanceUIDs "
                 "(StudyInstanceUID, SeriesInstanceUID, SOPInstanceUID) "
                 "VALUES(?, ?, ?)",
                 referenced_uids,
             )
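# A standalone sketch, with a hypothetical table, of the uniqueness test
# implemented just below: count the distinct combinations of the candidate
# columns and compare against the number of frames. The core SQL pattern:

import sqlite3

con = sqlite3.connect(':memory:')
con.execute(
    'CREATE TABLE FrameLUT('
    'FrameNumber INTEGER, StackID TEXT, InStackPositionNumber INTEGER)'
)
con.executemany(
    'INSERT INTO FrameLUT VALUES (?, ?, ?)',
    [(1, 'A', 1), (2, 'A', 2), (3, 'B', 1)],
)
n_frames = con.execute('SELECT COUNT(*) FROM FrameLUT').fetchone()[0]
n_combos = con.execute(
    'SELECT COUNT(*) FROM ('
    'SELECT 1 FROM FrameLUT GROUP BY StackID, InStackPositionNumber)'
).fetchone()[0]
# The two columns together identify each frame uniquely
assert n_combos == n_frames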
+    def _are_columns_unique(
+        self,
+        column_names: Sequence[str],
+    ) -> bool:
+        """Check if a list of columns uniquely identifies frames.
+
+        For a given list of columns, check whether every combination of values
+        for these columns identifies a unique image frame. This is a
+        pre-requisite for indexing frames using this list of columns.
+
+        Parameters
+        ----------
+        column_names: Sequence[str]
+            Column names.
+
+        Returns
+        -------
+        bool
+            True if the combination of columns is sufficient to identify
+            unique frames.
+
+        """
+        col_str = ", ".join(column_names)
+        cur = self._db_con.cursor()
+        n_unique_combos = cur.execute(
+            f"SELECT COUNT(*) FROM (SELECT 1 FROM FrameLUT GROUP BY {col_str})"
+        ).fetchone()[0]
+        return n_unique_combos == self.NumberOfFrames
+
     def are_dimension_indices_unique(
         self,
         dimension_index_pointers: Sequence[Union[int, BaseTag]],
@@ -778,13 +848,8 @@
         """
         column_names = []
         for ptr in dimension_index_pointers:
-            column_names.append(self._dim_ind_col_names[ptr])
-        col_str = ", ".join(column_names)
-        cur = self._db_con.cursor()
-        n_unique_combos = cur.execute(
-            f"SELECT COUNT(*) FROM (SELECT 1 FROM FrameLUT GROUP BY {col_str})"
-        ).fetchone()[0]
-        return n_unique_combos == self.NumberOfFrames
+            column_names.append(self._dim_ind_col_names[ptr][0])
+        return self._are_columns_unique(column_names)

     def get_source_image_uids(self) -> List[Tuple[hd_UID, hd_UID, hd_UID]]:
         """Get UIDs of source image instances referenced in the image.
@@ -851,11 +916,11 @@
             positions in the total pixel matrix. False otherwise.

         """
-        row_pos_kw = tag_for_keyword('RowPositionInTotalImagePixelMatrix')
-        col_pos_kw = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix')
+        row_pos_tag = tag_for_keyword('RowPositionInTotalImagePixelMatrix')
+        col_pos_tag = tag_for_keyword('ColumnPositionInTotalImagePixelMatrix')
         return (
-            row_pos_kw in self._dim_ind_col_names and
-            col_pos_kw in self._dim_ind_col_names
+            row_pos_tag in self._dim_ind_col_names and
+            col_pos_tag in self._dim_ind_col_names
         )

     def _get_unique_dim_index_values(
@@ -877,7 +942,7 @@
             input dimension index pointers.

         """
-        cols = [self._dim_ind_col_names[p] for p in dimension_index_pointers]
+        cols = [self._dim_ind_col_names[p][0] for p in dimension_index_pointers]
         cols_str = ', '.join(cols)
         cur = self._db_con.cursor()
         return {
@@ -944,3 +1009,747 @@
         cmd = (f'DROP TABLE {table_name}')
         with self._db_con:
             self._db_con.execute(cmd)
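# A minimal standalone sketch of how a temporary-table context manager like
# _generate_temp_table (above) is used: create the table, query against it
# while it exists, and rely on the context manager to drop it. Names here
# are illustrative.

from contextlib import contextmanager
import sqlite3

@contextmanager
def temp_table(con, name, defs, rows):
    con.execute(f'CREATE TABLE {name}({", ".join(defs)})')
    placeholders = ", ".join("?" * len(defs))
    con.executemany(f'INSERT INTO {name} VALUES ({placeholders})', rows)
    try:
        yield
    finally:
        con.execute(f'DROP TABLE {name}')

con = sqlite3.connect(':memory:')
with temp_table(con, 'T', ['v INTEGER'], [(1,), (2,)]):
    assert con.execute('SELECT COUNT(*) FROM T').fetchone()[0] == 2
# The table is dropped once the 'with' block exits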
+
+    def _get_pixels_by_frame(
+        self,
+        output_shape: Union[int, Tuple[int, int]],
+        indices_iterator: Iterator[
+            Tuple[
+                Tuple[Union[slice, int], ...],
+                Tuple[Union[slice, int], ...],
+                int
+            ]
+        ],
+        num_channels: int = 0,
+        dtype: Union[type, str, np.dtype, None] = None,
+    ) -> np.ndarray:
+        """Construct a pixel array given an array of frame numbers.
+
+        The output array is either 3D (``num_channels=0``) or 4D
+        (``num_channels>0``), where the dimensions are frames x rows x columns
+        x channels.
+
+        Parameters
+        ----------
+        output_shape: Union[int, Tuple[int, int]]
+            Shape of the output array. If an integer, this is the number of
+            frames in the output array and the number of rows and columns are
+            taken to match those of each frame. If a tuple of integers, it
+            contains the number of (rows, columns) in the output array and
+            there is no frame dimension (this is the tiled case). Note in
+            either case, the channels dimension (if relevant) is omitted.
+        indices_iterator: Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int ]]
+            An iterable object that yields tuples of (output_indexer,
+            frame_indexer, channel_number) that describes how to construct the
+            desired output pixel array from the multiframe image's pixel array.
+            'output_indexer' is a tuple that may be used directly to index the
+            output array to place a single frame's pixels into the output
+            array. Similarly 'frame_indexer' is a tuple that may be used
+            directly to index the image's pixel array to retrieve the pixels to
+            place into the output array, in the channel given by
+            'channel_number'. The channel number may be ``None`` if the output
+            array has no channels. Note that in both cases the indexers access
+            the frame, row and column dimensions of the relevant array, but not
+            the channel dimension (if relevant).
+        num_channels: int
+            Number of channels in the output array. The use of channels depends
+            on image type, for example it may be segments in a segmentation,
+            optical paths in a microscopy image, or B-values in an MRI.
+        dtype: Union[type, str, np.dtype, None]
+            Data type of the returned array. If None, an appropriate type will
+            be chosen automatically. If the returned values are rescaled
+            fractional values, this will be numpy.float32. Otherwise, the
+            smallest unsigned integer type that accommodates all of the output
+            values will be chosen.
+
+        Returns
+        -------
+        pixel_array: np.ndarray
+            Pixel array constructed from frames of the image.
+
+        """  # noqa: E501
+        # TODO multiple samples per pixel
+        if dtype is None:
+            if self.BitsAllocated == 1:
+                dtype = np.uint8
+            else:
+                if hasattr(self, 'FloatPixelData'):
+                    dtype = np.float32
+                elif hasattr(self, 'DoubleFloatPixelData'):
+                    dtype = np.float64
+                else:
+                    dtype = np.dtype(f"uint{self.BitsAllocated}")
+        dtype = np.dtype(dtype)
+
+        # Check dtype is suitable
+        if dtype.kind not in ('u', 'i', 'f'):
+            raise ValueError(
+                f'Data type "{dtype}" is not suitable.'
+            )
+
+        if self.pixel_array.ndim == 2:
+            h, w = self.pixel_array.shape
+        else:
+            _, h, w = self.pixel_array.shape
+
+        # Initialize empty pixel array
+        spatial_shape = (
+            output_shape
+            if isinstance(output_shape, tuple)
+            else (output_shape, h, w)
+        )
+        if num_channels > 0:
+            full_output_shape = (*spatial_shape, num_channels)
+        else:
+            full_output_shape = spatial_shape
+
+        out_array = np.zeros(
+            full_output_shape,
+            dtype=dtype
+        )
+
+        # loop through output frames
+        for (output_indexer, input_indexer, channel) in indices_iterator:
+
+            # Output indexer needs the channel index appended
+            if channel is not None:
+                output_indexer = (*output_indexer, channel)
+
+            # Copy data to the output array
+            if self.pixel_array.ndim == 2:
+                # Special case with a single frame
+                out_array[output_indexer] = self.pixel_array[input_indexer[1:]]
+            else:
+                out_array[output_indexer] = self.pixel_array[input_indexer]
+
+        return out_array
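# A standalone numpy illustration of the copy loop in _get_pixels_by_frame
# (above), which consumes a stream of (output_indexer, input_indexer,
# channel) instructions. Here two stored frames are rearranged into a
# two-frame output stack in reverse order; the data are made up:

import numpy as np

stored = np.stack([np.full((2, 2), 7), np.full((2, 2), 9)])  # stored frames
out = np.zeros((2, 2, 2), dtype=stored.dtype)
instructions = [
    ((0, slice(None), slice(None)), (1, slice(None), slice(None)), None),
    ((1, slice(None), slice(None)), (0, slice(None), slice(None)), None),
]
for output_indexer, input_indexer, channel in instructions:
    if channel is not None:
        output_indexer = (*output_indexer, channel)
    out[output_indexer] = stored[input_indexer]
assert out[0, 0, 0] == 9 and out[1, 0, 0] == 7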
+
+    def _normalize_dimension_queries(
+        self,
+        queries: Dict[Union[int, str], Any],
+        use_indices: bool,
+        multiple_values: bool,
+    ) -> Dict[str, Any]:
+        normalized_queries: Dict[str, Any] = {}
+
+        if len(queries) == 0:
+            raise ValueError("Query definitions must not be empty.")
+
+        if multiple_values:
+            n_values = len(list(queries.values())[0])
+
+        for p, value in queries.items():
+            # Reset for each queried dimension
+            tag: Optional[BaseTag] = None
+
+            if isinstance(p, int):  # also covers BaseTag
+                tag = BaseTag(p)
+
+            elif isinstance(p, str):
+                # Special cases
+                if p == 'VolumePosition':
+                    col_name = 'VolumePosition'
+                    python_type = int
+                elif p == 'ReferencedSOPInstanceUID':
+                    col_name = 'ReferencedSOPInstanceUID'
+                    python_type = str
+                elif p == 'ReferencedFrameNumber':
+                    col_name = 'ReferencedFrameNumber'
+                    python_type = int
+                else:
+                    t = tag_for_keyword(p)
+
+                    if t is None:
+                        raise ValueError(
+                            f'No attribute found with name {p}.'
+                        )
+
+                    tag = BaseTag(t)
+
+            else:
+                raise TypeError(
+                    'Every dimension used in a query must be an '
+                    'int, str, or pydicom.tag.BaseTag.'
+                )
+
+            if tag is None:
+                if use_indices:
+                    raise ValueError(
+                        f'Cannot query by index value for column {p}.'
+                    )
+            else:
+                vr, _, _, _, kw = get_entry(tag)
+                if kw == '':
+                    kw = '<unknown attribute>'
+
+                try:
+                    ind_col_name, val_col_name = self._dim_ind_col_names[tag]
+                except KeyError as e:
+                    msg = (
+                        f'The tag {tag} ({kw}) is not used as '
+                        'a dimension index for this image.'
+                    )
+                    raise KeyError(msg) from e
+
+                if use_indices:
+                    col_name = ind_col_name
+                    python_type = int
+                else:
+                    col_name = val_col_name
+                    python_type = _DCM_PYTHON_TYPE_MAP[vr]
+                    if col_name is None:
+                        raise RuntimeError(
+                            f'Cannot query attribute with tag {tag} ({kw}) '
+                            'by value. Try querying by index value instead. '
+                            'If you think this should be possible, please '
+                            'report an issue to the highdicom maintainers.'
+                        )
+                    elif isinstance(col_name, tuple):
+                        raise ValueError(
+                            f'Cannot query attribute with tag {tag} ({kw}) '
+                            'by value because it is a multi-valued attribute. '
+                            'Try querying by index value instead.'
+                        )
+
+            if multiple_values:
+                if len(value) != n_values:
+                    raise ValueError(
+                        'Number of values along all dimensions must match.'
+                    )
+                for v in value:
+                    if not isinstance(v, python_type):
+                        raise TypeError(
+                            f'For dimension {p}, expected all values to be of '
+                            f'type {python_type}.'
+                        )
+            else:
+                if not isinstance(value, python_type):
+                    raise TypeError(
+                        f'For dimension {p}, expected value to be of type '
+                        f'{python_type}.'
+                    )
+
+            if col_name in normalized_queries:
+                raise ValueError(
+                    'All dimensions must be unique.'
+                )
+            normalized_queries[col_name] = value
+
+        return normalized_queries
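# A small runnable sketch of the lookup machinery the normalization above
# relies on: pydicom's dictionary utilities resolve a user-facing keyword to
# a tag and its VR ('OpticalPathIdentifier' is chosen arbitrarily here):

from pydicom.datadict import get_entry, tag_for_keyword
from pydicom.tag import BaseTag

tag = BaseTag(tag_for_keyword('OpticalPathIdentifier'))
vr, vm, _, _, kw = get_entry(tag)
print(tag, vr, kw)  # e.g. (0048, 0106) SH OpticalPathIdentifier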
+
+    @contextmanager
+    def _iterate_indices_for_stack(
+        self,
+        stack_indices: Dict[Union[int, str], Sequence[Any]],
+        stack_dimension_use_indices: bool = False,
+        channel_indices: Optional[Dict[Union[int, str], Sequence[Any]]] = None,
+        channel_dimension_use_indices: bool = False,
+        remap_channel_indices: Optional[Sequence[int]] = None,
+        filters: Optional[Dict[Union[int, str], Any]] = None,
+        filters_use_indices: bool = False,
+    ) -> Generator[
+        Iterator[
+            Tuple[
+                Tuple[Union[slice, int], ...],
+                Tuple[Union[slice, int], ...],
+                Optional[int],
+            ]
+        ],
+        None,
+        None,
+    ]:
+        """Get indices required to reconstruct pixels into a stack of frames.
+
+        The frames will be stacked down dimension 0 of the returned array.
+        There may optionally be a channel dimension at dimension 3.
+
+        Parameters
+        ----------
+        stack_indices: Dict[Union[int, str], Sequence[Any]]
+            Dictionary defining the stack dimension (axis 0 of the output
+            array). The keys define the dimensions used. They may be either the
+            tags or keywords of attributes in the image's dimension index, or
+            the special values 'VolumePosition', 'ReferencedSOPInstanceUID',
+            and 'ReferencedFrameNumber'. The values of the dictionary give
+            sequences of values of the corresponding dimension that define each
+            slice of the output array. Note that multiple dimensions may be
+            used, in which case a frame must match the values of all provided
+            dimensions to be placed in the output array.
+        stack_dimension_use_indices: bool, optional
+            If True, the values in ``stack_indices`` are integer-valued
+            dimension *index* values. If False the dimension values themselves
+            are used, whose type depends on the choice of dimension.
+        channel_indices: Union[Dict[Union[int, str], Sequence[Any]], None], optional
+            Dictionary defining the channel dimension at axis 3 of the output
+            array, if any. Definition is identical to that of
+            ``stack_indices``, however the dimensions used must be distinct.
+        channel_dimension_use_indices: bool, optional
+            As ``stack_dimension_use_indices`` but for the channel axis.
+        remap_channel_indices: Union[Sequence[int], None], optional
+            Use these values to remap the channel indices returned in the
+            output iterator. Index ``i`` is mapped to
+            ``remap_channel_indices[i]``. Ignored if ``channel_indices`` is
+            ``None``. If ``None`` no mapping is performed.
+        filters: Union[Dict[Union[int, str], Any], None], optional
+            Additional filters to use to limit frames. Definition is similar to
+            ``stack_indices`` except that the dictionary's values are single
+            values rather than lists.
+        filters_use_indices: bool, optional
+            As ``stack_dimension_use_indices`` but for the filters.
+
+        Yields
+        ------
+        Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]:
+            Indices required to construct the requested output array. Each
+            triplet denotes the (output indexer, input indexer, output channel
+            number) representing a list of "instructions" to create the
+            requested output array by copying frames from the image dataset and
+            inserting them into the output array. Output indexer and input
+            indexer are tuples that can be used to index the output array and
+            image numpy arrays directly. Output channel number will be ``None``
+            if ``channel_indices`` is ``None``.
+
+        """
+        norm_stack_indices = self._normalize_dimension_queries(
+            stack_indices,
+            stack_dimension_use_indices,
+            True,
+        )
+        all_columns = list(norm_stack_indices.keys())
+
+        if channel_indices is not None:
+            norm_channel_indices = self._normalize_dimension_queries(
+                channel_indices,
+                channel_dimension_use_indices,
+                True,
+            )
+            all_columns.extend(list(norm_channel_indices.keys()))
+        else:
+            norm_channel_indices = None
+
+        if filters is not None:
+            norm_filters = self._normalize_dimension_queries(
+                filters,
+                filters_use_indices,
+                False,
+            )
+            all_columns.extend(list(norm_filters.keys()))
+        else:
+            norm_filters = None
+
+        all_dimensions = [
+            c.replace('_DimensionIndexValues', '')
+            for c in all_columns
+        ]
+        if len(set(all_dimensions)) != len(all_dimensions):
+            raise ValueError(
+                'Dimensions used for stack, channel and filter must all be '
+                'distinct.'
+            )
+
+        # Check for uniqueness
+        if not self._are_columns_unique(all_columns):
+            raise RuntimeError(
+                'The chosen dimensions do not uniquely identify frames of '
+                'the image. You may need to provide further dimensions or '
+                'a filter to disambiguate.'
+            )
+
+        # Create temporary table of desired dimension indices
+        stack_table_name = 'TemporaryStackTable'
+
+        stack_column_defs = (
+            ['OutputFrameIndex INTEGER UNIQUE NOT NULL'] +
+            [
+                f'{c} {self._col_types[c]} NOT NULL'
+                for c in norm_stack_indices.keys()
+            ]
+        )
+        stack_column_data = (
+            (i, *row)
+            for i, row in enumerate(zip(*norm_stack_indices.values()))
+        )
+        stack_join_str = ' AND '.join(
            f'F.{col} = L.{col}' for col in norm_stack_indices.keys()
+        )
+
+        # Filters
+        if norm_filters is not None:
+            filter_comparisons = []
+            for c, v in norm_filters.items():
+                if isinstance(v, str):
+                    v = f"'{v}'"
+                filter_comparisons.append(f'L.{c} = {v}')
+            filter_str = 'WHERE ' + ' AND '.join(filter_comparisons)
+        else:
+            filter_str = ''
+
+        if norm_channel_indices is None:
+
+            # Construct the query. The ORDER BY is not logically necessary but
+            # seems to improve performance of the downstream numpy operations,
+            # presumably as it is more cache efficient
+            query = (
+                'SELECT '
+                '    F.OutputFrameIndex,'  # frame index of the output array
+                '    L.FrameNumber - 1 '  # frame *index* within the image
+                f'FROM {stack_table_name} F '
+                'INNER JOIN FrameLUT L'
+                f'    ON {stack_join_str} '
+                f'{filter_str} '
+                'ORDER BY F.OutputFrameIndex'
+            )
+
+            with self._generate_temp_table(
+                table_name=stack_table_name,
+                column_defs=stack_column_defs,
+                column_data=stack_column_data,
+            ):
+                yield (
+                    (
+                        (fo, slice(None), slice(None)),
+                        (fi, slice(None), slice(None)),
+                        None
+                    )
+                    for (fo, fi) in self._db_con.execute(query)
+                )
+        else:
+            # Create temporary table of channel indices
+            channel_table_name = 'TemporaryChannelTable'
+
+            channel_column_defs = (
+                ['OutputChannelIndex INTEGER UNIQUE NOT NULL'] +
+                [
+                    f'{c} {self._col_types[c]} NOT NULL'
+                    for c in norm_channel_indices.keys()
+                ]
+            )
+
+            num_channels = len(list(norm_channel_indices.values())[0])
+            if remap_channel_indices is not None:
+                output_channel_indices = remap_channel_indices
+            else:
+                output_channel_indices = range(num_channels)
+
+            channel_column_data = zip(
+                output_channel_indices,
+                *norm_channel_indices.values()
+            )
+            channel_join_str = ' AND '.join(
+                f'L.{col} = C.{col}' for col in norm_channel_indices.keys()
+            )
+
+            # Construct the query. The ORDER BY is not logically necessary but
+            # seems to improve performance of the downstream numpy operations,
+            # presumably as it is more cache efficient
+            query = (
+                'SELECT '
+                '    F.OutputFrameIndex,'  # frame index of the output array
+                '    L.FrameNumber - 1,'  # frame *index* within the image
+                '    C.OutputChannelIndex '  # channel index of the output array
+                f'FROM {stack_table_name} F '
+                'INNER JOIN FrameLUT L'
+                f'    ON {stack_join_str} '
+                f'INNER JOIN {channel_table_name} C'
+                f'    ON {channel_join_str} '
+                f'{filter_str} '
+                'ORDER BY F.OutputFrameIndex'
+            )
+
+            with self._generate_temp_table(
+                table_name=stack_table_name,
+                column_defs=stack_column_defs,
+                column_data=stack_column_data,
+            ):
+                with self._generate_temp_table(
+                    table_name=channel_table_name,
+                    column_defs=channel_column_defs,
+                    column_data=channel_column_data,
+                ):
+                    yield (
+                        (
+                            (fo, slice(None), slice(None)),
+                            (fi, slice(None), slice(None)),
+                            channel
+                        )
+                        for (fo, fi, channel) in self._db_con.execute(query)
+                    )
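# A standalone check of the clipped-slice arithmetic used by the tiled-region
# iterator below, which places each stored tile into the requested output
# region. All values are made up; positions are 1-based, as in the method:

th, tw = 4, 4              # tile shape (rows, columns)
row_start, row_end = 3, 9  # requested region rows [3, 9)
rp = 5                     # tile's row position in the total pixel matrix
oh = row_end - row_start
out_rows = slice(max(rp - row_start, 0), min(rp + th - row_start, oh))
in_rows = slice(max(row_start - rp, 0), min(row_end - rp, th))
# The output slice and input slice always cover the same number of rows
assert (out_rows.stop - out_rows.start) == (in_rows.stop - in_rows.start)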
+
+    @contextmanager
+    def _iterate_indices_for_tiled_region(
+        self,
+        row_start: int,
+        row_end: int,
+        column_start: int,
+        column_end: int,
+        tile_shape: Tuple[int, int],
+        channel_indices: Optional[Dict[Union[int, str], Sequence[Any]]] = None,
+        channel_dimension_use_indices: bool = False,
+        remap_channel_indices: Optional[Sequence[int]] = None,
+        filters: Optional[Dict[Union[int, str], Any]] = None,
+        filters_use_indices: bool = False,
+    ) -> Generator[
+        Iterator[
+            Tuple[
+                Tuple[Union[slice, int], ...],
+                Tuple[Union[slice, int], ...],
+                Optional[int],
+            ]
+        ],
+        None,
+        None,
+    ]:
+        """Iterate over frame indices for a given region of the image's
+        total pixel matrix.
+
+        This is intended for the case of an image that is stored as a tiled
+        representation of its total pixel matrix.
+
+        This yields an iterator to the underlying database result that iterates
+        over information on the steps required to construct the requested
+        region from the stored frames.
+
+        This method is intended to be used as a context manager that yields the
+        requested iterator. The iterator is only valid while the context
+        manager is active.
+
+        Parameters
+        ----------
+        row_start: int
+            Row index (1-based) in the total pixel matrix of the first row of
+            the output array. May be negative (last row is -1).
+        row_end: int
+            Row index (1-based) in the total pixel matrix one beyond the last
+            row of the output array. May be negative (last row is -1).
+        column_start: int
+            Column index (1-based) in the total pixel matrix of the first
+            column of the output array. May be negative (last column is -1).
+        column_end: int
+            Column index (1-based) in the total pixel matrix one beyond the last
+            column of the output array. May be negative (last column is -1).
+        tile_shape: Tuple[int, int]
+            Shape of each tile (rows, columns).
+        channel_indices: Union[Dict[Union[int, str], Sequence[Any]], None], optional
+            Dictionary defining the channel dimension at axis 2 of the output
+            array, if any. Definition is identical to that of
+            ``stack_indices``, however the dimensions used must be distinct.
+        channel_dimension_use_indices: bool, optional
+            As ``stack_dimension_use_indices`` but for the channel axis.
+        remap_channel_indices: Union[Sequence[int], None], optional
+            Use these values to remap the channel indices returned in the
+            output iterator. Index ``i`` is mapped to
+            ``remap_channel_indices[i]``. Ignored if ``channel_indices`` is
+            ``None``. If ``None`` no mapping is performed.
+        filters: Union[Dict[Union[int, str], Any], None], optional
+            Additional filters to use to limit frames. Definition is similar to
+            ``stack_indices`` except that the dictionary's values are single
+            values rather than lists.
+        filters_use_indices: bool, optional
+            As ``stack_dimension_use_indices`` but for the filters.
+
+        Yields
+        ------
+        Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]:
+            Indices required to construct the requested output array. Each
+            triplet denotes the (output indexer, input indexer, output channel
+            number) representing a list of "instructions" to create the
+            requested output array by copying frames from the image dataset and
+            inserting them into the output array. Output indexer and input
+            indexer are tuples that can be used to index the output array and
+            image numpy arrays directly. Output channel number will be ``None``
+            if ``channel_indices`` is ``None``.
+
+        """  # noqa: E501
+        all_columns = [
+            'RowPositionInTotalImagePixelMatrix',
+            'ColumnPositionInTotalImagePixelMatrix',
+        ]
+        if channel_indices is not None:
+            norm_channel_indices = self._normalize_dimension_queries(
+                channel_indices,
+                channel_dimension_use_indices,
+                True,
+            )
+            all_columns.extend(list(norm_channel_indices.keys()))
+        else:
+            norm_channel_indices = None
+
+        if filters is not None:
+            norm_filters = self._normalize_dimension_queries(
+                filters,
+                filters_use_indices,
+                False,
+            )
+            all_columns.extend(list(norm_filters.keys()))
+        else:
+            norm_filters = None
+
+        all_dimensions = [
+            c.replace('_DimensionIndexValues', '')
+            for c in all_columns
+        ]
+        if len(set(all_dimensions)) != len(all_dimensions):
+            raise ValueError(
+                'Dimensions used for channels and filters must all be '
+                'distinct.'
+            )
+
+        # Check for uniqueness
+        if not self._are_columns_unique(all_columns):
+            raise RuntimeError(
+                'The chosen dimensions do not uniquely identify frames of '
+                'the image. You may need to provide further dimensions or '
+                'a filter to disambiguate.'
+            )
+
+        # Filters
+        if norm_filters is not None:
+            filter_comparisons = []
+            for c, v in norm_filters.items():
+                if isinstance(v, str):
+                    v = f"'{v}'"
+                filter_comparisons.append(f'L.{c} = {v}')
+            filter_str = ' AND ' + ' AND '.join(filter_comparisons)
+        else:
+            filter_str = ''
+
+        th, tw = tile_shape
+
+        oh = row_end - row_start
+        ow = column_end - column_start
+
+        row_offset_start = row_start - th + 1
+        column_offset_start = column_start - tw + 1
+
+        # Construct the query. The ORDER BY is not logically necessary
+        # but seems to improve performance of the downstream numpy
+        # operations, presumably as it is more cache efficient
+        if norm_channel_indices is None:
+            query = (
+                'SELECT '
+                '    L.RowPositionInTotalImagePixelMatrix,'
+                '    L.ColumnPositionInTotalImagePixelMatrix,'
+                '    L.FrameNumber - 1 '
+                'FROM FrameLUT L '
+                'WHERE ('
+                '    L.RowPositionInTotalImagePixelMatrix >= '
+                f'        {row_offset_start}'
+                f'    AND L.RowPositionInTotalImagePixelMatrix < {row_end}'
+                '    AND L.ColumnPositionInTotalImagePixelMatrix >= '
+                f'        {column_offset_start}'
+                f'    AND L.ColumnPositionInTotalImagePixelMatrix < {column_end}'
+                f'    {filter_str} '
+                ')'
+                'ORDER BY '
+                '    L.RowPositionInTotalImagePixelMatrix,'
+                '    L.ColumnPositionInTotalImagePixelMatrix'
+            )
+
+            yield (
+                (
+                    (
+                        slice(
+                            max(rp - row_start, 0),
+                            min(rp + th - row_start, oh)
+                        ),
+                        slice(
+                            max(cp - column_start, 0),
+                            min(cp + tw - column_start, ow)
+                        ),
+                    ),
+                    (
+                        fi,
+                        slice(
+                            max(row_start - rp, 0),
+                            min(row_end - rp, th)
+                        ),
+                        slice(
+                            max(column_start - cp, 0),
+                            min(column_end - cp, tw)
+                        ),
+                    ),
+                    None,
+                )
+                for (rp, cp, fi) in self._db_con.execute(query)
+            )
+
+        else:
+            # Create temporary table of channel indices
+            channel_table_name = 'TemporaryChannelTable'
+
+            channel_column_defs = (
+                ['OutputChannelIndex INTEGER UNIQUE NOT NULL'] +
+                [
+                    f'{c} {self._col_types[c]} NOT NULL'
+                    for c in norm_channel_indices.keys()
+                ]
+            )
+
+            num_channels = len(list(norm_channel_indices.values())[0])
+            if remap_channel_indices is not None:
+                output_channel_indices = remap_channel_indices
+            else:
+                output_channel_indices = range(num_channels)
+
+            channel_column_data = zip(
+                output_channel_indices,
+                *norm_channel_indices.values()
+            )
+            channel_join_str = ' AND '.join(
+                f'L.{col} = C.{col}' for col in norm_channel_indices.keys()
+            )
+
+            query = (
+                'SELECT '
+                '    L.RowPositionInTotalImagePixelMatrix,'
+                '    L.ColumnPositionInTotalImagePixelMatrix,'
+                '    L.FrameNumber - 1,'
+                '    C.OutputChannelIndex '
+                'FROM FrameLUT L '
+                f'INNER JOIN {channel_table_name} C'
+                f'    ON {channel_join_str} '
+                'WHERE ('
+                '    L.RowPositionInTotalImagePixelMatrix >= '
+                f'        {row_offset_start}'
+                f'    AND L.RowPositionInTotalImagePixelMatrix < {row_end}'
+                '    AND L.ColumnPositionInTotalImagePixelMatrix >= '
+                f'        {column_offset_start}'
+                f'    AND L.ColumnPositionInTotalImagePixelMatrix < {column_end}'
+                f'    {filter_str} '
+                ')'
+                'ORDER BY '
+                '    L.RowPositionInTotalImagePixelMatrix,'
+                '    L.ColumnPositionInTotalImagePixelMatrix'
+            )
+
+            with self._generate_temp_table(
+                table_name=channel_table_name,
+                column_defs=channel_column_defs,
+                column_data=channel_column_data,
+            ):
+                yield (
+                    (
+                        (
+                            slice(
+                                max(rp - row_start, 0),
+                                min(rp + th - row_start, oh)
+                            ),
+                            slice(
+                                max(cp - column_start, 0),
+                                min(cp + tw - column_start, ow)
+                            ),
+                        ),
+                        (
+                            fi,
+                            slice(
+                                max(row_start - rp, 0),
+                                min(row_end - rp, th)
+                            ),
+                            slice(
+                                max(column_start - cp, 0),
+                                min(column_end - cp, tw)
+                            ),
+                        ),
+                        channel
+                    )
+                    for (rp, cp, fi, channel) in self._db_con.execute(query)
+                )
diff --git 
a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index f6b2de52..0a5ec8a3 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -13,6 +13,7 @@ Dict, Generator, Iterator, + Iterable, List, Optional, Sequence, @@ -2868,7 +2869,7 @@ def get_segment_numbers( tracking_uid: Optional[str] = None, tracking_id: Optional[str] = None, ) -> List[int]: - """Get a list of segment numbers matching provided criteria. + """Get a list of non-background segment numbers with given criteria. Any number of optional filters may be provided. A segment must match all provided filters to be included in the returned list. @@ -2891,7 +2892,8 @@ def get_segment_numbers( Returns ------- List[int] - List of all segment numbers matching the provided criteria. + List of all non-background segment numbers matching the provided + criteria. Examples -------- @@ -2958,6 +2960,10 @@ def get_segment_numbers( filter_funcs.append( lambda desc: desc.tracking_id == tracking_id ) + if hasattr(self, 'PixelPaddingValue'): + filter_funcs.append( + lambda desc: desc.segment_number != self.PixelPaddingValue + ) return [ desc.segment_number @@ -3114,7 +3120,7 @@ def _get_pixels_by_seg_frame( Parameters ---------- output_shape: Union[int, Tuple[int, int]] - Shape of the output array. If an integer is False, this is the + Shape of the output array. If an integer, this is the number of frames in the output array and the number of rows and columns are taken to match those of each segmentation frame. If a tuple of integers, it contains the number of (rows, columns) in the @@ -3129,10 +3135,11 @@ def _get_pixels_by_seg_frame( directly to index the output array to place a single frame's pixels into the output array. Similarly 'segmentation_indexer' is a tuple that may be used directly to index the segmentation pixel array - to retrieve the pixels to place into the output array. - with as segment number 'output_segment_number'. Note that in both - cases the indexers access the frame, row and column dimensions of - the relevant array, but not the segment dimension (if relevant). + to retrieve the pixels to place into the output array + with zero-based segment number 'output_segment_number'. Note that + in both cases the indexers access the frame, row and column + dimensions of the relevant array, but not the segment dimension (if + relevant). segment_numbers: np.ndarray One dimensional numpy array containing segment numbers corresponding to the columns of the seg frames matrix. @@ -3174,14 +3181,49 @@ def _get_pixels_by_seg_frame( Segmentation pixel array """ # noqa: E501 - if ( - segment_numbers.min() < 1 or - segment_numbers.max() > self.number_of_segments - ): + if not np.all(np.isin(segment_numbers, self.segment_numbers)): raise ValueError( 'Segment numbers array contains invalid values.' 
)

+        if self.segmentation_type == SegmentationTypeValues.LABELMAP:
+            out_array = self._get_pixels_by_frame(
+                output_shape=output_shape,
+                indices_iterator=indices_iterator,
+            )
+
+            max_segment = segment_numbers.max()
+            segment_numbers_list = segment_numbers.tolist()
+            remapping = np.zeros(max_segment + 1, np.uint16)
+            bg_val = self.get('PixelPaddingValue', 0)
+            if combine_segments and not relabel:
+                # Keep the original segment numbers, mapping any segment
+                # that was not requested to the background value
+                for s in range(max_segment + 1):
+                    remapping[s] = (
+                        s if s in segment_numbers_list
+                        else bg_val
+                    )
+            else:
+                # Relabel requested segments to consecutive values
+                for s in range(max_segment + 1):
+                    remapping[s] = (
+                        segment_numbers_list.index(s) + 1
+                        if s in segment_numbers_list
+                        else bg_val
+                    )
+
+            if not np.array_equal(
+                remapping,
+                np.arange(max_segment + 1),
+            ):
+                out_array = remapping[out_array]
+
+            if not combine_segments:
+                # One-hot encode by indexing into an identity matrix with
+                # the relabelled segment values. Channel 0 corresponds to
+                # background, so keep one channel per requested segment
+                shape = out_array.shape
+                flat_array = out_array.flatten()
+                one_hot = np.eye(
+                    max_segment + 1,
+                    dtype=out_array.dtype,
+                )[flat_array]
+                out_array = one_hot.reshape(
+                    *shape, max_segment + 1
+                )[..., 1:len(segment_numbers_list) + 1]
+
+            return out_array
+
         # Determine output type
         if combine_segments:
             max_output_val = (
@@ -3220,12 +3262,13 @@
             _check_numpy_value_representation(max_output_val, dtype)

         num_segments = len(segment_numbers)
-        if self.pixel_array.ndim == 2:
-            h, w = self.pixel_array.shape
-        else:
-            _, h, w = self.pixel_array.shape

         if combine_segments:
+            if self.pixel_array.ndim == 2:
+                h, w = self.pixel_array.shape
+            else:
+                _, h, w = self.pixel_array.shape
+
             # Check whether segmentation is binary, or fractional with only
             # binary values
             if self.segmentation_type == SegmentationTypeValues.FRACTIONAL:
@@ -3289,32 +3332,13 @@
             )

         else:
-            # Initialize empty pixel array
-            full_output_shape = (
-                (*output_shape, num_segments)
-                if isinstance(output_shape, tuple)
-                else (output_shape, h, w, num_segments)
-            )
-            out_array = np.zeros(
-                full_output_shape,
-                dtype=intermediate_dtype
-            )
+            out_array = self._get_pixels_by_frame(
+                output_shape=output_shape,
+                indices_iterator=indices_iterator,
+                num_channels=num_segments,
+                dtype=intermediate_dtype,
+            )

-            # loop through output frames
-            for (output_indexer, seg_indexer, seg_n) in indices_iterator:
-
-                # Output indexer needs segment index
-                output_indexer = (*output_indexer, seg_n)
-
-                # Copy data to the output array
-                if self.pixel_array.ndim == 2:
-                    # Special case with a single segmentation frame
-                    out_array[output_indexer] = \
-                        self.pixel_array.copy()
-                else:
-                    out_array[output_indexer] = \
-                        self.pixel_array[seg_indexer].copy()
-
             if rescale_fractional:
                 if self.segmentation_type == SegmentationTypeValues.FRACTIONAL:
                     if out_array.max() > self.MaximumFractionalValue:
@@ -3410,8 +3434,7 @@

         This is a pre-requisite for requesting segmentation masks defined by
         the SOP Instance UIDs of their source frames, such as using the
-        Segmentation.get_pixels_by_source_instance() method and
-        Segmentation._iterate_indices_by_source_instance() method.
+        Segmentation.get_pixels_by_source_instance() method.

         Returns
         -------
@@ -3448,689 +3471,43 @@ def _are_referenced_frames_unique(self) -> bool:
         ).fetchone()[0]
         return n_unique_combos == self.NumberOfFrames

-    @contextmanager
-    def _generate_temp_segment_table(
+    def _get_segment_remap_values(
         self,
         segment_numbers: Sequence[int],
         combine_segments: bool,
         relabel: bool
- - The temporary table is named "TemporarySegmentNumbers" with columns - OutputSegmentNumber and SegmentNumber that are populated with values - derived from the input. Control flow then returns to code within the - "with" block. After the "with" block has completed, the cleanup of - the table is automatically handled. - - Parameters - ---------- - segment_numbers: Sequence[int] - Segment numbers to include, in the order desired. - combine_segments: bool - Whether the segments will be combined into a label map. - relabel: bool - Whether the output segment numbers should be relabelled to 1-n - (True) or retain their values in the original segmentation object. - - Yields - ------ - None: - Yields control to the "with" block, with the temporary table - created. - - """ - if combine_segments: - if relabel: - # Output segment numbers are consecutive and start at 1 - data = enumerate(segment_numbers, 1) - else: - # Output segment numbers are the same as the input - # segment numbers - data = zip(segment_numbers, segment_numbers) - else: - # Output segment numbers are indices along the output - # array's segment dimension, so are consecutive starting at - # 0 - data = enumerate(segment_numbers) - - cmd = ( - 'CREATE TABLE TemporarySegmentNumbers(' - ' SegmentNumber INTEGER UNIQUE NOT NULL,' - ' OutputSegmentNumber INTEGER UNIQUE NOT NULL' - ')' - ) - - with self._db_con: - self._db_con.execute(cmd) - self._db_con.executemany( - 'INSERT INTO ' - 'TemporarySegmentNumbers(' - ' OutputSegmentNumber, SegmentNumber' - ')' - 'VALUES(?, ?)', - data - ) - - # Yield execution to "with" block - yield - - # Clean up table after user code executes - with self._db_con: - self._db_con.execute('DROP TABLE TemporarySegmentNumbers') - - @contextmanager - def _iterate_indices_by_source_instance( - self, - source_sop_instance_uids: Sequence[str], - segment_numbers: Sequence[int], - combine_segments: bool = False, - relabel: bool = False, - ) -> Generator[ - Iterator[ - Tuple[ - Tuple[Union[slice, int], ...], - Tuple[Union[slice, int], ...], - int - ] - ], - None, - None, - ]: - """Iterate over segmentation frame indices for given source image - instances. - - This is intended for the case of a segmentation image that references - multiple single frame sources images (typically a series). In this - case, the user supplies a list of SOP Instance UIDs of the source - images of interest, and this method returns information about the - frames of the segmentation image relevant to these source images. - - This yields an iterator to the underlying database result that iterates - over information on the steps required to construct the requested - segmentation mask from the stored frames of the segmentation image. - - This method is intended to be used as a context manager that yields the - requested iterator. The iterator is only valid while the context - manager is active. - - Parameters - ---------- - source_sop_instance_uids: str - SOP Instance UID of the source instances for which segmentation - image frames are requested. - segment_numbers: Sequence[int] - Numbers of segments to include. - combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. 
- relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to - ``len(segment_numbers)`` (inclusive) according to the position of - the original segment numbers in ``segment_numbers`` parameter. If - ``combine_segments`` is ``False``, this has no effect. - - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. - - """ # noqa: E501 - # Run query to create the iterable of indices needed to construct the - # desired pixel array. The approach here is to create two temporary - # tables in the SQLite database, one for the desired source UIDs, and - # another for the desired segments, then use table joins with the - # referenced UIDs table and the frame LUT at the relevant rows, before - # clearing up the temporary tables. - - # Create temporary table of desired frame numbers - table_name = 'TemporarySOPInstanceUIDs' - column_defs = [ - 'OutputFrameIndex INTEGER UNIQUE NOT NULL', - 'SourceSOPInstanceUID VARCHAR UNIQUE NOT NULL' - ] - column_data = enumerate(source_sop_instance_uids) - - # Construct the query The ORDER BY is not logically necessary - # but seems to improve performance of the downstream numpy - # operations, presumably as it is more cache efficient - query = ( - 'SELECT ' - ' T.OutputFrameIndex,' - ' L.FrameNumber - 1,' - ' S.OutputSegmentNumber ' - 'FROM TemporarySOPInstanceUIDs T ' - 'INNER JOIN FrameLUT L' - ' ON T.SourceSOPInstanceUID = L.ReferencedSOPInstanceUID ' - 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' - 'ORDER BY T.OutputFrameIndex' - ) - - with self._generate_temp_table( - table_name=table_name, - column_defs=column_defs, - column_data=column_data, - ): - with self._generate_temp_segment_table( - segment_numbers=segment_numbers, - combine_segments=combine_segments, - relabel=relabel - ): - yield ( - ( - (fo, slice(None), slice(None)), - (fi, slice(None), slice(None)), - seg_no - ) - for (fo, fi, seg_no) in self._db_con.execute(query) - ) - - @contextmanager - def _iterate_indices_by_source_frame( - self, - source_sop_instance_uid: str, - source_frame_numbers: Sequence[int], - segment_numbers: Sequence[int], - combine_segments: bool = False, - relabel: bool = False, - ) -> Generator[ - Iterator[ - Tuple[ - Tuple[Union[slice, int], ...], - Tuple[Union[slice, int], ...], - int - ] - ], - None, - None, - ]: - """Iterate over frame indices for given source image frames. - - This is intended for the case of a segmentation image that references a - single multi-frame source image instance. In this case, the user - supplies a list of frames numbers of interest within the single source - instance, and this method returns information about the frames - of the segmentation image relevant to these frames. - - This yields an iterator to the underlying database result that iterates - over information on the steps required to construct the requested - segmentation mask from the stored frames of the segmentation image. 
- - This method is intended to be used as a context manager that yields the - requested iterator. The iterator is only valid while the context - manager is active. - - Parameters - ---------- - source_sop_instance_uid: str - SOP Instance UID of the source instance that contains the source - frames. - source_frame_numbers: Sequence[int] - A sequence of frame numbers (1-based) within the source instance - for which segmentations are requested. - segment_numbers: Sequence[int] - Sequence containing segment numbers to include. - combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. - relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to - ``len(segment_numbers)`` (inclusive) according to the position of - the original segment numbers in ``segment_numbers`` parameter. If - ``combine_segments`` is ``False``, this has no effect. - - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. - - """ # noqa: E501 - # Run query to create the iterable of indices needed to construct the - # desired pixel array. The approach here is to create two temporary - # tables in the SQLite database, one for the desired frame numbers, and - # another for the desired segments, then use table joins with the frame - # LUT to arrive at the relevant rows, before clearing up the temporary - # tables. 
-
-        # Create temporary table of desired frame numbers
-        table_name = 'TemporaryFrameNumbers'
-        column_defs = [
-            'OutputFrameIndex INTEGER UNIQUE NOT NULL',
-            'SourceFrameNumber INTEGER UNIQUE NOT NULL'
-        ]
-        column_data = enumerate(source_frame_numbers)
-
-        # Construct the query. The ORDER BY is not logically necessary
-        # but seems to improve performance of the downstream numpy
-        # operations, presumably as it is more cache efficient
-        query = (
-            'SELECT '
-            '    F.OutputFrameIndex,'
-            '    L.FrameNumber - 1,'
-            '    S.OutputSegmentNumber '
-            'FROM TemporaryFrameNumbers F '
-            'INNER JOIN FrameLUT L'
-            '    ON F.SourceFrameNumber = L.ReferencedFrameNumber '
-            'INNER JOIN TemporarySegmentNumbers S'
-            '    ON L.ReferencedSegmentNumber = S.SegmentNumber '
-            'ORDER BY F.OutputFrameIndex'
-        )
-
-        with self._generate_temp_table(
-            table_name=table_name,
-            column_defs=column_defs,
-            column_data=column_data,
-        ):
-            with self._generate_temp_segment_table(
-                segment_numbers=segment_numbers,
-                combine_segments=combine_segments,
-                relabel=relabel
-            ):
-                yield (
-                    (
-                        (fo, slice(None), slice(None)),
-                        (fi, slice(None), slice(None)),
-                        seg_no
-                    )
-                    for (fo, fi, seg_no) in self._db_con.execute(query)
-                )
-
-    @contextmanager
-    def _iterate_indices_for_tiled_region(
-        self,
-        row_start: int,
-        row_end: int,
-        column_start: int,
-        column_end: int,
-        tile_shape: Tuple[int, int],
-        segment_numbers: Sequence[int],
-        combine_segments: bool = False,
-        relabel: bool = False,
-    ) -> Generator[
-        Iterator[
-            Tuple[
-                Tuple[Union[slice, int], ...],
-                Tuple[Union[slice, int], ...],
-                int
-            ]
-        ],
-        None,
-        None,
-    ]:
-        """Iterate over segmentation frame indices for a given region of the
-        segmentation's total pixel matrix.
-
-        This is intended for the case of a segmentation image that is stored as
-        a tiled representation of the total pixel matrix.
-
-        This yields an iterator to the underlying database result that iterates
-        over information on the steps required to construct the requested
-        segmentation mask from the stored frames of the segmentation image.
-
-        This method is intended to be used as a context manager that yields the
-        requested iterator. The iterator is only valid while the context
-        manager is active.
-
-        Parameters
-        ----------
-        row_start: int
-            Row index (1-based) in the total pixel matrix of the first row of
-            the output array. May be negative (last row is -1).
-        row_end: int
-            Row index (1-based) in the total pixel matrix one beyond the last
-            row of the output array. May be negative (last row is -1).
-        column_start: int
-            Column index (1-based) in the total pixel matrix of the first
-            column of the output array. May be negative (last column is -1).
-        column_end: int
-            Column index (1-based) in the total pixel matrix one beyond the last
-            column of the output array. May be negative (last column is -1).
-        tile_shape: Tuple[int, int]
-            Shape of each tile (rows, columns).
-        segment_numbers: Sequence[int]
-            Numbers of segments to include.
-        combine_segments: bool, optional
-            If True, produce indices to combine the different segments into a
-            single label map in which the value of a pixel represents its
-            segment. If False (the default), segments are binary and stacked
-            down the last dimension of the output array.
-        relabel: bool, optional
-            If True and ``combine_segments`` is ``True``, the output segment
-            numbers are relabelled into the range ``0`` to
-            ``len(segment_numbers)`` (inclusive) according to the position of
-            the original segment numbers in ``segment_numbers`` parameter. If
-            ``combine_segments`` is ``False``, this has no effect.
-
-        Yields
-        ------
-        Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]:
-            Indices required to construct the requested mask. Each
-            triplet denotes the (output indexer, segmentation indexer,
-            output segment number) representing a list of "instructions" to
-            create the requested output array by copying frames from the
-            segmentation dataset and inserting them into the output array with
-            a given segment value. Output indexer and segmentation indexer are
-            tuples that can be used to index the output and segmentations
-            numpy arrays directly.
-
-        """  # noqa: E501
-        th, tw = tile_shape
-
-        oh = row_end - row_start
-        ow = column_end - column_start
-
-        row_offset_start = row_start - th + 1
-        column_offset_start = column_start - tw + 1
-
-        # Construct the query. The ORDER BY is not logically necessary
-        # but seems to improve performance of the downstream numpy
-        # operations, presumably as it is more cache efficient
-        query = (
-            'SELECT '
-            '    L.RowPositionInTotalImagePixelMatrix,'
-            '    L.ColumnPositionInTotalImagePixelMatrix,'
-            '    L.FrameNumber - 1,'
-            '    S.OutputSegmentNumber '
-            'FROM FrameLUT L '
-            'INNER JOIN TemporarySegmentNumbers S'
-            '    ON L.ReferencedSegmentNumber = S.SegmentNumber '
-            'WHERE ('
-            '    L.RowPositionInTotalImagePixelMatrix >= '
-            f'        {row_offset_start}'
-            f'    AND L.RowPositionInTotalImagePixelMatrix < {row_end}'
-            '    AND L.ColumnPositionInTotalImagePixelMatrix >= '
-            f'        {column_offset_start}'
-            f'    AND L.ColumnPositionInTotalImagePixelMatrix < {column_end}'
-            ')'
-            'ORDER BY '
-            '    L.RowPositionInTotalImagePixelMatrix,'
-            '    L.ColumnPositionInTotalImagePixelMatrix,'
-            '    S.OutputSegmentNumber'
-        )
-
-        with self._generate_temp_segment_table(
-            segment_numbers=segment_numbers,
-            combine_segments=combine_segments,
-            relabel=relabel
-        ):
-            yield (
-                (
-                    (
-                        slice(
-                            max(rp - row_start, 0),
-                            min(rp + th - row_start, oh)
-                        ),
-                        slice(
-                            max(cp - column_start, 0),
-                            min(cp + tw - column_start, ow)
-                        ),
-                    ),
-                    (
-                        fi,
-                        slice(
-                            max(row_start - rp, 0),
-                            min(row_end - rp, th)
-                        ),
-                        slice(
-                            max(column_start - cp, 0),
-                            min(column_end - cp, tw)
-                        ),
-                    ),
-                    seg_no
-                )
-                for (rp, cp, fi, seg_no) in self._db_con.execute(query)
-            )
-
-    @contextmanager
-    def _iterate_indices_for_volume(
-        self,
-        slice_start: int,
-        slice_end: int,
-        segment_numbers: Sequence[int],
-        combine_segments: bool = False,
-        relabel: bool = False,
-    ) -> Generator[
-        Iterator[
-            Tuple[
-                Tuple[Union[slice, int], ...],
-                Tuple[Union[slice, int], ...],
-                int
-            ]
-        ],
-        None,
-        None,
-    ]:
-        """Iterate over frame indices sorted by volume.
-
-        This yields an iterator to the underlying database result that iterates
-        over information on the steps required to construct the requested
-        segmentation mask from the stored frames of the segmentation image.
-
-        This method is intended to be used as a context manager that yields the
-        requested iterator. The iterator is only valid while the context
-        manager is active.
-
-        Parameters
-        ----------
-        slice_start: int, optional
-            Zero-based index of the "volume position" of the first slice of the
-            returned volume. The "volume position" refers to the position of
-            slices after sorting spatially, and may correspond to any frame in
-            the segmentation file, depending on its construction. Must be a
-            non-negative integer.
-        slice_end: Union[int, None], optional
-            Zero-based index of the "volume position" one beyond the last slice
-            of the returned volume. The "volume position" refers to the
The "volume position" refers to the - position of slices after sorting spatially, and may correspond to - any frame in the segmentation file, depending on its construction. - Must be a positive integer. - segment_numbers: Sequence[int] - Sequence containing segment numbers to include. - combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. - relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to - ``len(segment_numbers)`` (inclusive) according to the position of - the original segment numbers in ``segment_numbers`` parameter. If - ``combine_segments`` is ``False``, this has no effect. - - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. - - """ # noqa: E501 - if self.volume_geometry is None: - raise RuntimeError( - 'This segmentation does not represent a regularly-spaced ' - 'volume.' - ) - - # Construct the query The ORDER BY is not logically necessary - # but seems to improve performance of the downstream numpy - # operations, presumably as it is more cache efficient - query = ( - 'SELECT ' - f' L.VolumePosition - {slice_start},' - ' L.FrameNumber - 1,' - ' S.OutputSegmentNumber ' - 'FROM FrameLUT L ' - 'INNER JOIN TemporarySegmentNumbers S' - ' ON L.ReferencedSegmentNumber = S.SegmentNumber ' - 'WHERE ' - f' L.VolumePosition >= {slice_start} AND ' - f' L.VolumePosition < {slice_end} ' - 'ORDER BY L.VolumePosition' - ) - - with self._generate_temp_segment_table( - segment_numbers=segment_numbers, - combine_segments=combine_segments, - relabel=relabel - ): - yield ( - ( - (fo, slice(None), slice(None)), - (fi, slice(None), slice(None)), - seg_no - ) - for (fo, fi, seg_no) in self._db_con.execute(query) - ) - - @contextmanager - def _iterate_indices_by_dimension_index_values( - self, - dimension_index_values: Sequence[Sequence[int]], - dimension_index_pointers: Sequence[int], - segment_numbers: Sequence[int], - combine_segments: bool = False, - relabel: bool = False, - ) -> Generator[ - Iterator[ - Tuple[ - Tuple[Union[slice, int], ...], - Tuple[Union[slice, int], ...], - int - ] - ], - None, - None, - ]: - """Iterate over frame indices for given dimension index values. - - This is intended to be the most flexible and lowest-level (and there - also least convenient) method to request information about - segmentation frames. The user can choose to specify which segmentation - frames are of interest using arbitrary dimension indices and their - associated values. This makes no assumptions about the dimension - organization of the underlying segmentation, except that the given - dimension indices can be used to uniquely identify frames in the - segmentation image. 
- - This yields an iterator to the underlying database result that iterates - over information on the steps required to construct the requested - segmentation mask from the stored frames of the segmentation image. - - This method is intended to be used as a context manager that yields the - requested iterator. The iterator is only valid while the context - manager is active. + relabel: bool, + ): + """Get output segment numbers for retrieving pixels. Parameters ---------- - dimension_index_values: Sequence[Sequence[int]] - Dimension index values for the requested frames. - dimension_index_pointers: Sequence[Union[int, pydicom.tag.BaseTag]] - The data element tags that identify the indices used in the - ``dimension_index_values`` parameter. - segment_numbers: Sequence[int] + segment_numbers: Union[Sequence[int], None] Sequence containing segment numbers to include. combine_segments: bool, optional - If True, produce indices to combine the different segments into a - single label map in which the value of a pixel represents its - segment. If False (the default), segments are binary and stacked - down the last dimension of the output array. + If True, combine the different segments into a single label + map in which the value of a pixel represents its segment. + If False, segments are binary and stacked down the + last dimension of the output array. relabel: bool, optional - If True and ``combine_segments`` is ``True``, the output segment - numbers are relabelled into the range ``0`` to + If True and ``combine_segments`` is ``True``, the pixel values in + the output array are relabelled into the range ``0`` to ``len(segment_numbers)`` (inclusive) according to the position of the original segment numbers in ``segment_numbers`` parameter. If ``combine_segments`` is ``False``, this has no effect. - Yields - ------ - Iterator[Tuple[Tuple[Union[slice, int], ...], Tuple[Union[slice, int], ...], int]]: - Indices required to construct the requested mask. Each - triplet denotes the (output indexer, segmentation indexer, - output segment number) representing a list of "instructions" to - create the requested output array by copying frames from the - segmentation dataset and inserting them into the output array with - a given segment value. Output indexer and segmentation indexer are - tuples that can be used to index the output and segmentations - numpy arrays directly. 
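
To make the shape of these instructions concrete, the following sketch (toy arrays and hand-written triplets, purely hypothetical) applies them the way a consumer is expected to:

    import numpy as np

    # seg_frames: stored (binary) segmentation frames.
    # out: the output array being assembled.
    seg_frames = np.array([np.full((2, 2), v, np.uint8) for v in (1, 0, 1)])
    out = np.zeros((2, 2, 2), dtype=np.uint8)

    triplets = [
        ((0, slice(None), slice(None)), (2, slice(None), slice(None)), 4),
        ((1, slice(None), slice(None)), (0, slice(None), slice(None)), 4),
    ]
    for out_indexer, seg_indexer, seg_number in triplets:
        # Copy one stored frame into place, writing the output segment
        # number wherever the stored frame is set.
        frame = seg_frames[seg_indexer]
        out[out_indexer][frame > 0] = seg_number
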
-
-        """  # noqa: E501
-        # Create temporary table of desired dimension indices
-        table_name = 'TemporaryDimensionIndexValues'
-
-        dim_ind_cols = [
-            self._dim_ind_col_names[p] for p in dimension_index_pointers
-        ]
-        column_defs = (
-            ['OutputFrameIndex INTEGER UNIQUE NOT NULL'] +
-            [f'{col} INTEGER NOT NULL' for col in dim_ind_cols]
-        )
-        column_data = (
-            (i, *tuple(row))
-            for i, row in enumerate(dimension_index_values)
-        )
-
-        # Construct the query. The ORDER BY is not logically necessary
-        # but seems to improve performance of the downstream numpy
-        # operations, presumably as it is more cache efficient
-        join_str = ' AND '.join(f'D.{col} = L.{col}' for col in dim_ind_cols)
-        query = (
-            'SELECT '
-            '    D.OutputFrameIndex,'  # frame index of the output array
-            '    L.FrameNumber - 1,'  # frame *index* of segmentation image
-            '    S.OutputSegmentNumber '  # output segment number
-            'FROM TemporaryDimensionIndexValues D '
-            'INNER JOIN FrameLUT L'
-            f'    ON {join_str} '
-            'INNER JOIN TemporarySegmentNumbers S'
-            '    ON L.ReferencedSegmentNumber = S.SegmentNumber '
-            'ORDER BY D.OutputFrameIndex'
-        )
+        Returns
+        -------
+        Optional[Sequence[int]]:
+            Sequence of output segments for each item of the input segment
+            numbers, or None if no remapping is required.
 
-        with self._generate_temp_table(
-            table_name=table_name,
-            column_defs=column_defs,
-            column_data=column_data,
-        ):
-            with self._generate_temp_segment_table(
-                segment_numbers=segment_numbers,
-                combine_segments=combine_segments,
-                relabel=relabel
-            ):
-                yield (
-                    (
-                        (fo, slice(None), slice(None)),
-                        (fi, slice(None), slice(None)),
-                        seg_no
-                    )
-                    for (fo, fi, seg_no) in self._db_con.execute(query)
-                )
+        """
+        if combine_segments:
+            if relabel:
+                return range(1, len(segment_numbers) + 1)
+            else:
+                return segment_numbers
+        return None
 
     def get_pixels_by_source_instance(
         self,
@@ -4293,7 +3670,7 @@ def get_pixels_by_source_instance(
 
         # Checks on validity of the inputs
         if segment_numbers is None:
-            segment_numbers = list(self.segment_numbers)
+            segment_numbers = self.segment_numbers
         if len(segment_numbers) == 0:
             raise ValueError(
                 'Segment numbers may not be empty.'
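
The three cases of the new ``_get_segment_remap_values`` helper shown above can be restated on example inputs (a standalone re-statement for illustration, not the class method itself):

    def get_segment_remap_values(segment_numbers, combine_segments, relabel):
        # Only combined label maps ever need their pixel values remapped.
        if combine_segments:
            if relabel:
                # Relabel densely into 1..len(segment_numbers).
                return range(1, len(segment_numbers) + 1)
            return segment_numbers
        return None

    print(list(get_segment_remap_values([4, 7, 9], True, True)))   # [1, 2, 3]
    print(get_segment_remap_values([4, 7, 9], True, False))        # [4, 7, 9]
    print(get_segment_remap_values([4, 7, 9], False, True))        # None

In the hunks that follow, this return value is passed as ``remap_channel_indices`` to the generalized ``_iterate_indices_for_stack``, which takes over from the per-case iterator methods deleted above.
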
@@ -4331,11 +3708,21 @@ def get_pixels_by_source_instance( ) raise KeyError(msg) - with self._iterate_indices_by_source_instance( - source_sop_instance_uids=source_sop_instance_uids, - segment_numbers=segment_numbers, + remap_channel_indices = self._get_segment_remap_values( + segment_numbers, combine_segments=combine_segments, - relabel=relabel, + relabel=relabel + ) + + if self.segmentation_type == SegmentationTypeValues.LABELMAP: + channel_indices = None + else: + channel_indices = {'ReferencedSegmentNumber': segment_numbers} + + with self._iterate_indices_for_stack( + stack_indices={'ReferencedSOPInstanceUID': source_sop_instance_uids}, + channel_indices=channel_indices, + remap_channel_indices=remap_channel_indices, ) as indices: return self._get_pixels_by_seg_frame( @@ -4588,12 +3975,21 @@ def get_pixels_by_source_frame( ) raise ValueError(msg) - with self._iterate_indices_by_source_frame( - source_sop_instance_uid=source_sop_instance_uid, - source_frame_numbers=source_frame_numbers, - segment_numbers=segment_numbers, + if self.segmentation_type == SegmentationTypeValues.LABELMAP: + channel_indices = None + else: + channel_indices = {'ReferencedSegmentNumber': segment_numbers} + + remap_channel_indices = self._get_segment_remap_values( + segment_numbers, combine_segments=combine_segments, - relabel=relabel, + relabel=relabel + ) + + with self._iterate_indices_for_stack( + stack_indices={'ReferencedFrameNumber': source_frame_numbers}, + channel_indices=channel_indices, + remap_channel_indices=remap_channel_indices, ) as indices: return self._get_pixels_by_seg_frame( @@ -4742,12 +4138,24 @@ def get_volume( "empty volume." ) - with self._iterate_indices_for_volume( - slice_start=slice_start, - slice_end=cast(int, slice_end), - segment_numbers=segment_numbers, + + remap_channel_indices = self._get_segment_remap_values( + segment_numbers, combine_segments=combine_segments, - relabel=relabel, + relabel=relabel + ) + + volume_positions = range(slice_start, slice_end) + + if self.segmentation_type == SegmentationTypeValues.LABELMAP: + channel_indices = None + else: + channel_indices = {'ReferencedSegmentNumber': segment_numbers} + + with self._iterate_indices_for_stack( + stack_indices={'VolumePosition': volume_positions}, + channel_indices=channel_indices, + remap_channel_indices=remap_channel_indices, ) as indices: array = self._get_pixels_by_seg_frame( @@ -4986,13 +4394,6 @@ def get_pixels_by_dimension_index_values( 'per dimension index pointer specified.' ) - if not self.are_dimension_indices_unique(dimension_index_pointers): - raise RuntimeError( - 'The chosen dimension indices do not uniquely identify ' - 'frames of the segmentation image. You may need to provide ' - 'further indices to disambiguate.' 
- ) - # Check that all frame numbers requested actually exist if not assert_missing_frames_are_empty: unique_dim_ind_vals = self._get_unique_dim_index_values( @@ -5010,12 +4411,31 @@ def get_pixels_by_dimension_index_values( ) raise ValueError(msg) - with self._iterate_indices_by_dimension_index_values( - dimension_index_values=dimension_index_values, - dimension_index_pointers=dimension_index_pointers, - segment_numbers=segment_numbers, + + if self.segmentation_type == SegmentationTypeValues.LABELMAP: + channel_indices = None + else: + channel_indices = {'ReferencedSegmentNumber': segment_numbers} + + remap_channel_indices = self._get_segment_remap_values( + segment_numbers, combine_segments=combine_segments, relabel=relabel, + ) + + stack_indices = { + ptr: vals + for ptr, vals in zip( + dimension_index_pointers, + zip(*dimension_index_values), + ) + } + + with self._iterate_indices_for_stack( + stack_indices=stack_indices, + stack_dimension_use_indices=True, + channel_indices=channel_indices, + remap_channel_indices=remap_channel_indices, ) as indices: return self._get_pixels_by_seg_frame( @@ -5224,15 +4644,25 @@ def get_total_pixel_matrix( column_end - column_start, ) + if self.segmentation_type == SegmentationTypeValues.LABELMAP: + channel_indices = None + else: + channel_indices = {'ReferencedSegmentNumber': segment_numbers} + + remap_channel_indices = self._get_segment_remap_values( + segment_numbers, + combine_segments=combine_segments, + relabel=relabel, + ) + with self._iterate_indices_for_tiled_region( row_start=row_start, row_end=row_end, column_start=column_start, column_end=column_end, tile_shape=(self.Rows, self.Columns), - segment_numbers=segment_numbers, - combine_segments=combine_segments, - relabel=relabel, + channel_indices=channel_indices, + remap_channel_indices=remap_channel_indices, ) as indices: return self._get_pixels_by_seg_frame( diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index d5f74349..d64e4ebd 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -57,6 +57,10 @@ # TODO get volume from legacy series # TODO make multiframe public # TODO figure out type hinting for _VolumeBase +# TODO inheritance of are_dimension_indices_unique +# TODO tests for labelmap segmentation with combine_segments False +# TODO include labelmap test case +# TODO test filter class _VolumeBase(ABC): diff --git a/tests/test_seg.py b/tests/test_seg.py index 037da42b..61fc662a 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -2255,11 +2255,6 @@ def test_construction_autotile( self.get_array_after_writing(instance) assert len(w) == 0 - # TODO remove this after implementing full "reconstruction" - # of LABELMAP segmentation arrays - if segmentation_type == SegmentationTypeValues.LABELMAP: - continue - # Check that full reconstructed array matches the input reconstructed_array = instance.get_total_pixel_matrix( combine_segments=True, @@ -5160,7 +5155,7 @@ def test_multiple_source_multiple_pixel_arrays_multisegment(self): pix[0] ) - def test_multiple_source_multiple_pixel_arrays_multisegment_labelmap(self): + def test_multiple_source_multiple_pixel_arrays_multisegment_from_labelmap(self): # Test construction when given multiple source images and multiple # segmentation images mask = np.argmax(self._seg_pix_multisegment, axis=3).astype(np.uint8) @@ -5185,3 +5180,20 @@ def test_multiple_source_multiple_pixel_arrays_multisegment_labelmap(self): seg.get_total_pixel_matrix(combine_segments=True), mask[0] ) + + def 
test_multiple_source_multiple_pixel_arrays_multisegment_labelmap(self): + # Test construction when given multiple source images and multiple + # segmentation images + mask = np.argmax(self._seg_pix_multisegment, axis=3).astype(np.uint8) + segs = create_segmentation_pyramid( + source_images=self._source_pyramid, + pixel_arrays=mask, + segmentation_type=SegmentationTypeValues.LABELMAP, + segment_descriptions=self._segment_descriptions_multi, + series_instance_uid=UID(), + series_number=1, + manufacturer='Foo', + manufacturer_model_name='Bar', + software_versions='1', + device_serial_number='123', + ) From 2a408ad59479c52acaf24af015db68e8f14fcd5d Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sat, 12 Oct 2024 19:34:30 -0400 Subject: [PATCH 68/93] Fixes to labelmap reading --- src/highdicom/_multiframe.py | 2 +- src/highdicom/seg/sop.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 5fad2b55..648c8de6 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -1391,7 +1391,7 @@ def _iterate_indices_for_stack( query = ( 'SELECT ' ' F.OutputFrameIndex,' # frame index of the output array - ' L.FrameNumber - 1,' # frame *index* of segmentation image + ' L.FrameNumber - 1 ' # frame *index* of segmentation image f'FROM {stack_table_name} F ' 'INNER JOIN FrameLUT L' f' ON {stack_join_str} ' diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 0a5ec8a3..8d1ad95f 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -3191,9 +3191,9 @@ def _get_pixels_by_seg_frame( output_shape=output_shape, indices_iterator=indices_iterator, ) - max_segment = segment_numbers.max() + max_segment = max(self.segment_numbers) segment_numbers_list = segment_numbers.tolist() - remapping = np.zeros( max_segment + 1, np.uint16) + remapping = np.zeros(max_segment + 1, np.uint16) bg_val = self.get('PixelPaddingValue', 0) if relabel and not combine_segments: for s in range(max_segment + 1): From 93999e80cd6f0a2bc9aea4d13bb352c4bbb6739a Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 13 Oct 2024 13:01:04 -0400 Subject: [PATCH 69/93] Fixes for labelmap reading dtypes, add test cases --- .../seg_image_sm_control_labelmap.dcm | Bin 0 -> 30094 bytes ...mage_sm_control_labelmap_palette_color.dcm | Bin 0 -> 91284 bytes src/highdicom/_multiframe.py | 2 +- src/highdicom/content.py | 23 +++ src/highdicom/seg/sop.py | 177 ++++++++---------- tests/test_seg.py | 134 +++++++++++-- 6 files changed, 221 insertions(+), 115 deletions(-) create mode 100644 data/test_files/seg_image_sm_control_labelmap.dcm create mode 100644 data/test_files/seg_image_sm_control_labelmap_palette_color.dcm diff --git a/data/test_files/seg_image_sm_control_labelmap.dcm b/data/test_files/seg_image_sm_control_labelmap.dcm new file mode 100644 index 0000000000000000000000000000000000000000..6e043a2620aa636b429173b9e26b6133ac59f426 GIT binary patch literal 30094 zcmeHQ-)|d7e*apQZANh%O7;bY5%|_Qqfo9odS-U^S8zB`mXwHwWGSL_mjs76`a#J& zNTfhgKKrRD(8t5!(jNLy6h(3qsG8 z+x8s~QMRqLk`t6iKDSOii=Qk}iWywHoBE8P;AtC=Dj0)9S&~z3OiLNEDP^ z5LS+Iv{25|;?hRxr%Q`JU*9OL&To8*_;>ogU5;)Kb{w*rmA92jlBNO*5imREp_31XYDQ(Qd`xo3CZ6Q^9D~nnH+JHiXa-=JL z?du>w)6=%l+L5;JdCFHxc^2I`6VG-&jwgIAr7uO`_{w(B=xxVSt{rH{^`)mB;5BoP z-|oA6lRWy%`s3>n9^0*IrPj)$m94Bz()_%TdAe^}hK}lb4#&7<+(P(t%Q$M(n#uN% zM~mbZD?L9Dj&Sn0d!al*Yu0TdU_M2Qm1ou6iuK8-pWP-4F%$GmTJ%=``t?=d3C%Ss z0^9X$)ODadVQbe`j*qL_c^OV^sRA5;M*zvB9p$=8>Hrz}fn(zfKX6?OwRF1ASTs!~ 
[... base85-encoded binary patch data (30094 bytes) omitted ...]

literal 0
HcmV?d00001

diff --git a/data/test_files/seg_image_sm_control_labelmap_palette_color.dcm b/data/test_files/seg_image_sm_control_labelmap_palette_color.dcm
new file mode 100644
index 0000000000000000000000000000000000000000..bdafb037e01e3de914bfa4227019e0690af39e3c
GIT binary patch
literal 91284

[... base85-encoded binary patch data (91284 bytes) omitted ...]
z;{8V>g(4(qZ%yJh{=cpYe>cS2jvy0Zmhz58k!e)$Kod5M;4pT9eou~{NyqFfEzg_- z2`s0*2dCe_MuRKI!%)UC?}JS!T>tx^55MgEDXU9HgW@k%zEzfCd zd}aWDNmD@7zx@By$GYorXVkpXJ!8F7m(O>Hb^p>oQ79=oxCO4(P#^08J@Wi94X{W7 zunv*aGN?r7w^JW5^8o3|Z%E#Vz4e_8}$ZSq-raIsuUEfioa%m)JQ3~5!0-F zJNK9OknVAgjQN1!P>zU_l-XWxp#a)CBfpa%>bP5oW6BB)t>Xq?NdfhrGD%&!0tZh8 zFj{4+rwiDon$Ks`#M;*Qi(J8=nzFUj5(o0-}LMlN?$-+UIvskA+w?!Fw3QGGX(v@K1)A6M-gJP ztmW`kA}EmS(f%6wfEl+=`6lM;dr5O1qhb-N-_#Vy>0gh0QVh?X|6NvSR z7&DDasEod+b~CvuE?z<^Z8<5GQzbJxZ5S9tdrnHTCMmaA8M#X~c4qioJy+5N1t#Xa3@ zy!wx4o!ZwrdY^2GqXvaQb#6lA=P)PSgATnIEWG5=xAY){%m3FBgUwm>!r!m^PMJS> zSC{PJbo44Dx6GMw3sG|9Ts9_G@zpu4`B%+}jnquqh#6(y{Fzl}e}F+cihWaXVO_}nnS>h_3- z!3zywEX@cdRz3dQM3pstBE{MeF*$SYrU_%869{wwc>lj*nUtc<$NVXz{IfXg`;YC0 zN)vBh&V*`LzJ5Qxt_R*U96!_x7)qb~qvcR#GCiRdoisaZuN>|lHqWC_Z>zjGB3r8| zwvsH}BQm+RAjd#7S{Br$#@a0Ywdz3)Erz<@L&m0%*ri{27J705FX|dvxsIQUjq3}s zzop*fk%YeE-?o(-IUU}Nm#diR+7gs{(wx54Epaz1b!$sRI^^FSNr6+xf&Eo(daZx= zZnNzR)vX1yr&4~bnm^noOssfmcj5|`UzyclrHc4j%bxzJ9b#iRpXe@RXFp$>)!?eQ zFnEmSW+F5v`ZKe;FV8X2tho1q{d(ri&KT>B;Iyr$7bB&29XZ>aP^osdYyuCGpALEQIy?`RZCA`PZ{& z+c|7M5AnMfY#cj-yOpfS$>!a2vVK$Mt`$ille|?&Yz>ipEJEaUNP28aXx0zfO2E#G zm95c`(pW#N?2-20llDZpCz87De5)*ZT)`gBHLvrl+p4v;Mbgm`! zk}Bmfj%Trh6hR;@DiK+TiHp|>Sh(KQ?tv84Z|>pYy~|pW`_f>eot?cu@bq!Z0baD% z#QokS?Dx^kW?uYaLZ z!scBNKUA#eUm_lXPX?TyYC{=DnorMx^=ZA6LKt-3_;3WmzJ7k-&b+gIZ@Ul2uy`_F z!ahGmXdB{{9Stax6>u9uq~ZnWAG!Fq@o4nHObDEo{W3yYT!;M~K)L+=$=lF)U7qO_ z@W0-^vxT4)xMY|0`FrT^zPHbA08yie3q0(6+Unc_*jq6^XCpbx9Ii0%%?&^4{~}S` z6I}C4?nZ}LuD|kW3o156rJ_m9eOkq;QCs(w>W?O{kb$;o%Mm`wEU^72)4bDmXDI`7 zc(dRS}z1v20ZiT7p?!16v`-ewfu(G`y%{-P6sqEvQolqpU9JMZMiq+Sb z)fseD$ETT^*j5rkHO#HbPaIXOk>ww>j~xaoNfO-depSaDJH8gRvG7kJPHi}*&J4Yg zPR2Rh^WuinQ36Ay_E`yOul~{b8*YKNz^Cbw-&%vV2h}3$LMQkQ1uM#1JT22pz7z%7 zmloYmigYO`zz3RpoaB$%e)oA?;Ho_ycvNI5wHa1h%EzM^{k=R7)192(kPHsVZ|*q) zBM28+cg~e4_wyLf@toM=j;AzIY?YtShqYa+_{OPFC82+|#`8S{b-AB14p|aDarA#3kT{M;0kdlh|+SDLY%GE|n z;I%+xvtqyt`wy+wLGt=(?N>wM<&`^j!{-HfI^RS~6JPf!CLF?NN1QS~!5UfbvQ=!v zIh~nKy_*bGvuS-t%$oF|f1Ode?yG!KQuof6ymYPQp>J}(Q+uV4NY$V2SdR~>rhUBb zu|e(q^=^ZXas$Ec;|4xMWY0c%`r#&@9Dd}8v;P^MG~OB#1}>fsj-xH!Q__bz)s-}`p;6-;$?RdrQ&{Z*&_I(@oZRutji=I@cL;RasslzcSwd3aKe zN$rqZL4@^{$Al7X>tt%q{fpM6<(B1IC#6zPRzVh;0e7pl%n!OvuhBMBJDFBXHNB-X z`%#415v789qvLfLUc)`BMIu5CNe*3t^$kbe5(U~kvk0eOiw3ROrtnKyN}4iz^g>>O zM%a_ZHLnb$hl=ZK&5p2Sn?#M**CASt8eG5I(t2O#=Wt2#ts`uYQA(Vaw9TuwpETa; zyrT|iiYhMaoTWto?@h7L-z-jRes`i#RJVDpy|YNwjknmrp_=#rtoINwBPIAgS@zIE z{@gPHSx?n?iqiqrw}X&sLEgBIuG>YJsa&m)BfVpD%Zw)a2C3C5B*Vyy}I z1tTe4R=Bskja>`n_H>_ROedDr$uX30l+1hv9!m_i?>>Q+aeG78Lke5T&{AYu4Tzga+>dXAr^AH8y+HdJvO4{A~grx zX#(g*#_KM9v>W|s*I|@cjX_rq!uw8mmyzt5AYIxC8BMoBuwDzWdfYip>YdJg+G2^b z@}3le^j$<<>jRlWnYS%uSySnr78zN#^hN=}20F8UV-5KN^Wb`E#u64@Fs^D*$>1h( zWn$5E(Uz)9xeLT~?&qaiOPnn|9p5AKbMxhxMnq|2K=5HiSmWNysfbyP$!8R#gqnp7 zJT^aU{vEMQjNN=gdaguaQ>)|^sToab5}txX4@X&=0@jU@tTcgn6cy&P4VxLA%*X3? 
zvNv`u7D#GrsLT`D)e=;?UVM2qD??A_L`!MnWrSQoL|i1etJ{ZQbyCMNsMh7+A@dRjIp zv;0|<4MjcuG)0jh=7xqh;^nl%_l;#WDWU1+i2SC4@WqIr#zP(|(rV57%*DlwT3U9$ z5h!Za6IPZkZJHx)wY$HmU8v^3Skoop{*7D9&h$SQ2(0qwc?nzAjrHCYDsHLjo44^z z_ij$$+LnRi{T#8a%t%TwR-#?5jJ0J$>$Z$vws$nPgr)9~sekGI$JX+CapQiXXv3Tx zTejS6*eBeu|5C$6;ibp!HEIbMo+UJ{Umqi~D(@mMTgbeG{n$n1SfyxxoanJSL!O(& zdh#IG7$Ml1$(p@=X4A-{nmuE+y!@T&Cgl$k^b~=C4>3biB%7rLpAcXh)8Y^xDKK?onh-dKEWTZqGfH$NKv@ z`8&HG9j(Z{+t_7#^iFkA*(t%a1Kamq(S(oJ?6x&p^_@UlD# z11|UY_&&dRt;sM<%O6wXcstUsB=uf=lP@DwCt2L5%2Pfyz(?Wqf^-9)F(c_aOZ{aI zc4pQDs_jg;OAI-Vs>oRv?j^Fc*e_w3u)>~QR!g2p>Ta=LIIM5y>1;pv`YPVN>~VEC z=9(TiKkm#mlh&csJJ*CC_-86#ExhBCL%i%C7I3f6v%_m{{u|HJr)h-?Jf9h_ERw#g zeXzUO@#_5@$t4tTBcy35*XM?yPSvYWq(B&0C4oC)_P32=pXpxnKAH3+J1oGFHsY8W z>%L|9**#VF-#P<|q}_9?D@zqFn%tExUw%O^Jg-9Qoc?9G%0n(%4(e6PF44xr)e7g* z4@TBVU#QylTdk{Gp7fcAOWf<%QR=pNN34$q8q0{b$SZ4)guLtv4?Fhu2`+8-amyi> zk^^?U;mDeVvyDCF4-=iu8fxp(or3eQ4Na$Mw^@xeTa9ZYO?R!0oDMZxp0YQ-*AjoK zVt+?#uT3~9om_ExSgf(x%HUoE%G&+RC$@#R^!5%J z)y+038uFszZQcwfw%1$Frz+4^Sw@v4(-qBQ;@8r1OjLd5(61lsbJnE)X5ea~)U9Sv ztLnw{F|Z|+wJ$L7l+q#3w|*wJxTWUIGtpI{%P}uUO_K41Gf&0}_HRA+d{!-P-|HvU zWDU)XLEVmb+UHn$j6$6bs$uT}EtBFC?6yOQnAF~1Ra>u(aQ5t7;fU+i*hSE5W$oE= ze;1CPv}-lmo%-$|N194WH}I2aYAv#QCN@9b9F;i`pAv_g(KD2P1Yf{=-&QAQ{#3o| zC9#q-tLydd%5Em>ydt56#mw5$oPC73l(00!k?D-{a&PZGgjsoVuZp zE7cx}M$IJmAbn&FDA}s3rL-v~y4ezE(=H-6a5?!$k%2ryoiJvUE7NfXL+eqbUB&*+ zAa`BEzN05}^bno27i6k+OCsL}%hHFzQ|M1<`(#s2EvGri{-jHxZP{oJ!=!<-&%)*?~a+P$SJ zLepBOJy^S`v^FeTUV3&LB5Q$cM)U76&B!^8g~1Rap;7Dd9mKZAPN!07LUX3!YKfVx ztB@Nc^jmsl)<`O~Xh^=2y3iaX(JOSJepZi_pkAwX@7xW&w2D6K4ZS_&-m@EteI|@L zk>{*i?QWrb$`TDY%t&xdT#7Gk|qzMt-sO4EB8{9N%y;;VprA>n%Noe&& z#7ic(Zj}m_mT6fgabHHcd8>GyVBEvATm|9y`o`X?LT||CeH#S)yF)oO0v;Sg&ODKQ zeS!1?;`iA4O)k=jU5_7VB9qz%G6=}sP0X--#JxtPOYyRLE&Ap$(uHkHccscKAV`v%XxV$sW#E1cb%2&{0PvV5nu*k-3KF8WwnVALG_1T3$E4*HEqIW|`B}i%zhA z4N7L5mu>uGed0~4#)qplBs?<>xE_*6^Dg$+Nv|zFKez}>)8>EmQ^I4)?=84L=&7wzF3*iJU2y+d z`+V!8MYpSK&k*yM_OfoE&Vj{{#h2dyT*M_9is0} zZ+qy9$xx}XxH2a#A^W{=P^xZBQsC_rHJ>-3(#fOeq{2H>d@cMVXQb^r+<9B+j)>wP zG0*Ob;oRc)=Ux&Wy+bUWFHnX`ymn_mavOM%xL;8#HYk8|!n8WFmgjbso#@KlczI^R z8tQ7l7jZkQE{4U%rssG@yT;szsfnwOx$MJD6pOAur<>FnqkCdjid)=vZDi`(M2o%h z8FN$45w~Vqrdx^k7R<^!EZjzPaE|0=YEs>e`twW=T`}j1o*DGV_Mh@y9HG}A8v#W; z?bse?9Nt-Dmby1Q``+Hnv*D{_Te7Ezw)#rtybO7J?#jI_p#mp*^Y(=0Xy3`tiTHi* ztU`gP0K(5j6)~yeqveP+qVP4X8Ftf$olJWja|REeH9LnJlJS=D{O$3^=w82}0qUKh zKZq~)Gllb37FJISy??P=GujyyeD?f^Z_bO$8-{xy zxZTL^*-<6x-Pcl3(|bLpMB$O#6>366or71p|MWVBr`ZM5dQFdPt0N5(9<|ygjY~Ys z6oneQFZ-gMnsTq+5PI0O$ggwblQZ{@^uAo}oon#&1tMy|q;n)cbNk8gq3rw3&f5nA zTX%R047Zb)dn9*Av=QCclpmqqab27`o%+W)!+_p)>9YsjgxZ&%DYUX^uRLRSxUs{} zSxnxh^RbJXOfNao?f1<|Ef2457YX!}Q8fFdHfF)Tx6eE>O10LHh?Qs>?-``kcc0ia z_@Xn_Ze-wn*B<-5G$ym(Zm=?oz4%mNngNS$=^nsiNgXe6-Pm1V7InOYony57plZ(? z(?HVW4p+q>sfM)nm#h-Km!({G#F30RQ?{z6#tnCN;=#AhQ163%(~te!W2JOrj~x@I z{3}kn(^6-f>>g@_=`6`!$_!g;u|JYyFl!$l%6WyGGeoGKL6RQLr?&5ap1RT3E5@_8 zcDpJ^F(7vOK5xowcD&-YJVjR5HsSDjtm8!dB^F~Ccg1=?V>UMP=rFjy-L~tOj(I35 zn$QNJau7JG>EURp!M1gV*Cm%^UO<`lZ!JWi*YLhmw$;v2{QZLKCKsi#b zAe<0NASH}gLA5j<)6@^x#j zEKv&I>MB(y^{S;?a))qv1HRW$Sdj9zPgtO~yOGl=@LS&y2P=%>V40}R73>B|yI33Z z^`l!d=*}(q?+|X}^@$uLx=Ag>5s}zv=qZhu(@Z+`1hKd^SEE~6l^l$$kq#j5lu?yt zwGK-v$aJ@qNr(#9)%kM`Hd!~c_U#q*X*2GV5%yxqb8AEv@qXeOip}8Mq*J9Ddu}%? 
z%Jwi0mY+hEP@OW|P%E1bhk7GY>ucTh5N*xBmb{#t0d-8U@i?~|*o4LBoVqNFn zhL)er%4s#l6(>qMx|6P+)NXI_W7|OOc~1LIlRA*PCmeTmj}m=cgz2o!$)3C0Rta7W zS=qW{ovxI^-433MbVq*XfU#`T{ewfgn48sugEZBQ*24p_25t0M?n{fE?UZ(ut$wRZ zjr5tFjj6fM&nMMSi#c`C@=>FAo2TX@M~A^H`H$Y~UAV5+uuEaO|N6%3(shB)n;xvr zh-_;ju6itcEdE#SNfah+9d``Zm{rSLq}Er|@YvblX7!Q&jW+nYA@(}wv$ei0uiaHE zYablG+EBhBlj6fIlZspizPI<{%5Vr&8e%ULR&rn4z$N18gJ6}_QLidRa7{5S)gHox zq}h+ItqnvLMQCv2v74gL^DZb|O2BiU>g~FN;H92=e$SmVDsy(zNc@cq zmHU2a%IQbbL+&W2qryD0oYKs_!tPSj4xA3l9!Re_mYS=Ssd{kLz3y!F9V7Xydl=c# z(mBPWo80!+U)(lOs+;8b`@k%#72XTE&oAZ#J|B<^S`kI@#ezDC{OO#eRR}G>e0&T zD1$1&8ssg7JCABB!dHbGJ`4$EUv_%f6Dr`a{*h$pHsckKs=`zcbk+HWpV&5|KKz!x zlxU-T3{CLu>2qe&o;G+mpZs;?uK$>0`|xP2psUAFalxYN$|LytZ6WW6$W6{6N_6uU zjiCCPUh+=?I$7!De*gIqk15N14__fs&_25y{3zl#Qj9z)VLp&rb=!KM6uCg^Mn8G6 zSn}LpQQ;$=yY&WsF7Uf&Tr+wu`jCa>(4*|Rwnv5sD=_CT4xz}`FIPX#rR}+%%{Mzcv2$m8P@K7SDd=FT+2^irkh@1>6y{gFL0Eoo%(JsFc+u{uh|kQ zlnpnmMEwHBRG}C2_-ltYKMBh=G4vf;Qzl_*I{2vB-nx2dEA5%%F78_T?`K@;$C&X> z?`u}Ex=;U-p~5<6J2!YKqv_NyZk647mcy1Z-G{BFX=<|$fvLrZ9%}mr=|uh6*9p>Dyrz%)C&x@~nYR+UzC zp*L|4DT>l_X#0Jy3f2xnn2il{KmNV$4u&mmj)GlhB<2j}MCT2qi-_kPVtS6!O;mT& zBFVgrA?ziNZ=N*nBCt+LC(M6B?kpyBuD=IB$BVi!yr_w;vZPWTiD!OYH6Tty(0zj(RjJ2@%rb z6muPUsE8%;fKB6OSWPQ@d)fmQWZnMTS%RIto%)*)04-$y;*~Ih-)d4#wBzK$}oE^n;#$#G4T~rr~;~1dH_nW zNhpMY=&2jK7=%POkC{uOPPV)3K8F0YEe>^GKD5(c%2dOsvs3(%vt`HpO@g8=l?4MU z#irFgO(x$?Wd9%u$zo!5JRVLS=9-^*+G5aS?3}x$m#Jx ziJpqPkF%tXJc=3^l$C9<>?abuXrA0Z6a=|y{Z2=!ICB^~O$9i^j@GM7{8iswV>m{wu?g#A4$J^g$ z%($*c4G3}fk!>4ro9*x4mS_Gx_$s+-e@et6G8wNI$!&KM7^#H>-tfrl-V;@wn$U5bLYIUNGcsDf@>v{qXG&B_Xgk7xLn+!~R zA1MOsilq-PTu6*_dKhG(1YTcSs3DvbRBy8FLh9+JQVHJ-ht~V651@BMR&!qwX2lSB zd77HZ$^AaZy0Z>*gY9?c<@N@7Jh`vgtsIzG^{Rs!Ik%#tb~s_QT%rKjr#?U@L}nZ_9f2V9N>7>^0?Mhh1`^s^HxTd9!NmQN{(c>VgE5NH_f+^$TfA1s>w8 zGTa`)<34w|95382;FX`2&8Z9u%r;^6C1w_$@ASSy$*+5;f3GsHBY#OjTV8AYa1k}n z&R?cvWuC+NyplKhH!N}YR}=8h!3_`tKi`d3Ec6 z!s&&+=>80^;gB=^%rJCJGEXN(KiQ7`8rTr0bUSXRQ;#Gv8A`kBHr}nm?yT3$T`qF?*k;Gr$3v%e&RGmr zT{ZRi`SGftbvITGxTQ3NT0M3u%8gmf4SXn`{EAi8fKD21JlRx`s8xt+35pL*5NO>I zciF$A^?0n`g_PC@v4)nvxBeD;MSCr|DXv>_2ALc83cNS>ZGw@&dZ&Gs5~FXgg`J*0 z`qRzCGlRqBNz*P34@DF`^%WR7((opHXtjx+}N3QsZ04uhnhzWF1LNvwLF` zyL|pykIT{b=AOM`N6)Bt^d8e?lIpr|o32KMcPQ9INzO()#>*x+fQT80@s7H}7B2*>RR`6AUER>?`yY@)^&C&#soc4-7Z=o|RBKNdV566!) zEb@qZw0EPJM^AQps|22{0-lk!W*Amg$#zf+vlWEO%fV@6Jush7D%J|o2wozZlRb5Znmhb~7zB)7+jC%0)Q z_Z_!VtdhH*nI(C<_dr{hs+QW1LVii{`al1#zoR4CkrXX3o5R zEpaAKpSxEok(=J7ia6g}+FXXxW_DKOqq%K^>5j-v_0B*$+Eq8;)`ec%q-}m2oz$^z z_cqjP>L&Cx#V$G|-KXVBr$}a>XVTt?x^5n;&KjUf>}t&5mP!86vX-YS&7-;UHp%Sc z4)!Zz8oAecLr81-vGk<9i~G%+&m6kYS6P~)&1Dy-x@ga3_4?rt?_j&z?a)kQV-5sq zSF$*m3X49LvyAe^2)ZDEF|a3sIpt(8`;mba4;MP+@ebuGops`#y%p_BX1~6)@dBFr(qzk} z;Xc1TV%JvmZpVyEG?esf9Hmhm z9{ZTNF=lXEoa7mnjE(Nj?BTX5pSj#E4MG7ntl#r8LX?=l#zcj;Fs@!Z9eKZtYx{Gw z4jrd`ES_#myF-UyHf-^y#?2!DpCZvRhW!Bn!F_kE*FXnJk@tA>lr|p{Z`w^ zS?YYGVI6~a`57gEMhn5v-KeW>4^m^R*T&tWp3IR<{I#v(wqU9_rT40Tx-o@*TJO#! z$~L`ySxuB~#YZ{X9XUv!dkZ^uY@UfjM0#;n$ZN-*RVK-ZNfx53lg1GZdBBLJi{G9WXLI##!txB)z z84b6{wd|gk;!>Q}K`BV9xJJf3&@aDOo?XGKSeUuD%CWpXqO1CJ#r!KeHAa=k9ZYL= zsx^%2AF9;$AN=Fd%18Oz3+i^(y_H_7tLS;V|GnABwJ@%w^9ujx{f^#l5sLjux1!=A zI5*R;r_!1JMIG6P=?7}^3b&J=)O!|U%aaZaskvtph6J73T;C3YH*Z{7Fi;$B=vOqLn-y`( zaUi~g7`LYHXPk#=q>M)?hOUkV%7P2HP$MEaep_$j9+(${}=5_obv_}6m|!%k_cm&rrP_n!q! 
z8^JaKs`~8{(&xa}n*^TU3b(S_G%_>sflbt~Vu9?r&4Xi)PI>Jd zPNMqXDB*a~=>BAeDC0=r+vZEGtv44HS~4H`yo-6)&AI;6+px>!D#EFScK-6lV{bct z_Zry8X>Y!=0$)sx_6bC8rLqEKq&;r+YvewyOLjDVJ#?@j%+zD&j=@@@uM6_>^eX4Wl}D_Z_(Nq_pCQ{%=ECEx(v(4+YQ_OmA{d z(Xl7mmP+!BZXP#;F zwlWsNayG`O^O-}GEV`Y0o+5QQdp;5Uq`tUdXZ46!Gi$%>6C;|c=BqyN^p^G#W(@tE zfhR+~ThTz7R>jIZN-Mj-F4P!}pWWM{CU`@dBc{C1`AAREj#9%=R`7PpK1l{7w+nC3 zeM#pPTAX&u__=IUySxok>TZb`{`}BU%Azd?pKH=B<$?!rw1}+%EXS@EWoPChhK%a= z?zQY8)k2Cb8?l>{zmzFVPz#A+rsFSrlrk=1-`W~>7hz5GA2JY_!+Whe`_U$tH|?9y zLa42sst27At2;alWTl5hr*&xdjS9c$G2)Jim~;1Y9XCT<>z;SwgT2HKQAr1OQ=_ev z8FOK^KEk?vS{4Vnt|dNZIZ~oYC}1v_#mqge5MoVlPP1hlTN;n4%0iS&Xe-(E6cywN zSv+Nx>{VF}@__Wi&5A9W99gl&9d9^SMYa3VdF0I)-Y#yt#6xZtvr~FnFPSPMy}0K} z{Y9h*t+}8c`I6j~sD+Gd+7T>)$N_h(w#o)H$DhnWbx@==gAn94W%L~cm+~ID1M!+t zB0K%i;;vPqzl!xWrgGkkziWBRHIjJO$>koEn9Z%}mz1RQjJOgq(%i8wb7WZG#^z_} zwJeQFF^nHIG(!Vj(&`Yp4$)TE=_Z1gYoSM-1Q6kahn*%RYju9OQu>|r#nGn ztNWa8_U2)6?^@r1RLP4?$9aO%$*u2sw$fJ_>H`jlncSDWHk>B+Mz8($4gE=UhUQit zzWIQjJcnBLtHF+5Qo7i&vppU+$+~vE^X$p`+#YSsD7{C_X1N9ScY1dsEImuuH>CW% z9lD#vevz^&OB*tjK3y|o&`#E+(R-jA=S0rpT~ocs3L5Y~itnlF)ia&fW80N`dP8qj z>qTcG{6f07N7>d*hYSQnI*KeVE4M?BT>OR!xaPBC>gR$D{4ZDfD z#b1FFT*nQz>783(6K=s)iEF*(&5F7HOSC@I)_!mNG{!w$`K0nL;a#<9L){}t=Zt4` z@HG!~aGD732>#XGl!3*XM+-3ACeD!csPo8Uf!`W`38;ja1JI_YU?A%RN zO?k-{Y%aJH$ikLx&R)z^O!+lei_spmD9@C+%Jos+3EHnF^nCG-f_-rX7W7y6^+hP! zsB{CC6Wz{>I50c$22Wf6^w$=`}`=q^S76S|F z2x?ZtPo=q3R`Id2r_`2YT!j|(WT1ZK`F11Ml6@X*}Tv$%`|#xReNLU;;Pn`{q;`3PrJI zo^u-9<>7G2_pylgwd-345^rY)<`3lOpl`3{72kJBu<0>tw9T;YBDbB%T}gFdDi+SF zt6_KN59SB45cj-dg}a{LWnB-Zug)}gJlnZ4ik>-)a*+Y zr*%dwZ_IHX81V`ha8n!Jkafu0eJHNNKLj~q-Lm<1X8%}6Y+N&w&}EiF>Rikwr%bP& z&t9K!?GBnLAGc$}v#=dV& ze;qZCIB&gVWM``WX~SWBaip8?P-$bx_1PoKJ9hc`^~BITf-ZNxWy}p7CCf0ggJX(! zGye$K8ehkl;d{qtUzh%kwzGRXY1hR}oH~52MW{8m@9>t8+eh8&{|Z${Eexj0Hb<8p zlX~Ki=4m>8NVpJXZZ$NwrqO=ppanJSe8S+mPO_T|(}=#&qlI3>X!Bwd@}0SH=!QcPRT!lZKLMRu+t&>$EZJIgBhi<+LfSjG1fqqPBqbtEF~o0E=NE z9B9cpavXDUZ+GtTla^y_1=BPw0_%Z=?vCQF3pTCTqqH;5mB@J=XWVB=H{|A%8lHJP zx~SST>eTG0eq+R*1|7~|2hkoGRI%c^kcKxHXS>RbLuoZEd&7XrIc%1;OkxTfrCI2k z)w5VV&E;v|AN#7zw0g}|s@3sqjeXCEcbO4J^Drh|8!ZkYd^=3-??~S$EhBJ;3)+_M zIQm?JZoie*zmz_#BFjqXUaJ<%L@){u>2_x@7aviguI%yKx1snw>&3S7;fk!E2ss|V z0gWZr_BrDmuEZd&`zhXARhJH7ys^~w_m~+dE`5ofJ;JY3()hKE*(cvUg}D{JcJt2Y-ikvP=-@n&+4B6$7U{sp!M0{yQ}FCwBr2a;a}#v3*HaAVGaZ>mih1b62bRd z*Qr3z0#N41^1k-H*yGO+-xSd~@W;^$udu6seYYI0du*(YF*XK4vZ8}?r#}Hm%}Vi{ z-x%>zx-BdDvvY#1GN6UaqQ>3``$E@oBnYz57#n->WUMZI`xp6UKX4j*SE>Z5CH@LQ zM|SY@4alwjBHxUs+s4M64nQ&)B@nd9lb`Q*TD60vg%EshcJ|g5M|E`hVF-HNIo|eE zS?g0y;JdYa2ePKjnlqup8Y-ZqHD8o87mNZ;<@pc}i^UNLC=^y`E;Jp)%!D)*v_N>~G_`|Ep#7Gn z_O=eDwsz)14yP>5%;g~?eo1G+cYxt@vIE<>kc0?)p73SEaiB|rIJoq=5E?_o%Art% zFUy_{?Y1|zv;pUK=qp#QHS4g741_^o@Cb|?S`LFI;0ZVq1}}$`lS5&_4p9z6M5Bl} zG!Bo)V(ENNFh@)slx4V*eBK@wS`|+7gYzp(k5X zQDOfCN-H4E3FL)T%*<^ZEstB8+kI3Bfh`{Yj0=ARbPa2ip%dqR~VQ z?#mhlAQkvR?S*WQ3;nmcML=2qy_Xt-?g`hL3UW@igTg^YrO9^K0^;FxxNm)WR;aJE z!xBi{-qzII!NJnTLdektbXj@mL>%0j`zA`+{h^eJeve0?aG(|cv)^xkG>xt8PQm4X zD|DPdb+VD+^EnGj8b9B;km4>aB`uZxAI@2RqBgk?wE^rvOQJB7HGRZx6{KkFXs!i@ zEnJh`hx3<*gyIynRzUDbMiEdr6cGbnxhV~PC_ajzAL^e8O{u-Ug5r{oSGnKZ@r13} zhxUTc9KK4_hbx#5dLIUdCw*N`%!d>~8;$pR_&A$S&hFD*tvFFGJhDDR6aQt)tb^2z z?Llw;S_hl#+p|GAe2K|#li)w8^}s}}d;YW54^rtP9-^Q0?7yyG1e7?n{$2FRxqXV) zr?UOC79oDwA}c0aWKz)cJNaZEKQeKZ@YRl&1o}7*k3)ezj`>O-m-r|(Y)u@2BsMn_ z0$(0?aI&|sG&Mdo-WScB?9KUY8~-Qg56?+3;)qXc1u~L=L!n4tsdX;2$MUSPqvaVe zlZ=l|<%tsHKPv%;CV&qAwK-`Wbimxn(H@-PZl~kN&F#Ty@3FPFHU^`Wf2R9Bk+M4` zr0gkB_M~+Av_F2e_5UX+J0H~WVeaOihu@#n`Q`PFONak2Xa50m7LStqH_2JMKj~xN zTFwG_GTFQNefys*X_G%`lm9!C_RH2C=Y<3`3WffP%v%NR1svMpt2_cM3qDAu|9-X; 
diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py
index 648c8de6..783fce69 100644
--- a/src/highdicom/_multiframe.py
+++ b/src/highdicom/_multiframe.py
@@ -1082,7 +1082,7 @@ def _get_pixels_by_frame(
         dtype = np.dtype(dtype)
 
         # Check dtype is suitable
-        if dtype.kind not in ('u', 'i', 'f'):
+        if dtype.kind not in ('u', 'i', 'f', 'b'):
             raise ValueError(
                 f'Data type "{dtype}" is not suitable.'
             )
diff --git a/src/highdicom/content.py b/src/highdicom/content.py
index fe727fb1..1bcea8d2 100644
--- a/src/highdicom/content.py
+++ b/src/highdicom/content.py
@@ -2731,6 +2731,29 @@ def __init__(
         palette_color_lut_uid: Union[highdicom.UID, str, None], optional
             Unique identifier for the palette color lookup table.
 
+
+        Examples
+        --------
+
+        Create a ``PaletteColorLUTTransformation`` from a built-in colormap
+        from the ``matplotlib`` python package.
+
+        >>> from matplotlib import colormaps
+        >>> import numpy as np
+        >>> import highdicom as hd
+        >>> # Use the built-in 'gist_rainbow_r' colormap
+        >>> cmap = colormaps['gist_rainbow_r']
+        >>> # Create an 8-bit RGBA LUT array from the colormap
+        >>> num_entries = 10  # e.g. number of classes in a segmentation
+        >>> lut_data = cmap(np.arange(num_entries) / (num_entries + 1), bytes=True)
+        >>>
+        >>> lut = hd.PaletteColorLUTTransformation(
+        ...     red_lut=hd.PaletteColorLUT(0, lut_data[:, 0], 'red'),
+        ...     green_lut=hd.PaletteColorLUT(0, lut_data[:, 1], 'green'),
+        ...     blue_lut=hd.PaletteColorLUT(0, lut_data[:, 2], 'blue'),
+        ...     palette_color_lut_uid=hd.UID(),
+        ... )
+
         """  # noqa: E501
         super().__init__()
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index 8d1ad95f..48a85980 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -1,8 +1,7 @@
 """Module for SOP classes of the SEG modality."""
 import logging
-from collections import Counter, defaultdict
+from collections import defaultdict
 from concurrent.futures import Executor, Future, ProcessPoolExecutor
-from contextlib import contextmanager
 from copy import deepcopy
 from itertools import chain
 from os import PathLike
@@ -11,9 +10,7 @@
     Any,
     BinaryIO,
     Dict,
-    Generator,
     Iterator,
-    Iterable,
     List,
     Optional,
     Sequence,
@@ -96,8 +93,7 @@
     _check_code_string,
     _check_long_string,
 )
-from highdicom.uid import UID as hd_UID
-from highdicom.volume import Volume, VolumeGeometry
+from highdicom.volume import Volume
 
 
 logger = logging.getLogger(__name__)
@@ -162,6 +158,9 @@ def _check_numpy_value_representation(
     elif dtype.kind in ('i', 'u'):
         if max_val > np.iinfo(dtype).max:
             raise_error = True
+    elif dtype.kind == 'b':
+        if max_val > 1:
+            raise_error = True
     if raise_error:
         raise ValueError(
             "The maximum output value of the segmentation array is "
@@ -3186,44 +3185,6 @@ def _get_pixels_by_seg_frame(
                 'Segment numbers array contains invalid values.'
             )
 
-        if self.segmentation_type == SegmentationTypeValues.LABELMAP:
-            out_array = self._get_pixels_by_frame(
-                output_shape=output_shape,
-                indices_iterator=indices_iterator,
-            )
-            max_segment = max(self.segment_numbers)
-            segment_numbers_list = segment_numbers.tolist()
-            remapping = np.zeros(max_segment + 1, np.uint16)
-            bg_val = self.get('PixelPaddingValue', 0)
-            if relabel and not combine_segments:
-                for s in range(max_segment + 1):
-                    remapping[s] = (
-                        s if s in segment_numbers_list
-                        else bg_val
-                    )
-            else:
-                for s in range(max_segment + 1):
-                    remapping[s] = (
-                        segment_numbers_list.index(s) + 1
-                        if s in segment_numbers_list
-                        else bg_val
-                    )
-
-            if not np.array_equal(
-                remapping,
-                np.arange(max_segment + 1),
-            ):
-                out_array = remapping[out_array]
-
-            if not combine_segments:
-                # Obscure trick to calculate one-hot
-                shape = out_array.shape
-                flat_array = out_array.flatten()
-                out_array = np.eye(max_segment + 1)[flat_array]
-                out_array = out_array.reshape(shape)
-
-            return out_array
-
         # Determine output type
         if combine_segments:
             max_output_val = (
@@ -3245,11 +3206,73 @@
         dtype = np.dtype(dtype)
 
         # Check dtype is suitable
-        if dtype.kind not in ('u', 'i', 'f'):
+        if dtype.kind not in ('u', 'i', 'f', 'b'):
             raise ValueError(
                 f'Data type "{dtype}" is not suitable.'
) + _check_numpy_value_representation(max_output_val, dtype) + num_output_segments = len(segment_numbers) + + if self.segmentation_type == SegmentationTypeValues.LABELMAP: + + need_remap = not np.array_equal( + segment_numbers, + self.segment_numbers + ) + + intermediate_dtype = ( + _get_unsigned_dtype(self.BitsStored) + if need_remap else dtype + ) + + out_array = self._get_pixels_by_frame( + output_shape=output_shape, + indices_iterator=indices_iterator, + dtype=intermediate_dtype, + ) + num_input_segments = max(self.segment_numbers) + 1 + + if need_remap: + remap_dtype = ( + dtype if combine_segments else intermediate_dtype + ) + remapping = np.zeros(num_input_segments + 1, dtype=remap_dtype) + bg_val = self.get('PixelPaddingValue', 0) + if combine_segments and not relabel: + # A remapping that just zeroes out unused segments + for s in range(num_input_segments): + remapping[s] = ( + s if s in segment_numbers + else bg_val + ) + else: + # A remapping that applies relabelling logic + for s in range(num_input_segments + 1): + remapping[s] = ( + np.nonzero(segment_numbers == s)[0][0] + 1 + if s in segment_numbers + else bg_val + ) + + out_array = remapping[out_array] + + if not combine_segments: + # Obscure trick to calculate one-hot + shape = out_array.shape + flat_array = out_array.flatten() + out_array = np.eye( + num_output_segments + 1, + dtype=dtype, + )[flat_array] + + out_shape = (*shape, num_output_segments) + + # Remove the background segment (channel 0) + out_array = out_array[:, 1:].reshape(out_shape) + + return out_array + if will_be_rescaled: intermediate_dtype = np.uint8 if dtype.kind != 'f': @@ -3259,9 +3282,6 @@ def _get_pixels_by_seg_frame( ) else: intermediate_dtype = dtype - _check_numpy_value_representation(max_output_val, dtype) - - num_segments = len(segment_numbers) if combine_segments: if self.pixel_array.ndim == 2: @@ -3335,7 +3355,7 @@ def _get_pixels_by_seg_frame( out_array = self._get_pixels_by_frame( output_shape=output_shape, indices_iterator=indices_iterator, - num_channels=num_segments, + num_channels=num_output_segments, dtype=intermediate_dtype, ) @@ -3422,55 +3442,14 @@ def are_dimension_indices_unique( 'in this image.' ) - dimension_index_pointers.append( - tag_for_keyword('ReferencedSegmentNumber') - ) + if self.segmentation_type != SegmentationTypeValues.LABELMAP: + dimension_index_pointers.append( + tag_for_keyword('ReferencedSegmentNumber') + ) return super().are_dimension_indices_unique( dimension_index_pointers ) - def _are_referenced_sop_instances_unique(self) -> bool: - """Check if Referenced SOP Instance UIDs uniquely identify frames. - - This is a pre-requisite for requesting segmentation masks defined by - the SOP Instance UIDs of their source frames, such as using the - Segmentation.get_pixels_by_source_instance() method. - - Returns - ------- - bool - True if the ReferencedSOPInstanceUID (in combination with the - segment number) uniquely identifies frames of the segmentation - image. - - """ - cur = self._db_con.cursor() - n_unique_combos = cur.execute( - 'SELECT COUNT(*) FROM ' - '(SELECT 1 FROM FrameLUT GROUP BY ReferencedSOPInstanceUID, ' - 'ReferencedSegmentNumber)' - ).fetchone()[0] - return n_unique_combos == self.NumberOfFrames - - def _are_referenced_frames_unique(self) -> bool: - """Check if Referenced Frame Numbers uniquely identify frames. - - Returns - ------- - bool - True if the ReferencedFrameNumber (in combination with the - segment number) uniquely identifies frames of the segmentation - image. 
- - """ - cur = self._db_con.cursor() - n_unique_combos = cur.execute( - 'SELECT COUNT(*) FROM ' - '(SELECT 1 FROM FrameLUT GROUP BY ReferencedFrameNumber, ' - 'ReferencedSegmentNumber)' - ).fetchone()[0] - return n_unique_combos == self.NumberOfFrames - def _get_segment_remap_values( self, segment_numbers: Sequence[int], @@ -3687,7 +3666,10 @@ def get_pixels_by_source_instance( # Check that the combination of source instances and segment numbers # uniquely identify segmentation frames - if not self._are_referenced_sop_instances_unique(): + columns = ['ReferencedSOPInstanceUID'] + if self.segmentation_type != SegmentationTypeValues.LABELMAP: + columns.append('ReferencedSegmentNumber') + if not self._are_columns_unique(columns): raise RuntimeError( 'Source SOP instance UIDs and segment numbers do not ' 'uniquely identify frames of the segmentation image.' @@ -3953,7 +3935,10 @@ def get_pixels_by_source_frame( # Check that the combination of frame numbers and segment numbers # uniquely identify segmentation frames - if not self._are_referenced_frames_unique(): + columns = ['ReferencedFrameNumber'] + if self.segmentation_type != SegmentationTypeValues.LABELMAP: + columns.append('ReferencedSegmentNumber') + if not self._are_columns_unique(columns): raise RuntimeError( 'Source frame numbers and segment numbers do not ' 'uniquely identify frames of the segmentation image.' diff --git a/tests/test_seg.py b/tests/test_seg.py index 61fc662a..ceb0661b 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -3557,6 +3557,13 @@ def setUp(self): self._sm_control_seg_ds ) + self._sm_control_labelmap_seg_ds = dcmread( + 'data/test_files/seg_image_sm_control_labelmap.dcm' + ) + self._sm_control_labelmap_seg = Segmentation.from_dataset( + self._sm_control_labelmap_seg_ds + ) + self._ct_binary_seg_ds = dcmread( 'data/test_files/seg_image_ct_binary.dcm' ) @@ -3602,6 +3609,7 @@ def setUp(self): @staticmethod @pytest.fixture( params=[ + bool, np.int8, np.uint8, np.int16, @@ -3628,6 +3636,11 @@ def combine_segments(request): def relabel(request): return request.param + @staticmethod + @pytest.fixture(params=['_sm_control_seg', '_sm_control_labelmap_seg']) + def seg_attr_name(request): + return request.param + def test_from_dataset(self): assert isinstance(self._sm_control_seg, Segmentation) @@ -3647,6 +3660,12 @@ def test_segread(self): assert isinstance(seg, Segmentation) seg = segread('data/test_files/seg_image_sm_dots_tiled_full.dcm') assert isinstance(seg, Segmentation) + seg = segread('data/test_files/seg_image_sm_control_labelmap.dcm') + assert isinstance(seg, Segmentation) + seg = segread( + 'data/test_files/seg_image_sm_control_labelmap_palette_color.dcm' + ) + assert isinstance(seg, Segmentation) def test_properties(self): # SM segs @@ -3878,6 +3897,7 @@ def test_get_pixels_by_source_frames_combine(self): def test_get_pixels_with_dtype( self, + seg_attr_name, numpy_dtype, combine_segments, relabel, @@ -3885,28 +3905,106 @@ def test_get_pixels_with_dtype( source_sop_uid = self._sm_control_seg.get_source_image_uids()[0][-1] source_frames_valid = [1, 2, 4, 5] - seg = self._sm_control_seg - pixels = seg.get_pixels_by_source_frame( - source_sop_instance_uid=source_sop_uid, - source_frame_numbers=source_frames_valid, - segment_numbers=[1, 4, 9], - combine_segments=combine_segments, - relabel=relabel, - dtype=numpy_dtype, - ) - assert pixels.dtype == numpy_dtype - if combine_segments: - expected_shape = (len(source_frames_valid), seg.Rows, seg.Columns) - if relabel: - expected_vals = np.array([0, 3]) # only 
seg 9 in these frames + seg = getattr(self, seg_attr_name) + + if numpy_dtype == bool and combine_segments: + max_val = 3 if relabel else 9 + msg = ( + "The maximum output value of the segmentation array is " + f"{max_val}, which is too large be represented using dtype " + f"bool." + ) + with pytest.raises(ValueError, match=msg): + seg.get_pixels_by_source_frame( + source_sop_instance_uid=source_sop_uid, + source_frame_numbers=source_frames_valid, + segment_numbers=[1, 4, 9], + combine_segments=combine_segments, + relabel=relabel, + dtype=numpy_dtype, + ) + else: + pixels = seg.get_pixels_by_source_frame( + source_sop_instance_uid=source_sop_uid, + source_frame_numbers=source_frames_valid, + segment_numbers=[1, 4, 9], + combine_segments=combine_segments, + relabel=relabel, + dtype=numpy_dtype, + ) + assert pixels.dtype == numpy_dtype + if combine_segments: + expected_shape = ( + len(source_frames_valid), seg.Rows, seg.Columns + ) + if relabel: + # only seg 9 in these frames + expected_vals = np.array([0, 3]) + else: + # only seg 9 in these frames + expected_vals = np.array([0, 9]) else: - expected_vals = np.array([0, 9]) # only seg 9 in these frames + expected_shape = ( + len(source_frames_valid), seg.Rows, seg.Columns, 3 + ) + expected_vals = np.array([0, 1]) + assert pixels.shape == expected_shape assert np.array_equal(np.unique(pixels), expected_vals) + + def test_get_total_pixel_matrix_with_dtype( + self, + seg_attr_name, + numpy_dtype, + combine_segments, + relabel, + ): + seg = getattr(self, seg_attr_name) + subregion_rows = 30 + subregion_columns = 30 + + if numpy_dtype == bool and combine_segments: + max_val = 3 if relabel else 9 + msg = ( + "The maximum output value of the segmentation array is " + f"{max_val}, which is too large be represented using dtype " + f"bool." 
+ ) + with pytest.raises(ValueError, match=msg): + seg.get_total_pixel_matrix( + segment_numbers=[1, 4, 9], + row_end=1+subregion_rows, + column_end=1+subregion_columns, + combine_segments=combine_segments, + relabel=relabel, + dtype=numpy_dtype, + ) else: - expected_shape = ( - len(source_frames_valid), seg.Rows, seg.Columns, 3 + pixels = seg.get_total_pixel_matrix( + row_end=1+subregion_rows, + column_end=1+subregion_columns, + segment_numbers=[1, 4, 9], + combine_segments=combine_segments, + relabel=relabel, + dtype=numpy_dtype, ) - assert pixels.shape == expected_shape + assert pixels.dtype == numpy_dtype + if combine_segments: + expected_shape = ( + subregion_rows, subregion_columns, + ) + if relabel: + # only seg 9 in these frames + expected_vals = np.array([0, 3]) + else: + # only seg 9 in these frames + expected_vals = np.array([0, 9]) + else: + expected_shape = ( + subregion_rows, subregion_columns, 3 + ) + expected_vals = np.array([0, 1]) + assert pixels.shape == expected_shape + assert np.array_equal(np.unique(pixels), expected_vals) def test_get_default_dimension_index_pointers(self): ptrs = self._sm_control_seg.get_default_dimension_index_pointers() From f448936801bf75b53cc9bd4201cd3e79434488b2 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 13 Oct 2024 14:35:34 -0400 Subject: [PATCH 70/93] Style fixes --- src/highdicom/_multiframe.py | 10 ++-- src/highdicom/seg/content.py | 8 ++-- src/highdicom/seg/sop.py | 29 +++++++---- src/highdicom/spatial.py | 66 ++++++++++++------------- src/highdicom/volume.py | 51 ++++++++++---------- tests/test_multiframe.py | 6 +-- tests/test_seg.py | 27 +++++++---- tests/test_spatial.py | 38 +++++++++++---- tests/test_volume.py | 93 +++++++++++++++++++++++------------- 9 files changed, 199 insertions(+), 129 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 783fce69..02af8b72 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -1342,7 +1342,7 @@ def _iterate_indices_for_stack( ] if len(set(all_dimensions)) != len(all_dimensions): raise ValueError( - 'Dimensions used for stack, channel and filter must all be ' + 'Dimensions used for stack, channel, and filter must all be ' 'distinct.' ) @@ -1375,7 +1375,7 @@ def _iterate_indices_for_stack( # Filters if norm_filters is not None: filter_comparisons = [] - for c, v in norm_filters: + for c, v in norm_filters.items(): if isinstance(v, str): v = f"'{v}'" filter_comparisons.append(f'L.{c} = {v}') @@ -1445,7 +1445,7 @@ def _iterate_indices_for_stack( 'SELECT ' ' F.OutputFrameIndex,' # frame index of the output array ' L.FrameNumber - 1,' # frame *index* of segmentation image - ' C.OutputChannelIndex ' # frame index of the output array + ' C.OutputChannelIndex ' # channel index of the output array f'FROM {stack_table_name} F ' 'INNER JOIN FrameLUT L' f' ON {stack_join_str} ' @@ -1589,8 +1589,8 @@ def _iterate_indices_for_tiled_region( ] if len(all_dimensions) != len(all_dimensions): raise ValueError( - 'Dimensions used for stack, channel and filter must all be ' - 'distinct.' + 'Dimensions used for tile position, channel, and filter ' + 'must all be distinct.' 
) # Check for uniqueness diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py index 5e3efec8..82a5b0fa 100644 --- a/src/highdicom/seg/content.py +++ b/src/highdicom/seg/content.py @@ -33,7 +33,6 @@ ) - class SegmentDescription(Dataset): """Dataset describing a segment based on the Segment Description macro.""" @@ -617,7 +616,10 @@ def get_index_values( self, plane_positions: Sequence[PlanePositionSequence], image_orientation: Optional[Sequence[float]] = None, - index_convention: Union[str, Sequence[Union[PixelIndexDirections, str]]] = ( + index_convention: Union[ + str, + Sequence[Union[PixelIndexDirections, str]] + ] = ( PixelIndexDirections.R, PixelIndexDirections.D, ), @@ -681,7 +683,7 @@ def get_index_values( reference, and excludes values of the Referenced Segment Number attribute. - """ + """ # noqa: E501 if self._coordinate_system is None: raise RuntimeError( 'Cannot calculate index values for multiple plane ' diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index 48a85980..a6a72a9d 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -1313,7 +1313,7 @@ def __init__( unique_dimension_values = [None] # Dimension Organization Type - dimension_organization_type = self._check_tiled_dimension_organization_type( + dimension_organization_type = self._check_tiled_dimension_organization( dimension_organization_type=dimension_organization_type, is_tiled=is_tiled, omit_empty_frames=omit_empty_frames, @@ -1529,7 +1529,7 @@ def __init__( ) continue - # Log a debug message + # Log a debug message if segment_number is None: msg = f'add plane #{plane_index}' else: @@ -1902,7 +1902,7 @@ def _add_slide_coordinate_metadata( self.ImageCenterPointCoordinatesSequence = [center_item] @staticmethod - def _check_tiled_dimension_organization_type( + def _check_tiled_dimension_organization( dimension_organization_type: Union[ DimensionOrganizationTypeValues, str, @@ -1980,11 +1980,11 @@ def _check_tiled_dimension_organization_type( ): raise ValueError( 'A value of "TILED_FULL" for parameter ' - '"dimension_organization_type" is not permitted because ' - 'the "plane_positions" of the segmentation ' + '"dimension_organization_type" is not permitted ' + 'because the "plane_positions" of the segmentation ' 'do not follow the relevant requirements. See ' 'https://dicom.nema.org/medical/dicom/current/output/' - 'chtml/part03/sect_C.7.6.17.3.html#sect_C.7.6.17.3.' + 'chtml/part03/sect_C.7.6.17.3.html#sect_C.7.6.17.3 .' ) if omit_empty_frames: raise ValueError( @@ -3702,7 +3702,9 @@ def get_pixels_by_source_instance( channel_indices = {'ReferencedSegmentNumber': segment_numbers} with self._iterate_indices_for_stack( - stack_indices={'ReferencedSOPInstanceUID': source_sop_instance_uids}, + stack_indices={ + 'ReferencedSOPInstanceUID': source_sop_instance_uids + }, channel_indices=channel_indices, remap_channel_indices=remap_channel_indices, ) as indices: @@ -4097,6 +4099,17 @@ def get_volume( ) n_vol_positions = self.volume_geometry.spatial_shape[0] + # Check that the combination of frame numbers and segment numbers + # uniquely identify segmentation frames + columns = ['VolumePosition'] + if self.segmentation_type != SegmentationTypeValues.LABELMAP: + columns.append('ReferencedSegmentNumber') + if not self._are_columns_unique(columns): + raise RuntimeError( + 'Volume positions and segment numbers do not ' + 'uniquely identify frames of the segmentation image.' 
+ ) + if slice_start < 0: slice_start = n_vol_positions + slice_start @@ -4123,7 +4136,6 @@ def get_volume( "empty volume." ) - remap_channel_indices = self._get_segment_remap_values( segment_numbers, combine_segments=combine_segments, @@ -4396,7 +4408,6 @@ def get_pixels_by_dimension_index_values( ) raise ValueError(msg) - if self.segmentation_type == SegmentationTypeValues.LABELMAP: channel_indices = None else: diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 7fbe60bc..67d71488 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -774,10 +774,10 @@ def get_closest_patient_orientation(affine: np.ndarray) -> Tuple[ three axes of the volume represented by the affine matrix, the closest direction in the patient frame of reference coordinate system. - """ + """ # noqa: E501 if ( - affine.ndim != 2 - or ( + affine.ndim != 2 or + ( affine.shape != (3, 3) and affine.shape != (4, 4) ) @@ -805,10 +805,13 @@ def get_closest_patient_orientation(affine: np.ndarray) -> Tuple[ ] for d, sortind in enumerate(sort_indices.T): # Check that this axis has not already been used. This can happen if - # one or more array axis is at 45% to some FoR axis. In this case take - # the next index in the sort list. + # one or more array axis is at 45 deg to some FoR axis. In this case + # take the next index in the sort list. for i in sortind: - if pos_directions[i] not in result and neg_directions[i] not in result: + if ( + pos_directions[i] not in result and + neg_directions[i] not in result + ): break if alignments[i, d] > 0: @@ -911,7 +914,7 @@ def get_normal_vector( np.ndarray: Unit normal vector as a NumPy array with shape (3, ). - """ + """ # noqa: E501 image_orientation_arr = np.array(image_orientation, dtype=np.float64) if image_orientation_arr.ndim != 1 or image_orientation_arr.shape[0] != 6: raise ValueError( @@ -994,11 +997,11 @@ def create_rotation_matrix( Returns ------- numpy.ndarray - 3 x 3 rotation matrix. Pre-multiplying an image coordinate in the format (column - index, row index, slice index) by this matrix gives the x, y, z - position in the frame-of-reference coordinate system. + 3 x 3 rotation matrix. Pre-multiplying an image coordinate in the + format (column index, row index, slice index) by this matrix gives the + x, y, z position in the frame-of-reference coordinate system. - """ + """ # noqa: E501 if len(image_orientation) != 6: raise ValueError('Argument "image_orientation" must have length 6.') index_convention_ = _normalize_pixel_index_convention(index_convention) @@ -1008,7 +1011,7 @@ def create_rotation_matrix( column_cosines = np.array(image_orientation[3:], dtype=float) if isinstance(pixel_spacing, Sequence): if len(pixel_spacing) != 2: - raise Value.LEF( + raise ValueError( "A sequence passed to argument 'pixel_spacing' must have " "length 2." ) @@ -1159,7 +1162,7 @@ def _create_affine_transformation_matrix( format (column index, row index, slice index, 1) by this matrix gives the (x, y, z, 1) position in the frame-of-reference coordinate system. 
- """ + """ # noqa: E501 if not isinstance(image_position, Sequence): raise TypeError('Argument "image_position" must be a sequence.') if len(image_position) != 3: @@ -1175,12 +1178,11 @@ def _create_affine_transformation_matrix( index_convention_ = _normalize_pixel_index_convention(index_convention) if ( - PixelIndexDirections.L in index_convention_ or + PixelIndexDirections.L in index_convention_ or PixelIndexDirections.U in index_convention_ ): raise ValueError( - f"Index convention cannot include 'L' or 'U'." - + "Index convention cannot include 'L' or 'U'." ) translation = np.array([float(x) for x in image_position], dtype=float) @@ -1313,12 +1315,12 @@ def rotation_for_patient_orientation( norm_orientation = _normalize_patient_orientation(patient_orientation) direction_to_vector_mapping = { - PatientOrientationValuesBiped.L: np.array([ 1., 0., 0.]), - PatientOrientationValuesBiped.R: np.array([-1., 0., 0.]), - PatientOrientationValuesBiped.P: np.array([ 0., 1., 0.]), - PatientOrientationValuesBiped.A: np.array([ 0., -1., 0.]), - PatientOrientationValuesBiped.H: np.array([ 0., 0., 1.]), - PatientOrientationValuesBiped.F: np.array([ 0., 0., -1.]), + PatientOrientationValuesBiped.L: np.array([1., 0., 0.]), + PatientOrientationValuesBiped.R: np.array([-1., 0., 0.]), + PatientOrientationValuesBiped.P: np.array([0., 1., 0.]), + PatientOrientationValuesBiped.A: np.array([0., -1., 0.]), + PatientOrientationValuesBiped.H: np.array([0., 0., 1.]), + PatientOrientationValuesBiped.F: np.array([0., 0., -1.]), } if isinstance(spacing, float): @@ -1445,7 +1447,7 @@ def _translate_affine_matrix( """ if len(pixel_offset) != 3: raise ValueError( - f"Argument 'pixel_spacing' must have three elements." + "Argument 'pixel_spacing' must have three elements." ) offset_arr = np.array(pixel_offset) origin = affine[:3, 3] @@ -3115,7 +3117,7 @@ def get_series_volume_positions( in the volume. If the image positions do not represent a volume, returns None. - """ + """ # noqa: E501 if len(datasets) == 0: raise ValueError("List must not be empty.") # We stipluate that a single image does represent a volume with spacing 0.0 @@ -3258,7 +3260,7 @@ def get_volume_positions( in the volume. If the image positions do not represent a volume, returns None. - """ + """ # noqa: E501 if not sort: if allow_duplicates: raise ValueError( @@ -3449,7 +3451,7 @@ def get_plane_sort_index( is sorted along the positive direction of the normal vector of the imaging plane. - """ + """ # noqa: E501 pos_arr = np.array(image_positions) if pos_arr.ndim != 2 or pos_arr.shape[1] != 3: raise ValueError("Argument 'image_positions' must have shape (N, 3)") @@ -3516,7 +3518,7 @@ def get_dataset_sort_index( sorted along the positive direction of the normal vector of the imaging plane. - """ + """ # noqa: E501 if is_multiframe_image(datasets[0]): raise ValueError('Datasets should be single frame images.') if 'ImageOrientationPatient' not in datasets[0]: @@ -3583,7 +3585,7 @@ def sort_datasets( sorted along the positive direction of the normal vector of the imaging plane. - """ + """ # noqa: E501 sort_index = get_dataset_sort_index( datasets, index_convention=index_convention, @@ -3598,14 +3600,14 @@ def _get_slice_distances( ) -> np.ndarray: """Get distances of a set of planes from the origin. - For each plane position, find (signed) distance from origin along the vector normal - to the imaging plane. + For each plane position, find (signed) distance from origin along the + vector normal to the imaging plane. 
Parameters ---------- image_positions: np.ndarray - Image positions array. 2D array of shape (N, 3) where N is the number of - planes and each row gives the (x, y, z) image position of a plane. + Image positions array. 2D array of shape (N, 3) where N is the number + of planes and each row gives the (x, y, z) image position of a plane. normal_vector: np.ndarray Unit normal vector (perpendicular to the imaging plane). diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index d64e4ebd..4826c555 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -58,9 +58,6 @@ # TODO make multiframe public # TODO figure out type hinting for _VolumeBase # TODO inheritance of are_dimension_indices_unique -# TODO tests for labelmap segmentation with combine_segments False -# TODO include labelmap test case -# TODO test filter class _VolumeBase(ABC): @@ -101,7 +98,6 @@ def __init__( self._affine = affine self._frame_of_reference_uid = frame_of_reference_uid - @property @abstractmethod def spatial_shape(self) -> Tuple[int, int, int]: @@ -337,7 +333,7 @@ def get_plane_orientation(self) -> PlaneOrientationSequence: Returns ------- highdicom.PlaneOrientationSequence: - Plane orientation sequence + Plane orientation sequence. """ return PlaneOrientationSequence( @@ -389,7 +385,9 @@ def inverse_affine(self) -> np.ndarray: return np.linalg.inv(self._affine) @property - def direction_cosines(self) -> Tuple[float, float, float, float, float, float]: + def direction_cosines(self) -> Tuple[ + float, float, float, float, float, float + ]: """Tuple[float, float, float, float, float float]: Tuple of 6 floats giving the direction cosines of the @@ -532,8 +530,8 @@ def _prepare_getitem_index( def _check_int(val: int, dim: int) -> None: if ( - val < -self.spatial_shape[dim] - or val >= self.spatial_shape[dim] + val < -self.spatial_shape[dim] or + val >= self.spatial_shape[dim] ): raise IndexError( f'Index {val} is out of bounds for axis {dim} with size ' @@ -683,7 +681,7 @@ def _prepare_pad_width( if isinstance(pad_width, int): if pad_width < 0: raise ValueError( - f"Argument 'pad_width' cannot contain negative values." + "Argument 'pad_width' cannot contain negative values." ) full_pad_width: List[List[int]] = [[pad_width, pad_width]] * 3 elif isinstance(pad_width, Sequence): @@ -692,7 +690,7 @@ def _prepare_pad_width( raise ValueError("Invalid arrangement in 'pad_width'.") if pad_width[0] < 0 or pad_width[1] < 0: raise ValueError( - f"Argument 'pad_width' cannot contain negative values." + "Argument 'pad_width' cannot contain negative values." ) full_pad_width = [list(pad_width)] * 3 elif isinstance(pad_width[0], Sequence): @@ -716,7 +714,6 @@ def _prepare_pad_width( return new_affine, full_pad_width - def _permute_affine(self, indices: Sequence[int]) -> np.ndarray: """Get affine after permuting spatial axes. @@ -735,7 +732,7 @@ def _permute_affine(self, indices: Sequence[int]) -> np.ndarray: """ if len(indices) != 3 or set(indices) != {0, 1, 2}: raise ValueError( - f'Argument "indices" must consist of the values 0, 1, and 2 ' + 'Argument "indices" must consist of the values 0, 1, and 2 ' 'in some order.' ) @@ -1319,7 +1316,7 @@ def geometry_equal( return np.array_equal(self._affine, other._affine) else: return np.allclose( - self._affine, + self._affine, other._affine, atol=tol, ) @@ -1560,9 +1557,9 @@ def from_attributes( spacing_between_slices: float Spacing between slices in millimeter units in the frame of reference coordinate system space. 
Corresponds to the DICOM - attribute "SpacingBetweenSlices" (however, this may not be present in - many images and may need to be inferred from "ImagePositionPatient" - attributes of consecutive slices). + attribute "SpacingBetweenSlices" (however, this may not be present + in many images and may need to be inferred from + "ImagePositionPatient" attributes of consecutive slices). number_of_frames: int Number of frames in the volume. frame_of_reference_uid: Union[str, None], optional @@ -1926,7 +1923,7 @@ def from_image_series( if apply_voi_transform: frame = apply_voi_lut(frame, ds, voi_transform_index) if ( - apply_palette_color_lut and + apply_palette_color_lut and ds.PhotometricInterpretation == 'PALETTE_COLOR' ): frame = apply_color_lut(frame, ds) @@ -2078,9 +2075,9 @@ def from_attributes( spacing_between_slices: float Spacing between slices in millimeter units in the frame of reference coordinate system space. Corresponds to the DICOM - attribute "SpacingBetweenSlices" (however, this may not be present in - many images and may need to be inferred from "ImagePositionPatient" - attributes of consecutive slices). + attribute "SpacingBetweenSlices" (however, this may not be present + in many images and may need to be inferred from + "ImagePositionPatient" attributes of consecutive slices). frame_of_reference_uid: Union[str, None], optional Frame of reference UID, if known. Corresponds to DICOM attribute FrameOfReferenceUID. @@ -2420,8 +2417,8 @@ def normalize_mean_std( else: new_array = ( (self.array - self.array.mean()) / - (self.array.std() / output_std) - + output_mean + (self.array.std() / output_std) + + output_mean ) return self.with_array(new_array) @@ -2465,7 +2462,7 @@ def normalize_min_max( ): new_array = self.array.astype(np.float64) for c in range(self.number_of_channels): - channel = new_array[:,:, :, c] + channel = new_array[:, :, :, c] imin = channel.min() imax = channel.max() scale_factor = output_range / (imax - imin) @@ -2510,7 +2507,7 @@ def apply_window( self, *, window_min: Optional[float] = None, - window_max: Optional[float]= None, + window_max: Optional[float] = None, window_center: Optional[float] = None, window_width: Optional[float] = None, output_min: float = 0.0, @@ -2547,11 +2544,11 @@ def apply_window( Volume with windowed intensities. 
""" - if window_min is None != window_max is None: + if (window_min is None) != (window_max is None): raise TypeError("Invalid combination of inputs specified.") - if window_center is None != window_width is None: + if (window_center is None) != (window_width is None): raise TypeError("Invalid combination of inputs specified.") - if window_center is None == window_min is None: + if (window_center is None) == (window_min is None): raise TypeError("Invalid combination of inputs specified.") if window_min is None: diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py index 576e7bce..696d680f 100644 --- a/tests/test_multiframe.py +++ b/tests/test_multiframe.py @@ -2,7 +2,7 @@ import pickle import numpy as np from pydicom import dcmread -from pydicom.data import get_testdata_file, get_testdata_files +from pydicom.data import get_testdata_file from highdicom._multiframe import MultiFrameImage @@ -15,8 +15,8 @@ def test_slice_spacing(): expected_affine = np.array( [ - [0.0, 0.0, -0.388672, 99.5], - [0.0, 0.388672, 0.0, -301.5], + [0.0, 0.0, -0.388672, 99.5], + [0.0, 0.388672, 0.0, -301.5], [10.0, 0.0, 0.0, -159], [0.0, 0.0, 0.0, 1.0], ] diff --git a/tests/test_seg.py b/tests/test_seg.py index ceb0661b..a26d33fc 100644 --- a/tests/test_seg.py +++ b/tests/test_seg.py @@ -1934,8 +1934,8 @@ def test_construction_3d_multiframe(self): # ordered the wrong way volume_multiframe = deepcopy(self._ct_multiframe) positions = [ - fm.PlanePositionSequence[0].ImagePositionPatient - for fm in volume_multiframe.PerFrameFunctionalGroupsSequence + fm.PlanePositionSequence[0].ImagePositionPatient + for fm in volume_multiframe.PerFrameFunctionalGroupsSequence ] positions = positions[::-1] for pos, fm in zip( @@ -3972,16 +3972,16 @@ def test_get_total_pixel_matrix_with_dtype( with pytest.raises(ValueError, match=msg): seg.get_total_pixel_matrix( segment_numbers=[1, 4, 9], - row_end=1+subregion_rows, - column_end=1+subregion_columns, + row_end=1 + subregion_rows, + column_end=1 + subregion_columns, combine_segments=combine_segments, relabel=relabel, dtype=numpy_dtype, ) else: pixels = seg.get_total_pixel_matrix( - row_end=1+subregion_rows, - column_end=1+subregion_columns, + row_end=1 + subregion_rows, + column_end=1 + subregion_columns, segment_numbers=[1, 4, 9], combine_segments=combine_segments, relabel=relabel, @@ -5151,7 +5151,7 @@ def test_multiple_source_single_pixel_array_multisegment(self): assert hasattr(seg, 'PyramidUID') seg_pix = seg.get_total_pixel_matrix() assert np.array_equal( - seg.get_total_pixel_matrix(), + seg_pix, pix[0] ) @@ -5253,7 +5253,9 @@ def test_multiple_source_multiple_pixel_arrays_multisegment(self): pix[0] ) - def test_multiple_source_multiple_pixel_arrays_multisegment_from_labelmap(self): + def test_multiple_source_multiple_pixel_arrays_multisegment_from_labelmap( + self + ): # Test construction when given multiple source images and multiple # segmentation images mask = np.argmax(self._seg_pix_multisegment, axis=3).astype(np.uint8) @@ -5295,3 +5297,12 @@ def test_multiple_source_multiple_pixel_arrays_multisegment_labelmap(self): software_versions='1', device_serial_number='123', ) + + assert len(segs) == len(self._source_pyramid) + for pix, seg in zip(self._downsampled_pix_arrays_multisegment, segs): + mask = np.argmax(pix, axis=3).astype(np.uint8) + assert hasattr(seg, 'PyramidUID') + assert np.array_equal( + seg.get_total_pixel_matrix(combine_segments=True), + mask[0] + ) diff --git a/tests/test_spatial.py b/tests/test_spatial.py index d0e65033..e28f3778 100644 --- 
a/tests/test_spatial.py +++ b/tests/test_spatial.py @@ -729,11 +729,21 @@ def test_map_coordinates_between_images(params, inputs, expected_outputs): @pytest.mark.parametrize( 'image_orientation,orientation_str', [ - ([ 1, 0, 0, 0, 1, 0], 'LPH'), - ([ 0, 1, 0, 1, 0, 0], 'PLF'), - ([-1, 0, 0, 0, 1, 0], 'RPF'), - ([ 0, 0, -1, 1, 0, 0], 'FLA'), - ([np.cos(np.pi / 4), -np.sin(np.pi / 4), 0, np.sin(np.pi / 4), np.cos(np.pi / 4), 0], 'LPH'), + ([1, 0, 0, 0, 1, 0], 'LPH'), + ([0, 1, 0, 1, 0, 0], 'PLF'), + ([-1, 0, 0, 0, 1, 0], 'RPF'), + ([0, 0, -1, 1, 0, 0], 'FLA'), + ( + [ + np.cos(np.pi / 4), + -np.sin(np.pi / 4), + 0, + np.sin(np.pi / 4), + np.cos(np.pi / 4), + 0 + ], + 'LPH' + ), ] ) def test_get_closest_patient_orientation( @@ -770,8 +780,8 @@ def test_rotation_from_patient_orientation_spacing(): ) expected = np.array( [ - [ 0.0, 0.0, 2.5], - [ 0.0, 2.0, 0.0], + [0.0, 0.0, 2.5], + [0.0, 2.0, 0.0], [-1.0, 0.0, 0.0], ] ) @@ -1112,8 +1122,18 @@ def test_transform_affine_matrix(): ) expected = np.array( [ - [-np.cos(np.radians(30)), -np.sin(np.radians(30)), 0.0, -26.20577137], - [-np.sin(np.radians(30)), np.cos(np.radians(30)), 0.0, 40.7], + [ + -np.cos(np.radians(30)), + -np.sin(np.radians(30)), + 0.0, + -26.20577137, + ], + [ + -np.sin(np.radians(30)), + np.cos(np.radians(30)), + 0.0, + 40.7, + ], [0.0, 0.0, -1.0, 7.8], [0.0, 0.0, 0.0, 1.0], ] diff --git a/tests/test_volume.py b/tests/test_volume.py index 19e371e8..0fee9866 100644 --- a/tests/test_volume.py +++ b/tests/test_volume.py @@ -2,9 +2,7 @@ import numpy as np import pydicom from pydicom.data import get_testdata_file -from pydicom.pixel_data_handlers.util import pixel_dtype import pytest -from highdicom import spatial from highdicom.spatial import ( @@ -46,7 +44,10 @@ def test_transforms(): ) plane_positions = volume.get_plane_positions() for i, pos in enumerate(plane_positions): - assert np.array_equal(pos[0].ImagePositionPatient, [0.0, 0.0, -10.0 * i]) + assert np.array_equal( + pos[0].ImagePositionPatient, + [0.0, 0.0, -10.0 * i] + ) indices = np.array([[1, 2, 3]]) coords = volume.map_indices_to_reference(indices) @@ -170,7 +171,10 @@ def test_volume_single_frame(): assert direction[:, 0] @ direction[:, 1] == 0.0 assert direction[:, 0] @ direction[:, 2] == 0.0 assert (direction[:, 0] ** 2).sum() == 1.0 - assert volume.position == tuple(ct_series[1].ImagePositionPatient) # sorting + + # (sorting) + assert volume.position == tuple(ct_series[1].ImagePositionPatient) + assert volume.pixel_spacing == tuple(ct_series[0].PixelSpacing) slice_spacing = 1.25 assert volume.spacing == (slice_spacing, *ct_series[0].PixelSpacing[::-1]) @@ -264,10 +268,10 @@ def test_indexing(): subvolume = volume[3] assert subvolume.shape == (1, 50, 50) expected_affine = np.array([ - [ 0.0, 0.0, 1.0, 0.0], - [ 0.0, 1.0, 0.0, 0.0], - [-10.0, 0.0, 0.0, -30.0], - [ 0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [-10.0, 0.0, 0.0, -30.0], + [0.0, 0.0, 0.0, 1.0], ]) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:4]) @@ -292,10 +296,10 @@ def test_indexing(): subvolume = volume[3, 7] assert subvolume.shape == (1, 1, 50) expected_affine = np.array([ - [ 0.0, 0.0, 1.0, 0.0], - [ 0.0, 1.0, 0.0, 7.0], - [-10.0, 0.0, 0.0, -30.0], - [ 0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 7.0], + [-10.0, 0.0, 0.0, -30.0], + [0.0, 0.0, 0.0, 1.0], ]) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[3:4, 7:8]) @@ -314,10 +318,10 @@ def 
test_indexing(): subvolume = volume[-4] assert subvolume.shape == (1, 50, 50) expected_affine = np.array([ - [ 0.0, 0.0, 1.0, 0.0], - [ 0.0, 1.0, 0.0, 0.0], - [-10.0, 0.0, 0.0, -210.0], - [ 0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [-10.0, 0.0, 0.0, -210.0], + [0.0, 0.0, 0.0, 1.0], ]) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[-4:-3]) @@ -332,10 +336,10 @@ def test_indexing(): subvolume = volume[12:16:2, ::-1, :] assert subvolume.shape == (2, 50, 50) expected_affine = np.array([ - [ 0.0, 0.0, 1.0, 0.0], - [ 0.0, -1.0, 0.0, 49.0], - [-20.0, 0.0, 0.0, -120.0], - [ 0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, -1.0, 0.0, 49.0], + [-20.0, 0.0, 0.0, -120.0], + [0.0, 0.0, 0.0, 1.0], ]) assert np.array_equal(subvolume.affine, expected_affine) assert np.array_equal(subvolume.array, array[12:16:2, ::-1]) @@ -344,10 +348,10 @@ def test_indexing(): def test_indexing_source_dimension_2(): array = np.random.randint(0, 100, (50, 50, 25)) affine = np.array([ - [ 0.0, 0.0, 1.0, 0.0], - [ 0.0, 1.0, 0.0, 0.0], - [10.0, 0.0, 0.0, 30.0], - [ 0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [10.0, 0.0, 0.0, 30.0], + [0.0, 0.0, 0.0, 1.0], ]) volume = Volume( array=array, @@ -361,10 +365,10 @@ def test_indexing_source_dimension_2(): def test_array_setter(): array = np.random.randint(0, 100, (50, 50, 25)) affine = np.array([ - [ 0.0, 0.0, 1.0, 0.0], - [ 0.0, 1.0, 0.0, 0.0], - [10.0, 0.0, 0.0, 30.0], - [ 0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [10.0, 0.0, 0.0, 30.0], + [0.0, 0.0, 0.0, 1.0], ]) volume = Volume( @@ -656,13 +660,36 @@ def test_match_geometry_failure_rotation(): @pytest.mark.parametrize( 'fp,glob', [ - (Path(__file__).parent.parent.joinpath('data/test_files/ct_image.dcm'), None), - (str(Path(__file__).parent.parent.joinpath('data/test_files/ct_image.dcm')), None), - ([Path(__file__).parent.parent.joinpath('data/test_files/ct_image.dcm')], None), + ( + Path(__file__).parents[1].joinpath('data/test_files/ct_image.dcm'), + None + ), + ( + str( + Path(__file__).parents[1].joinpath( + 'data/test_files/ct_image.dcm' + ) + ), + None + ), + ( + [ + Path(__file__).parents[1].joinpath( + 'data/test_files/ct_image.dcm' + ) + ], + None + ), (get_testdata_file('eCT_Supplemental.dcm'), None), ([get_testdata_file('eCT_Supplemental.dcm')], None), - (Path(__file__).parent.parent.joinpath('data/test_files/'), 'ct_image.dcm'), - (str(Path(__file__).parent.parent.joinpath('data/test_files/')), 'ct_image.dcm'), + ( + Path(__file__).parents[1].joinpath('data/test_files/'), + 'ct_image.dcm' + ), + ( + str(Path(__file__).parents[1].joinpath('data/test_files/')), + 'ct_image.dcm' + ), ( [ get_testdata_file('dicomdirtests/77654033/CT2/17136'), From 799e4e09e9d1244b42a73042c43b3edc0f674680 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Wed, 16 Oct 2024 22:46:43 -0400 Subject: [PATCH 71/93] Fixes to slice thickness --- docs/seg.rst | 2 +- src/highdicom/content.py | 5 +++++ src/highdicom/volume.py | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/seg.rst b/docs/seg.rst index 2abdda37..96a19e1e 100644 --- a/docs/seg.rst +++ b/docs/seg.rst @@ -447,7 +447,7 @@ segments. Note that passing a "label map" is purely a convenience provided by (`highdicom` splits the label map into multiple single-segment frames and stores these, as required by the standard). 
-Therefore, The following snippet produces an equivalent SEG image to the +Therefore, the following snippet produces an equivalent SEG image to the previous snippet, but passes the mask as a label map rather than as a stack of segments. diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 1bcea8d2..c5398dcf 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -411,6 +411,11 @@ def __eq__(self, other: DataElementSequence) -> bool: if len(other) != 1: raise ValueError('Second item must have length 1.') + if ( + hasattr(other[0], 'SliceThickness') != + hasattr(self[0], 'SliceThickness') + ): + return False if other[0].SliceThickness != self[0].SliceThickness: return False if other[0].PixelSpacing != self[0].PixelSpacing: diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 4826c555..89612f36 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -355,7 +355,7 @@ def get_pixel_measures(self) -> PixelMeasuresSequence: """ return PixelMeasuresSequence( pixel_spacing=self.pixel_spacing, - slice_thickness=None, + slice_thickness=self.spacing_between_slices, spacing_between_slices=self.spacing_between_slices, ) From c72cd9fe5969e35f6bbdae3c159ae1eec0f81a4b Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Wed, 16 Oct 2024 22:56:17 -0400 Subject: [PATCH 72/93] Fix background segment description --- src/highdicom/seg/sop.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index a6a72a9d..f56bfe79 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -1014,7 +1014,6 @@ def __init__( ] else: self.SegmentSequence = segment_descriptions - self.SegmentSequence = segment_descriptions # Checks on pixels and overlap pixel_array, segments_overlap = self._check_and_cast_pixel_array( From cf1969f85978b9329ea211c04387ef97efb5162f Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Thu, 17 Oct 2024 09:09:31 -0400 Subject: [PATCH 73/93] Fixes for volume-based segment creation --- src/highdicom/seg/sop.py | 30 +++++++++++++++--------------- src/highdicom/spatial.py | 2 +- src/highdicom/volume.py | 2 ++ 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index f56bfe79..ce25d6c5 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -1102,7 +1102,10 @@ def __init__( ) and ( not user_provided_measures or - pixel_measures == source_pixel_measures + ( + pixel_measures[0].PixelSpacing == + source_pixel_measures[0].PixelSpacing + ) ) ) @@ -1182,7 +1185,10 @@ def __init__( ) and ( not user_provided_measures or - pixel_measures == source_pixel_measures + ( + pixel_measures[0].PixelSpacing == + source_pixel_measures[0].PixelSpacing + ) ) ) @@ -2712,19 +2718,13 @@ def from_dataset( else: seg._coordinate_system = None - for i, segment in enumerate(seg.SegmentSequence, 1): - if segment.SegmentNumber != i: - raise AttributeError( - 'Segments are expected to start at 1 and be consecutive ' - 'integers.' - ) - - for i, s in enumerate(seg.SegmentSequence, 1): - if s.SegmentNumber != i: - raise ValueError( - 'Segment numbers in the segmentation image must start at ' - '1 and increase by 1 with the segments sequence.' - ) + if seg.SegmentationType != 'LABELMAP': + for i, segment in enumerate(seg.SegmentSequence, 1): + if segment.SegmentNumber != i: + raise AttributeError( + 'Segments are expected to start at 1 and be ' + 'consecutive integers.' 
+ ) # Convert contained items to highdicom types # Segment descriptions diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index 67d71488..e05aaec7 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -3093,7 +3093,7 @@ def get_series_volume_positions( This is used in combination with the ``handedness`` to determine the positive direction used to order frames. handedness: Union[highdicom.enum.AxisHandedness, str], optional - Choose the frame order in order such that the frame axis creates a + Choose the frame order such that the frame axis creates a coordinate system with this handedness in the when combined with the within-frame convention given by ``index_convention``. enforce_handedness: bool, optional diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 89612f36..ac1f1399 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -58,6 +58,8 @@ # TODO make multiframe public # TODO figure out type hinting for _VolumeBase # TODO inheritance of are_dimension_indices_unique +# TODO allow non-consecutive segments when reading (confirm with standard)? +# TODO check logic around slice thickness and spacing for seg creation class _VolumeBase(ABC): From ced72c16223a9d55eca6da56631bdd28c3799c33 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sat, 2 Nov 2024 14:27:59 -0400 Subject: [PATCH 74/93] Move to using typing_extensions.Self --- pyproject.toml | 1 + src/highdicom/_multiframe.py | 4 +- src/highdicom/ann/content.py | 5 ++- src/highdicom/ann/sop.py | 3 +- src/highdicom/content.py | 15 ++++--- src/highdicom/io.py | 3 +- src/highdicom/ko/content.py | 3 +- src/highdicom/ko/sop.py | 3 +- src/highdicom/sc/sop.py | 3 +- src/highdicom/seg/content.py | 3 +- src/highdicom/seg/sop.py | 3 +- src/highdicom/spatial.py | 13 +++--- src/highdicom/sr/coding.py | 5 ++- src/highdicom/sr/content.py | 43 +++++++++--------- src/highdicom/sr/sop.py | 5 ++- src/highdicom/sr/templates.py | 27 ++++++------ src/highdicom/sr/value_types.py | 41 +++++++++--------- src/highdicom/uid.py | 3 +- src/highdicom/volume.py | 77 ++++++++++++++++----------------- 19 files changed, 139 insertions(+), 121 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a6c9f2be..87147bba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ dependencies = [ "pillow>=8.3", "pydicom>=3.0.1", "pyjpegls>=1.0.0", + "typing-extensions>=4.0.0", ] [project.optional-dependencies] diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index 02af8b72..06284cd8 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -18,6 +18,8 @@ Union, cast, ) +from typing_extensions import Self + import numpy as np from pydicom import Dataset from pydicom.tag import BaseTag @@ -110,7 +112,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'MultiFrameImage': + ) -> Self: """Create a MultiFrameImage from an existing pydicom Dataset. Parameters diff --git a/src/highdicom/ann/content.py b/src/highdicom/ann/content.py index 91296b7e..10f8d4bb 100644 --- a/src/highdicom/ann/content.py +++ b/src/highdicom/ann/content.py @@ -1,6 +1,7 @@ """Content that is specific to Annotation IODs.""" from copy import deepcopy from typing import cast, List, Optional, Sequence, Tuple, Union +from typing_extensions import Self import numpy as np from pydicom.dataset import Dataset @@ -119,7 +120,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True - ) -> 'Measurements': + ) -> Self: """Construct instance from an existing dataset. 
Parameters @@ -770,7 +771,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'AnnotationGroup': + ) -> Self: """Construct instance from an existing dataset. Parameters diff --git a/src/highdicom/ann/sop.py b/src/highdicom/ann/sop.py index 1f361b64..ff0e1123 100644 --- a/src/highdicom/ann/sop.py +++ b/src/highdicom/ann/sop.py @@ -14,6 +14,7 @@ Tuple, Union, ) +from typing_extensions import Self import numpy as np from pydicom import dcmread @@ -418,7 +419,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'MicroscopyBulkSimpleAnnotations': + ) -> Self: """Construct instance from an existing dataset. Parameters diff --git a/src/highdicom/content.py b/src/highdicom/content.py index c5398dcf..84c5b637 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -3,6 +3,7 @@ import datetime from copy import deepcopy from typing import cast, Dict, List, Optional, Union, Sequence, Tuple +from typing_extensions import Self import numpy as np from pydicom.dataset import Dataset @@ -98,7 +99,7 @@ def from_sequence( cls, sequence: DataElementSequence, copy: bool = True, - ) -> 'AlgorithmIdentificationSequence': + ) -> Self: """Construct instance from an existing data element sequence. Parameters @@ -344,7 +345,7 @@ def from_sequence( cls, sequence: DataElementSequence, copy: bool = True, - ) -> 'PixelMeasuresSequence': + ) -> Self: """Create a PixelMeasuresSequence from an existing Sequence. Parameters @@ -554,7 +555,7 @@ def from_sequence( cls, sequence: DataElementSequence, copy: bool = True, - ) -> 'PlanePositionSequence': + ) -> Self: """Create a PlanePositionSequence from an existing Sequence. The coordinate system is inferred from the attributes in the sequence. @@ -694,7 +695,7 @@ def from_sequence( cls, sequence: DataElementSequence, copy: bool = True, - ) -> 'PlaneOrientationSequence': + ) -> Self: """Create a PlaneOrientationSequence from an existing Sequence. The coordinate system is inferred from the attributes in the sequence. @@ -796,7 +797,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'IssuerOfIdentifier': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1331,7 +1332,7 @@ def specimen_type(self) -> Union[CodedConcept, None]: def from_dataset( cls, dataset: Dataset, - ) -> 'SpecimenPreparationStep': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1653,7 +1654,7 @@ def primary_anatomic_structures(self) -> Union[List[CodedConcept], None]: return self.get("PrimaryAnatomicStructureSequence") @classmethod - def from_dataset(cls, dataset: Dataset) -> 'SpecimenDescription': + def from_dataset(cls, dataset: Dataset) -> Self: """Construct object from an existing dataset. 
Parameters diff --git a/src/highdicom/io.py b/src/highdicom/io.py index 5f960fe7..4898c923 100644 --- a/src/highdicom/io.py +++ b/src/highdicom/io.py @@ -3,6 +3,7 @@ import sys import traceback from typing import List, Tuple, Union +from typing_extensions import Self from pathlib import Path import numpy as np @@ -271,7 +272,7 @@ def filename(self) -> str: """str: Path to the image file""" return str(self._filename) - def __enter__(self) -> 'ImageFileReader': + def __enter__(self) -> Self: self.open() return self diff --git a/src/highdicom/ko/content.py b/src/highdicom/ko/content.py index 922c6e99..d7c1e9a2 100644 --- a/src/highdicom/ko/content.py +++ b/src/highdicom/ko/content.py @@ -1,5 +1,6 @@ """Content that is specific to Key Object Selection IODs.""" from typing import cast, List, Optional, Sequence, Union +from typing_extensions import Self from pydicom.dataset import Dataset from pydicom.sr.coding import Code @@ -137,7 +138,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = True - ) -> 'KeyObjectSelection': + ) -> Self: """Construct object from a sequence of datasets. Parameters diff --git a/src/highdicom/ko/sop.py b/src/highdicom/ko/sop.py index cacc1600..ec33e19b 100644 --- a/src/highdicom/ko/sop.py +++ b/src/highdicom/ko/sop.py @@ -1,6 +1,7 @@ """Module for SOP Classes of Key Object (KO) IODs.""" import logging from typing import Any, cast, List, Optional, Sequence, Tuple, Union +from typing_extensions import Self from copy import deepcopy from pydicom.dataset import Dataset @@ -184,7 +185,7 @@ def resolve_reference(self, sop_instance_uid: str) -> Tuple[str, str, str]: ) from e @classmethod - def from_dataset(cls, dataset: Dataset) -> 'KeyObjectSelectionDocument': + def from_dataset(cls, dataset: Dataset) -> Self: """Construct object from an existing dataset. Parameters diff --git a/src/highdicom/sc/sop.py b/src/highdicom/sc/sop.py index 0646d4ae..9dc18bb9 100644 --- a/src/highdicom/sc/sop.py +++ b/src/highdicom/sc/sop.py @@ -3,6 +3,7 @@ import logging import datetime from typing import Any, List, Optional, Sequence, Tuple, Union +from typing_extensions import Self import numpy as np from pydicom.uid import SecondaryCaptureImageStorage @@ -452,7 +453,7 @@ def from_ref_dataset( ] = None, transfer_syntax_uid: str = ImplicitVRLittleEndian, **kwargs: Any - ) -> 'SCImage': + ) -> Self: """Constructor that copies patient and study from an existing dataset. This provides a more concise way to construct an SCImage when an diff --git a/src/highdicom/seg/content.py b/src/highdicom/seg/content.py index 82a5b0fa..0d7b62a4 100644 --- a/src/highdicom/seg/content.py +++ b/src/highdicom/seg/content.py @@ -1,6 +1,7 @@ """Content that is specific to Segmentation IODs.""" from copy import deepcopy from typing import cast, List, Optional, Sequence, Tuple, Union +from typing_extensions import Self import numpy as np from pydicom.datadict import keyword_for_tag, tag_for_keyword @@ -166,7 +167,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True - ) -> 'SegmentDescription': + ) -> Self: """Construct instance from an existing dataset. Parameters diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py index ce25d6c5..c4f321e7 100644 --- a/src/highdicom/seg/sop.py +++ b/src/highdicom/seg/sop.py @@ -18,6 +18,7 @@ Union, cast, ) +from typing_extensions import Self import warnings import numpy as np @@ -2670,7 +2671,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'Segmentation': + ) -> Self: """Create instance from an existing dataset. 
Parameters diff --git a/src/highdicom/spatial.py b/src/highdicom/spatial.py index e05aaec7..cfb46220 100644 --- a/src/highdicom/spatial.py +++ b/src/highdicom/spatial.py @@ -8,6 +8,7 @@ Tuple, Union, ) +from typing_extensions import Self from pydicom import Dataset import numpy as np @@ -1662,7 +1663,7 @@ def for_image( dataset: Dataset, frame_number: Optional[int] = None, for_total_pixel_matrix: bool = False, - ) -> 'PixelToReferenceTransformer': + ) -> Self: """Construct a transformer for a given image or image frame. Parameters @@ -1879,7 +1880,7 @@ def for_image( for_total_pixel_matrix: bool = False, round_output: bool = True, drop_slice_index: bool = False, - ) -> 'ReferenceToPixelTransformer': + ) -> Self: """Construct a transformer for a given image or image frame. Parameters @@ -2124,7 +2125,7 @@ def for_images( for_total_pixel_matrix_from: bool = False, for_total_pixel_matrix_to: bool = False, round_output: bool = True, - ) -> 'PixelToPixelTransformer': + ) -> Self: """Construct a transformer for two given images or image frames. Parameters @@ -2329,7 +2330,7 @@ def for_image( dataset: Dataset, frame_number: Optional[int] = None, for_total_pixel_matrix: bool = False, - ) -> 'ImageToReferenceTransformer': + ) -> Self: """Construct a transformer for a given image or image frame. Parameters @@ -2543,7 +2544,7 @@ def for_image( frame_number: Optional[int] = None, for_total_pixel_matrix: bool = False, drop_slice_coord: bool = False, - ) -> 'ReferenceToImageTransformer': + ) -> Self: """Construct a transformer for a given image or image frame. Parameters @@ -2781,7 +2782,7 @@ def for_images( frame_number_to: Optional[int] = None, for_total_pixel_matrix_from: bool = False, for_total_pixel_matrix_to: bool = False, - ) -> 'ImageToImageTransformer': + ) -> Self: """Construct a transformer for two given images or image frames. Parameters diff --git a/src/highdicom/sr/coding.py b/src/highdicom/sr/coding.py index 22553839..de6f407e 100644 --- a/src/highdicom/sr/coding.py +++ b/src/highdicom/sr/coding.py @@ -1,6 +1,7 @@ from copy import deepcopy import logging from typing import Optional, Union +from typing_extensions import Self from pydicom.dataset import Dataset from pydicom.sr.coding import Code @@ -96,7 +97,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True - ) -> 'CodedConcept': + ) -> Self: """Construct a CodedConcept from an existing dataset. Parameters @@ -147,7 +148,7 @@ def from_dataset( return concept @classmethod - def from_code(cls, code: Union[Code, 'CodedConcept']) -> 'CodedConcept': + def from_code(cls, code: Union[Code, 'CodedConcept']) -> Self: """Construct a CodedConcept for a pydicom Code. Parameters diff --git a/src/highdicom/sr/content.py b/src/highdicom/sr/content.py index 06fec588..fcc5de14 100644 --- a/src/highdicom/sr/content.py +++ b/src/highdicom/sr/content.py @@ -2,6 +2,7 @@ import logging from copy import deepcopy from typing import cast, List, Optional, Sequence, Union +from typing_extensions import Self import numpy as np from pydicom.uid import ( @@ -158,7 +159,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'LongitudinalTemporalOffsetFromEvent': + ) -> Self: """Construct object from an existing dataset. 
Parameters @@ -236,7 +237,7 @@ def from_source_image( cls, image: Dataset, referenced_frame_numbers: Optional[Sequence[int]] = None - ) -> 'SourceImageForMeasurementGroup': + ) -> Self: """Construct the content item directly from an image dataset Parameters @@ -270,7 +271,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'SourceImageForMeasurementGroup': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -348,7 +349,7 @@ def from_source_image( cls, image: Dataset, referenced_frame_numbers: Optional[Sequence[int]] = None - ) -> 'SourceImageForMeasurement': + ) -> Self: """Construct the content item directly from an image dataset Parameters @@ -382,7 +383,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'SourceImageForMeasurement': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -460,7 +461,7 @@ def from_source_image( cls, image: Dataset, referenced_frame_numbers: Optional[Sequence[int]] = None - ) -> 'SourceImageForRegion': + ) -> Self: """Construct the content item directly from an image dataset Parameters @@ -494,7 +495,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'SourceImageForRegion': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -569,7 +570,7 @@ def from_source_image( cls, image: Dataset, referenced_frame_numbers: Optional[Sequence[int]] = None - ) -> 'SourceImageForSegmentation': + ) -> Self: """Construct the content item directly from an image dataset Parameters @@ -603,7 +604,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'SourceImageForSegmentation': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -657,7 +658,7 @@ def __init__(self, referenced_series_instance_uid: str): def from_source_image( cls, image: Dataset, - ) -> 'SourceSeriesForSegmentation': + ) -> Self: """Construct the content item directly from an image dataset Parameters @@ -682,7 +683,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'SourceSeriesForSegmentation': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -783,7 +784,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'ImageRegion': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -859,7 +860,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'ImageRegion3D': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1011,7 +1012,7 @@ def __init__( def from_sequence( cls, sequence: Sequence[Dataset] - ) -> 'VolumeSurface': + ) -> Self: """Construct an object from an existing content sequence. Parameters @@ -1219,7 +1220,7 @@ def __init__(self, referenced_sop_instance_uid: str): def from_source_value_map( cls, value_map_dataset: Dataset, - ) -> 'RealWorldValueMap': + ) -> Self: """Construct the content item directly from an image dataset Parameters @@ -1247,7 +1248,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'RealWorldValueMap': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1371,7 +1372,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'FindingSite': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1454,7 +1455,7 @@ def __init__( def from_sequence( cls, sequence: Sequence[Dataset] - ) -> 'ReferencedSegmentationFrame': + ) -> Self: """Construct an object from items within an existing content sequence. 
Parameters @@ -1524,7 +1525,7 @@ def from_segmentation( segmentation: Dataset, frame_number: Optional[Union[int, Sequence[int]]] = None, segment_number: Optional[int] = None - ) -> 'ReferencedSegmentationFrame': + ) -> Self: """Construct the content item directly from a segmentation dataset Parameters @@ -1811,7 +1812,7 @@ def __init__( def from_sequence( cls, sequence: Sequence[Dataset] - ) -> 'ReferencedSegment': + ) -> Self: """Construct an object from items within an existing content sequence. Parameters @@ -1913,7 +1914,7 @@ def from_segmentation( segmentation: Dataset, segment_number: int, frame_numbers: Optional[Sequence[int]] = None - ) -> 'ReferencedSegment': + ) -> Self: """Construct the content item directly from a segmentation dataset Parameters diff --git a/src/highdicom/sr/sop.py b/src/highdicom/sr/sop.py index 60bd09f0..c0b54c95 100644 --- a/src/highdicom/sr/sop.py +++ b/src/highdicom/sr/sop.py @@ -17,6 +17,7 @@ Union, BinaryIO, ) +from typing_extensions import Self from pydicom import dcmread from pydicom.dataset import Dataset @@ -734,7 +735,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'ComprehensiveSR': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -885,7 +886,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True - ) -> 'Comprehensive3DSR': + ) -> Self: """Construct object from an existing dataset. Parameters diff --git a/src/highdicom/sr/templates.py b/src/highdicom/sr/templates.py index 272ffbfd..8e5a62f4 100644 --- a/src/highdicom/sr/templates.py +++ b/src/highdicom/sr/templates.py @@ -3,6 +3,7 @@ import logging from copy import deepcopy from typing import cast, Dict, Iterable, List, Optional, Sequence, Tuple, Union +from typing_extensions import Self from pydicom.dataset import Dataset from pydicom.sr.coding import Code @@ -1260,7 +1261,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'PersonObserverIdentifyingAttributes': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -1496,7 +1497,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'DeviceObserverIdentifyingAttributes': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -1650,7 +1651,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'SubjectContextFetus': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -1814,7 +1815,7 @@ def specimen_type(self) -> Union[CodedConcept, None]: def from_image( cls, image: Dataset, - ) -> 'SubjectContextSpecimen': + ) -> Self: """Deduce specimen information from an existing image. This is appropriate, for example, when copying the specimen information @@ -1858,7 +1859,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'SubjectContextSpecimen': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -2071,7 +2072,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'SubjectContextDevice': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -2171,7 +2172,7 @@ def __init__( self.extend(subject_class_specific_context) @classmethod - def from_image(cls, image: Dataset) -> 'Optional[SubjectContext]': + def from_image(cls, image: Dataset) -> Self | None: """Get a subject context inferred from an existing image. Currently this is only supported for subjects that are specimens. 
@@ -2335,7 +2336,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'QualitativeEvaluation': + ) -> Self: """Construct object from a sequence of content items. Parameters @@ -2528,7 +2529,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'Measurement': + ) -> Self: """Construct object from a sequence of content items. Parameters @@ -2818,7 +2819,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> '_MeasurementsAndQualitativeEvaluations': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -3547,7 +3548,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'PlanarROIMeasurementsAndQualitativeEvaluations': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -3821,7 +3822,7 @@ def from_sequence( cls, sequence: Sequence[Dataset], is_root: bool = False - ) -> 'VolumetricROIMeasurementsAndQualitativeEvaluations': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -4252,7 +4253,7 @@ def from_sequence( sequence: Sequence[Dataset], is_root: bool = True, copy: bool = True, - ) -> 'MeasurementReport': + ) -> Self: """Construct object from a sequence of datasets. Parameters diff --git a/src/highdicom/sr/value_types.py b/src/highdicom/sr/value_types.py index 152a12e8..5c81a613 100644 --- a/src/highdicom/sr/value_types.py +++ b/src/highdicom/sr/value_types.py @@ -14,6 +14,7 @@ Tuple, Union, ) +from typing_extensions import Self import numpy as np from pydicom.dataelem import DataElement @@ -158,7 +159,7 @@ def __setattr__( super().__setattr__(name, value) @classmethod - def _from_dataset_derived(cls, dataset: Dataset) -> 'ContentItem': + def _from_dataset_derived(cls, dataset: Dataset) -> Self: """Construct object of derived type from an existing dataset. Parameters @@ -184,7 +185,7 @@ def _from_dataset_derived(cls, dataset: Dataset) -> 'ContentItem': ) # type: ignore @classmethod - def _from_dataset_base(cls, dataset: Dataset) -> 'ContentItem': + def _from_dataset_base(cls, dataset: Dataset) -> Self: if not hasattr(dataset, 'ValueType'): raise AttributeError( 'Dataset is not an SR Content Item because it lacks ' @@ -430,7 +431,7 @@ def index(self, val: ContentItem) -> int: # type: ignore[override] raise ValueError(error_message) from e return index - def find(self, name: Union[Code, CodedConcept]) -> 'ContentSequence': + def find(self, name: Union[Code, CodedConcept]) -> Self: """Find contained content items given their name. Parameters @@ -450,7 +451,7 @@ def find(self, name: Union[Code, CodedConcept]) -> 'ContentSequence': is_sr=self._is_sr ) - def get_nodes(self) -> 'ContentSequence': + def get_nodes(self) -> Self: """Get content items that represent nodes in the content tree. A node is hereby defined as a content item that has a `ContentSequence` @@ -556,7 +557,7 @@ def from_sequence( is_root: bool = False, is_sr: bool = True, copy: bool = True, - ) -> 'ContentSequence': + ) -> Self: """Construct object from a sequence of datasets. Parameters @@ -688,7 +689,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'CodeContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -755,7 +756,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'PnameContentItem': + ) -> Self: """Construct object from existing dataset. 
Parameters @@ -818,7 +819,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'TextContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -890,7 +891,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'TimeContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -962,7 +963,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'DateContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1034,7 +1035,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'DateTimeContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1097,7 +1098,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'UIDRefContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1216,7 +1217,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'NumContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1308,7 +1309,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'ContainerContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1393,7 +1394,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'CompositeContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1524,7 +1525,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'ImageContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1654,7 +1655,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'ScoordContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1799,7 +1800,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'Scoord3DContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -1904,7 +1905,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'TcoordContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters @@ -2023,7 +2024,7 @@ def from_dataset( cls, dataset: Dataset, copy: bool = True, - ) -> 'WaveformContentItem': + ) -> Self: """Construct object from an existing dataset. Parameters diff --git a/src/highdicom/uid.py b/src/highdicom/uid.py index 334bf877..862c8582 100644 --- a/src/highdicom/uid.py +++ b/src/highdicom/uid.py @@ -1,6 +1,7 @@ import logging from uuid import UUID from typing import Optional, Type, TypeVar +from typing_extensions import Self import pydicom @@ -25,7 +26,7 @@ def __new__(cls: Type[T], value: Optional[str] = None) -> T: return super().__new__(cls, value) @classmethod - def from_uuid(cls, uuid: str) -> 'UID': + def from_uuid(cls, uuid: str) -> Self: """Create a DICOM UID from a UUID using the 2.25 root. Parameters diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index ac1f1399..2821edd0 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -2,6 +2,7 @@ from os import PathLike from pathlib import Path from typing import List, Optional, Sequence, Union, Tuple, cast +from typing_extensions import Self import numpy as np @@ -56,8 +57,6 @@ # TODO lazy loading for multiframe # TODO get volume from legacy series # TODO make multiframe public -# TODO figure out type hinting for _VolumeBase -# TODO inheritance of are_dimension_indices_unique # TODO allow non-consecutive segments when reading (confirm with standard)? 
# TODO check logic around slice thickness and spacing for seg creation @@ -522,7 +521,7 @@ def unit_vectors(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: def __getitem__( self, index: Union[int, slice, Tuple[Union[int, slice]]], - ) -> '_VolumeBase': + ) -> Self: pass def _prepare_getitem_index( @@ -638,7 +637,7 @@ def pad( mode: Union[PadModes, str] = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, - ) -> '_VolumeBase': + ) -> Self: pass def _prepare_pad_width( @@ -745,7 +744,7 @@ def _permute_affine(self, indices: Sequence[int]) -> np.ndarray: ) @abstractmethod - def copy(self) -> '_VolumeBase': + def copy(self) -> Self: """Create a copy of the object. Returns @@ -757,7 +756,7 @@ def copy(self) -> '_VolumeBase': pass @abstractmethod - def permute_axes(self, indices: Sequence[int]) -> '_VolumeBase': + def permute_axes(self, indices: Sequence[int]) -> Self: """Create a new volume by permuting the spatial axes. Parameters @@ -778,7 +777,7 @@ def permute_axes(self, indices: Sequence[int]) -> '_VolumeBase': def random_permute_axes( self, axes: Sequence[int] = (0, 1, 2) - ) -> '_VolumeBase': + ) -> Self: """Create a new geometry by randomly permuting the spatial axes. Parameters @@ -839,7 +838,7 @@ def to_patient_orientation( str, Sequence[Union[str, PatientOrientationValuesBiped]], ], - ) -> '_VolumeBase': + ) -> Self: """Rearrange the array to a given orientation. The resulting volume is formed from this volume through a combination @@ -885,7 +884,7 @@ def to_patient_orientation( return result.permute_axes(permute_indices) - def swap_axes(self, axis_1: int, axis_2: int) -> '_VolumeBase': + def swap_axes(self, axis_1: int, axis_2: int) -> Self: """Swap the spatial axes of the array. Parameters @@ -918,7 +917,7 @@ def swap_axes(self, axis_1: int, axis_2: int) -> '_VolumeBase': return self.permute_axes(permutation) - def flip(self, axes: Union[int, Sequence[int]]) -> '_VolumeBase': + def flip(self, axes: Union[int, Sequence[int]]) -> Self: """Flip the spatial axes of the array. Note that this flips the array and updates the affine to reflect the @@ -955,7 +954,7 @@ def flip(self, axes: Union[int, Sequence[int]]) -> '_VolumeBase': return self[tuple(index)] - def random_flip(self, axes: Sequence[int] = (0, 1, 2)) -> '_VolumeBase': + def random_flip(self, axes: Sequence[int] = (0, 1, 2)) -> Self: """Randomly flip the spatial axes of the array. Note that this flips the array and updates the affine to reflect the @@ -1015,7 +1014,7 @@ def ensure_handedness( *, flip_axis: Optional[int] = None, swap_axes: Optional[Sequence[int]] = None, - ) -> '_VolumeBase': + ) -> Self: """Manipulate the volume if necessary to ensure a given handedness. If the volume already has the specified handedness, it is returned @@ -1071,7 +1070,7 @@ def pad_to_shape( mode: PadModes = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, - ) -> '_VolumeBase': + ) -> Self: """Pad volume to given spatial shape. The volume is padded symmetrically, placing the original array at the @@ -1133,7 +1132,7 @@ def pad_to_shape( per_channel=per_channel, ) - def crop_to_shape(self, spatial_shape: Sequence[int]) -> '_VolumeBase': + def crop_to_shape(self, spatial_shape: Sequence[int]) -> Self: """Center-crop volume to a given spatial shape. Parameters @@ -1179,7 +1178,7 @@ def pad_or_crop_to_shape( mode: PadModes = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, - ) -> '_VolumeBase': + ) -> Self: """Pad and/or crop volume to given spatial shape. 
For each dimension where padding is required, the volume is padded @@ -1254,7 +1253,7 @@ def pad_or_crop_to_shape( ) return padded - def random_crop(self, spatial_shape: Sequence[int]) -> '_VolumeBase': + def random_crop(self, spatial_shape: Sequence[int]) -> Self: """Create a random crop of a certain shape from the volume. Parameters @@ -1331,7 +1330,7 @@ def match_geometry( constant_value: float = 0.0, per_channel: bool = False, tol: float = _DEFAULT_EQUALITY_TOLERANCE, - ) -> '_VolumeBase': + ) -> Self: """Match the geometry of this volume to another. This performs a combination of permuting, padding and cropping, and @@ -1530,7 +1529,7 @@ def from_attributes( spacing_between_slices: float, number_of_frames: int, frame_of_reference_uid: Optional[str] = None, - ) -> "VolumeGeometry": + ) -> Self: """Create a volume from DICOM attributes. Parameters @@ -1590,7 +1589,7 @@ def from_attributes( frame_of_reference_uid=frame_of_reference_uid, ) - def copy(self) -> 'VolumeGeometry': + def copy(self) -> Self: """Get an unaltered copy of the geometry. Returns @@ -1627,7 +1626,7 @@ def shape(self) -> Tuple[int, ...]: def __getitem__( self, index: Union[int, slice, Tuple[Union[int, slice]]], - ) -> "VolumeGeometry": + ) -> Self: """Get a sub-volume of this volume as a new volume. Parameters @@ -1659,7 +1658,7 @@ def pad( mode: Union[PadModes, str] = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, - ) -> 'VolumeGeometry': + ) -> Self: """Pad volume along the three spatial dimensions. Parameters @@ -1710,7 +1709,7 @@ def pad( frame_of_reference_uid=self.frame_of_reference_uid, ) - def permute_axes(self, indices: Sequence[int]) -> 'VolumeGeometry': + def permute_axes(self, indices: Sequence[int]) -> Self: """Create a new geometry by permuting the spatial axes. Parameters @@ -1736,7 +1735,7 @@ def permute_axes(self, indices: Sequence[int]) -> 'VolumeGeometry': frame_of_reference_uid=self.frame_of_reference_uid, ) - def with_array(self, array: np.ndarray) -> 'Volume': + def with_array(self, array: np.ndarray) -> Self: """Create a volume using this geometry and an array. Parameters @@ -1830,7 +1829,7 @@ def from_image_series( apply_palette_color_lut: bool = True, apply_icc_transform: bool = True, standardize_color_space: bool = True, - ) -> "Volume": + ) -> Self: """Create volume from a series of single frame images. Parameters @@ -1959,7 +1958,7 @@ def from_image_series( def from_image( cls, dataset: Dataset, - ) -> "Volume": + ) -> Self: """Create volume from a multiframe image. Parameters @@ -2048,7 +2047,7 @@ def from_attributes( pixel_spacing: Sequence[float], spacing_between_slices: float, frame_of_reference_uid: Optional[str] = None, - ) -> "Volume": + ) -> Self: """Create a volume from DICOM attributes. Parameters @@ -2112,7 +2111,7 @@ def from_components( direction: Sequence[float], spacing: Sequence[float], frame_of_reference_uid: Optional[str] = None, - ) -> "Volume": + ) -> Self: """Construct a Volume from components. Parameters @@ -2250,7 +2249,7 @@ def array(self, value: np.ndarray) -> None: ) self._array = value - def astype(self, dtype: type) -> 'Volume': + def astype(self, dtype: type) -> Self: """Get new volume with a new datatype. Parameters @@ -2269,7 +2268,7 @@ def astype(self, dtype: type) -> 'Volume': return self.with_array(new_array) - def copy(self) -> 'Volume': + def copy(self) -> Self: """Get an unaltered copy of the volume. 
Returns @@ -2284,7 +2283,7 @@ def copy(self) -> 'Volume': frame_of_reference_uid=self.frame_of_reference_uid, ) - def with_array(self, array: np.ndarray) -> 'Volume': + def with_array(self, array: np.ndarray) -> Self: """Get a new volume using a different array. The spatial and other metadata will be copied from this volume. @@ -2320,7 +2319,7 @@ def with_array(self, array: np.ndarray) -> 'Volume': def __getitem__( self, index: Union[int, slice, Tuple[Union[int, slice]]], - ) -> "Volume": + ) -> Self: """Get a sub-volume of this volume as a new volume. Parameters @@ -2346,7 +2345,7 @@ def __getitem__( frame_of_reference_uid=self.frame_of_reference_uid, ) - def permute_axes(self, indices: Sequence[int]) -> 'Volume': + def permute_axes(self, indices: Sequence[int]) -> Self: """Create a new volume by permuting the spatial axes. Parameters @@ -2380,7 +2379,7 @@ def normalize_mean_std( per_channel: bool = True, output_mean: float = 0.0, output_std: float = 1.0, - ) -> 'Volume': + ) -> Self: """Normalize the intensities using the mean and variance. The resulting volume has zero mean and unit variance. @@ -2430,7 +2429,7 @@ def normalize_min_max( output_min: float = 0.0, output_max: float = 1.0, per_channel: bool = False, - ) -> 'Volume': + ) -> Self: """Normalize by mapping its full intensity range to a fixed range. Other pixel values are scaled linearly within this range. @@ -2483,7 +2482,7 @@ def clip( self, a_min: Optional[float], a_max: Optional[float], - ) -> 'Volume': + ) -> Self: """Clip voxel intensities to lie within a given range. Parameters @@ -2515,7 +2514,7 @@ def apply_window( output_min: float = 0.0, output_max: float = 1.0, clip: bool = True, - ) -> 'Volume': + ) -> Self: """Apply a window (similar to VOI transform) to the volume. Parameters @@ -2567,7 +2566,7 @@ def apply_window( return self.with_array(new_array) - def squeeze_channel(self) -> 'Volume': + def squeeze_channel(self) -> Self: """Remove a singleton channel axis. If the volume has no channels, returns an unaltered copy. @@ -2587,7 +2586,7 @@ def squeeze_channel(self) -> 'Volume': 'Volume with multiple channels cannot be squeezed.' ) - def ensure_channel(self) -> 'Volume': + def ensure_channel(self) -> Self: """Add a singleton channel axis, if needed. If the volume has channels already, returns an unaltered copy. @@ -2609,7 +2608,7 @@ def pad( mode: Union[PadModes, str] = PadModes.CONSTANT, constant_value: float = 0.0, per_channel: bool = False, - ) -> 'Volume': + ) -> Self: """Pad volume along the three spatial dimensions. Parameters From 7321d02326f427f9859f5d53c52aaaa9e53f840e Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 10 Nov 2024 20:31:24 +0000 Subject: [PATCH 75/93] Add pixel transformations to ImageFileReader --- src/highdicom/content.py | 278 ++++++++++++++++++++++++++++++++ src/highdicom/frame.py | 2 +- src/highdicom/io.py | 334 +++++++++++++++++++++++++++++++++------ src/highdicom/volume.py | 4 +- tests/test_content.py | 31 ++++ 5 files changed, 594 insertions(+), 55 deletions(-) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 84c5b637..ea1a2dc9 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -1975,6 +1975,78 @@ def bits_per_entry(self) -> int: """int: Bits allocated for the lookup table data. 8 or 16.""" return int(self.LUTDescriptor[2]) + @classmethod + def from_dataset( + cls, + dataset: Dataset, + copy: bool = True, + ) -> Self: + """Create a LUT from an existing Dataset. 
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Dataset representing a LUT.
+        copy: bool
+            If True, the underlying dataset is deep-copied such that the
+            original dataset remains intact. If False, this operation will
+            alter the original dataset in place.
+
+        Returns
+        -------
+        highdicom.LUT
+            Constructed object
+
+        """
+        attrs = [
+            'LUTDescriptor',
+            'LUTData'
+        ]
+        for attr in attrs:
+            if attr not in dataset:
+                raise AttributeError(
+                    f"Required attribute '{attr}' is not present in dataset."
+                )
+
+        if copy:
+            dataset_copy = deepcopy(dataset)
+        else:
+            dataset_copy = dataset
+
+        dataset_copy.__class__ = cls
+        return cast(cls, dataset_copy)
+
+    def apply(self, array: np.ndarray) -> np.ndarray:
+        """Apply the LUT to a pixel array.
+
+        Parameters
+        ----------
+        array: np.ndarray
+            Pixel array to which the LUT should be applied. Can be of any
+            shape but must have an integer datatype.
+
+        Returns
+        -------
+        np.ndarray
+            Array with LUT applied.
+
+        """
+        if array.dtype.kind not in ('i', 'u'):
+            raise ValueError(
+                "Array must have an integer datatype."
+            )
+        last_mapped_value = self.first_mapped_value + self.number_of_entries - 1
+        if (
+            array.min() < self.first_mapped_value or
+            array.max() > last_mapped_value
+        ):
+            raise RuntimeError(
+                "Array contains values not in the LUT."
+            )
+
+        return self.lut_data[array - self.first_mapped_value]
+
 
 class ModalityLUT(LUT):
 
@@ -2502,6 +2574,72 @@ def bits_per_entry(self) -> int:
         descriptor = getattr(self, f'{self._attr_name_prefix}Descriptor')
         return int(descriptor[2])
 
+    def apply(self, array: np.ndarray) -> np.ndarray:
+        """Apply the LUT to a pixel array.
+
+        Parameters
+        ----------
+        array: np.ndarray
+            Pixel array to which the LUT should be applied. Can be of any
+            shape but must have an integer datatype.
+
+        Returns
+        -------
+        np.ndarray
+            Array with LUT applied.
+
+        """
+        if array.dtype.kind not in ('i', 'u'):
+            raise ValueError(
+                "Array must have an integer datatype."
+            )
+        last_mapped_value = self.first_mapped_value + self.number_of_entries - 1
+        if (
+            array.min() < self.first_mapped_value or
+            array.max() > last_mapped_value
+        ):
+            raise RuntimeError(
+                "Array contains values not in the LUT."
+            )
+
+        return self.lut_data[array - self.first_mapped_value]
+
+    @classmethod
+    def from_dataset(cls, dataset: Dataset, color: str) -> Self:
+        """Construct from an existing dataset.
+
+        Note that unlike many other from_dataset() methods, this method
+        extracts only the attributes it needs from the original dataset, and
+        always returns a new object.
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Dataset containing the attributes of the Palette Color Lookup Table
+            Transformation.
+        color: str
+            Text representing the color (``red``, ``green``, or
+            ``blue``).
+
+        Returns
+        -------
+        highdicom.PaletteColorLUT
+            New object containing attributes found in ``dataset``.
+
+        """
+        kw_prefix = f'{color.title()}PaletteColorLookupTable'
+        descriptor_kw = kw_prefix + 'Descriptor'
+        data_kw = kw_prefix + 'Data'
+
+        new_ds = Dataset()
+        new_ds._attr_name_prefix = kw_prefix
+
+        for kw in [descriptor_kw, data_kw]:
+            setattr(new_ds, kw, getattr(dataset, kw))
+
+        new_ds.__class__ = cls
+        return cast(cls, new_ds)
+
 
 class SegmentedPaletteColorLUT(Dataset):
 
@@ -2708,6 +2846,42 @@ def bits_per_entry(self) -> int:
         descriptor = getattr(self, f'{self._attr_name_prefix}Descriptor')
         return int(descriptor[2])
 
+    @classmethod
+    def from_dataset(cls, dataset: Dataset, color: str) -> Self:
+        """Construct from an existing dataset.
+
+        Note that unlike many other from_dataset() methods, this method
+        extracts only the attributes it needs from the original dataset, and
+        always returns a new object.
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Dataset containing the attributes of the Palette Color Lookup Table
+            Transformation.
+        color: str
+            Text representing the color (``red``, ``green``, or
+            ``blue``).
+
+        Returns
+        -------
+        highdicom.SegmentedPaletteColorLUT
+            New object containing attributes found in ``dataset``.
+
+        """
+        kw_prefix = f'{color.title()}PaletteColorLookupTable'
+        descriptor_kw = kw_prefix + 'Descriptor'
+        data_kw = 'Segmented' + kw_prefix + 'Data'
+
+        new_ds = Dataset()
+        new_ds._attr_name_prefix = kw_prefix
+
+        for kw in [descriptor_kw, data_kw]:
+            setattr(new_ds, kw, getattr(dataset, kw))
+
+        new_ds.__class__ = cls
+        return cast(cls, new_ds)
+
 
 class PaletteColorLUTTransformation(Dataset):
 
@@ -2859,3 +3033,107 @@ def blue_lut(self) -> Union[PaletteColorLUT, SegmentedPaletteColorLUT]:
 
         """
         return self._color_luts['Blue']
+
+    def apply(self, array: np.ndarray) -> np.ndarray:
+        """Apply the LUT to a pixel array.
+
+        Parameters
+        ----------
+        array: np.ndarray
+            Pixel array to which the LUT should be applied. Can be of any
+            shape but must have an integer datatype.
+
+        Returns
+        -------
+        np.ndarray
+            Array with LUT applied. The RGB channels will be stacked along a
+            new final dimension.
+
+        """
+        if isinstance(self.red_lut, SegmentedPaletteColorLUT):
+            raise RuntimeError(
+                "The 'apply' method is not implemented for segmented LUTs."
+            )
+
+        red_plane = self.red_lut.apply(array)
+        green_plane = self.green_lut.apply(array)
+        blue_plane = self.blue_lut.apply(array)
+
+        return np.stack([red_plane, green_plane, blue_plane], -1)
+
+    @classmethod
+    def from_dataset(cls, dataset: Dataset) -> Self:
+        """Construct from an existing dataset.
+
+        Note that unlike many other from_dataset() methods, this method
+        extracts only the attributes it needs from the original dataset, and
+        always returns a new object.
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Dataset containing the attributes of the Palette Color Lookup Table
+            Transformation.
+
+        Returns
+        -------
+        highdicom.PaletteColorLUTTransformation
+            New object containing attributes found in ``dataset``.
+
+        """
+        new_dataset = Dataset()
+
+        is_segmented = 'SegmentedRedPaletteColorLookupTableData' in dataset
+
+        new_dataset._color_luts = {}
+
+        for color in ['Red', 'Green', 'Blue']:
+            desc_attr = f'{color}PaletteColorLookupTableDescriptor'
+
+            if desc_attr not in dataset:
+                raise AttributeError(
+                    f"Dataset has no attribute '{desc_attr}'."
+                )
+            setattr(
+                new_dataset,
+                desc_attr,
+                getattr(dataset, desc_attr)
+            )
+
+            if is_segmented:
+                data_attr = f'Segmented{color}PaletteColorLookupTableData'
+                wrong_attr = f'{color}PaletteColorLookupTableData'
+            else:
+                data_attr = f'{color}PaletteColorLookupTableData'
+                wrong_attr = f'Segmented{color}PaletteColorLookupTableData'
+
+            if data_attr not in dataset:
+                raise AttributeError(
+                    f"Dataset has no attribute '{data_attr}'."
+                )
+            if wrong_attr in dataset:
+                raise AttributeError(
+                    "Mismatch of segmented LUT and standard LUT found."
+                )
+
+            setattr(
+                new_dataset,
+                data_attr,
+                getattr(dataset, data_attr)
+            )
+
+            if is_segmented:
+                new_dataset._color_luts[color] = (
+                    SegmentedPaletteColorLUT.from_dataset(
+                        new_dataset,
+                        color=color.lower(),
+                    )
+                )
+            else:
+                new_dataset._color_luts[color] = PaletteColorLUT.from_dataset(
+                    new_dataset,
+                    color=color.lower(),
+                )
+
+        new_dataset.__class__ = cls
+        return cast(cls, new_dataset)
diff --git a/src/highdicom/frame.py b/src/highdicom/frame.py
index adc569a3..bafb5734 100644
--- a/src/highdicom/frame.py
+++ b/src/highdicom/frame.py
@@ -145,7 +145,7 @@ def encode_frame(
             'with native encoding.'
         )
         allowable_pis = {
-            1: ['MONOCHROME1', 'MONOCHROME2', 'PALETTE_COLOR'],
+            1: ['MONOCHROME1', 'MONOCHROME2', 'PALETTE COLOR'],
             3: ['RGB', 'YBR_FULL'],
         }[samples_per_pixel]
         if photometric_interpretation not in allowable_pis:
diff --git a/src/highdicom/io.py b/src/highdicom/io.py
index 4898c923..12ab2deb 100644
--- a/src/highdicom/io.py
+++ b/src/highdicom/io.py
@@ -2,7 +2,7 @@
 import logging
 import sys
 import traceback
-from typing import List, Tuple, Union
+from typing import List, Tuple, Union, cast
 from typing_extensions import Self
 from pathlib import Path
 
@@ -23,6 +23,7 @@
 
 from highdicom.frame import decode_frame
 from highdicom.color import ColorManager
+from highdicom.content import LUT, PaletteColorLUTTransformation
 
 logger = logging.getLogger(__name__)
 
@@ -266,6 +267,9 @@ def __init__(self, filename: Union[str, Path, DicomFileLike]):
             'or the path to a DICOM file stored on disk.'
         )
         self._metadata = None
+        self._voi_lut = None
+        self._palette_color_lut = None
+        self._modality_lut = None
 
     @property
     def filename(self) -> str:
         """str: Path to the image file"""
         return str(self._filename)
 
@@ -281,6 +285,8 @@ def __exit__(self, except_type, except_value, except_trace) -> None:
             self._fp.close()
         except AttributeError:
             pass
+        else:
+            self._fp = None
         if except_value:
             sys.stderr.write(
                 f'Error while accessing file "{self._filename}":\n'
@@ -595,7 +601,7 @@ def read_frame(self, index: int, correct_color: bool = True) -> np.ndarray:
         logger.debug(f'decode frame #{index}')
 
         if self.metadata.BitsAllocated == 1:
-            unpacked_frame = unpack_bits(frame_data)
+            unpacked_frame = cast(np.ndarray, unpack_bits(frame_data))
             rows, columns = self.metadata.Rows, self.metadata.Columns
             n_pixels = self._pixels_per_frame
             pixel_offset = int(((index * n_pixels / 8) % 1) * 8)
@@ -634,77 +640,301 @@ def read_frame(self, index: int, correct_color: bool = True) -> np.ndarray:
 
         return frame_array
 
-    def read_frame_transformed(
+    def read_transformed_frame(
         self,
         index: int,
-        correct_color: bool = True,
         apply_modality_transform: bool = True,
         apply_voi_transform: bool = False,
         voi_transform_index: int = 0,
         apply_palette_color_lut: bool = True,
-        apply_icc_transform: bool = True,
+        ensure_monochrome_2: bool = True,
+        window_output_min: float = 0.0,
+        window_output_max: float = 1.0,
+        correct_color: bool = True,
     ) -> np.ndarray:
-        """Return a frame with pixel transformations applied.
+        """Read a frame using pixel transformations defined in the dataset.
 
         Parameters
         ----------
+        index: int
+            Zero-based frame index.
         apply_modality_transform: bool, optional
-            Whether to apply the modality transform (either a rescale intercept
-            and slope or modality LUT) to the pixel values, if present in the
-            datasets.
+            Whether to apply the modality transform (if present in the
+            dataset) to the frame. The modality transformation maps stored
+            pixel values to output values, either using a LUT or rescale
+            slope and intercept.
 
         Parameters
         ----------
+        index: int
+            Zero-based frame index.
         apply_modality_transform: bool, optional
-            Whether to apply the modality transform (either a rescale intercept
-            and slope or modality LUT) to the pixel values, if present in the
-            datasets.
+            Whether to apply the modality transform (if present in the
+            dataset) to the frame. The modality transformation maps stored
+            pixel values to output values, either using a LUT or rescale
+            slope and intercept.
         apply_voi_transform: bool, optional
-            Whether to apply the value of interest (VOI) transform (either a
-            windowing operation or VOI LUT) to the pixel values, if present in
-            the datasets.
+            Apply the value-of-interest (VOI) transformation (if present in the
+            dataset), which limits the range of pixel values to a particular
+            range of interest, using either a windowing operation or a LUT.
         voi_transform_index: int, optional
-            Index of the VOI transform to apply if multiple are included in the
-            datasets. Ignored if ``apply_voi_transform`` is ``False`` or no VOI
-            transform is included in the datasets.
+            Index (zero-based) of the VOI transform to apply if multiple are
+            included in the datasets. Ignored if ``apply_voi_transform`` is
+            ``False`` or no VOI transform is included in the datasets. May be a
+            negative integer, following standard Python indexing convention.
         apply_palette_color_lut: bool, optional
-            Whether to apply the palette color LUT if a dataset has photometric
-            interpretation ``'PALETTE_COLOR'``.
-        apply_icc_transform: bool, optional
-            Whether to apply an ICC color profile, if present in the datasets.
-        convert_color_space: bool, optional
-            Whether to convert the color space to a standardized space. If
-            True, images with photometric interpretation ``MONOCHROME1`` are
-            inverted to mimic ``MONOCHROME2``, and images with photometric
-            interpretation ``YBR_FULL`` or ``YBR_FULL_422`` are converted to
-            ``RGB``.
+            Apply the palette color LUT, if present in the dataset. The palette
+            color LUT maps a single sample for each pixel stored in the dataset
+            to a 3 sample-per-pixel color image.
+        ensure_monochrome_2: bool, optional
+            If the Photometric Interpretation is MONOCHROME1, invert the output
+            pixels so that their range corresponds to MONOCHROME2 (in which
+            high values represent white and low values represent black).
+            Ignored if PhotometricInterpretation is not MONOCHROME1.
+        window_output_min: float, optional
+            Value to which the lower edge of the window is mapped.
+        window_output_max: float, optional
+            Value to which the upper edge of the window is mapped.
+        correct_color: bool, optional
+            Whether colors should be corrected by applying an ICC
+            transformation. Will only be performed if the metadata contains an
+            ICC Profile.
 
         Returns
         -------
-        numpy.ndarray:
-            Numpy array of frame with requested transformations applied.
+        np.ndarray
+            Output array, having undergone the requested pixel
+            transformations. Will have a shape of either (rows, columns) or
+            (rows, columns, samples).
+            TODO document datatype.
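+
+        Examples
+        --------
+        A minimal usage sketch (the file name here is hypothetical):
+
+        >>> from highdicom.io import ImageFileReader
+        >>> with ImageFileReader('ct_image.dcm') as reader:  # doctest: +SKIP
+        ...     frame = reader.read_transformed_frame(
+        ...         0, apply_modality_transform=True
+        ...     )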
""" frame = self.read_frame(index, correct_color=False) - # TODO fix this from here - if apply_modality_transform: - frame = apply_modality_lut(frame, ds) - if apply_voi_transform: - frame = apply_voi_lut(frame, ds, voi_transform_index) - if ( - apply_palette_color_lut and - ds.PhotometricInterpretation == 'PALETTE_COLOR' - ): - frame = apply_color_lut(frame, ds) - if apply_icc_transform and 'ICCProfile' in ds: - manager = ColorManager(ds.ICCProfile) - frame = manager.transform_frame(frame) - if standardize_color_space: - if ds.PhotometricInterpretation == 'MONOCHROME1': - # TODO what if a VOI_LUT has been applied - frame = max_value - frame - elif ds.PhotometricInterpretation in ( - 'YBR_FULL', 'YBR_FULL_422' - ): - frame = convert_color_space( - frame, - current=ds.PhotometricInterpretation, - desired='RGB' - ) + return self._transform_frame( + frame=frame, + frame_index=index, + apply_modality_transform=apply_modality_transform, + apply_voi_transform=apply_voi_transform, + voi_transform_index=voi_transform_index, + apply_palette_color_lut=apply_palette_color_lut, + ensure_monochrome_2=ensure_monochrome_2, + window_output_min=window_output_min, + window_output_max=window_output_max, + correct_color=correct_color, + ) + + def _transform_frame( + self, + frame: np.ndarray, + frame_index: int, + apply_modality_transform: bool = True, + apply_voi_transform: bool = False, + voi_transform_index: int = 0, + apply_palette_color_lut: bool = True, + ensure_monochrome_2: bool = True, + window_output_min: float = 0.0, + window_output_max: float = 1.0, + correct_color: bool = True, + ) -> np.ndarray: + """Apply pixel transformation to a frame. + + Parameters + ---------- + frame: n.ndarray + Numpy array of the frame. Integer datatype and with shape (rows, + columns) or (rows, columns, samples). + frame_index: int + Zero-based index (one less than the frame number). + apply_modality_transform: bool, optional + Whether to apply to the modality transform (if present in the + dataset) the frame. The modality transformation maps stored pixel + values to output values, either using a LUT or rescale slope and + intercept. + apply_voi_transform: bool, optional + Apply the value-of-interest (VOI) transformation (if present in the + dataset), which limits the range of pixel values to a particular + range of interest, using either a windowing operation or a LUT. + voi_transform_index: int, optional + Index (zero-based) of the VOI transform to apply if multiple are + included in the datasets. Ignored if ``apply_voi_transform`` is + ``False`` or no VOI transform is included in the datasets. May be a + negative integer, following standard Python indexing convention. + apply_palette_color_lut: bool, optional + Apply the palette color LUT, if present in the dataset. The palette + color LUT maps a single sample for each pixel stored in the dataset + to a 3 sample-per-pixel color image. + ensure_monochrome_2: bool, optional + If the Photometric Interpretation is MONOCHROME1, convert the range + of the output pixels corresponds to MONOCHROME2 (in which high + values are represent white and low values represent black). Ignored + if PhotometricInterpretation is not MONOCHROME1. + window_output_min: float, optional + Value to which the lower edge of the window is mapped. + window_output_max: float, optional + Value to which the upper edge of the window is mapped. + correct_color: bool, optional + Whether colors should be corrected by applying an ICC + transformation. 
+
+        Returns
+        -------
+        np.ndarray
+            Output array, having undergone the requested pixel
+            transformations. Will have a shape of either (rows, columns) or
+            (rows, columns, samples).
+            TODO document datatype.
+
+        """
+        # TODO: real world value map
+        # TODO: what if modality LUT outputs non-integer and there is a VOI LUT?
+        # TODO: output range for VOI LUT and monochrome1
+        # TODO: how to combine with multiframe?
+        if apply_voi_transform and not apply_modality_transform:
+            raise ValueError(
+                "Parameter 'apply_voi_transform' requires "
+                "'apply_modality_transform'."
+            )
+
+        # Create a list of all datasets to check for transforms for this frame
+        datasets = [self.metadata]
+
+        if 'SharedFunctionalGroupsSequence' in self.metadata:
+            datasets.append(self.metadata.SharedFunctionalGroupsSequence[0])
+
+        if 'PerFrameFunctionalGroupsSequence' in self.metadata:
+            datasets.append(
+                self.metadata.PerFrameFunctionalGroupsSequence[frame_index]
+            )
+
+        if self.metadata.SamplesPerPixel == 1:
+            if self.metadata.PhotometricInterpretation == 'PALETTE COLOR':
+
+                if apply_palette_color_lut:
+                    if self._palette_color_lut is None:
+                        self._palette_color_lut = (
+                            PaletteColorLUTTransformation.from_dataset(
+                                self.metadata
+                            )
+                        )
+                    frame = self._palette_color_lut.apply(frame)
+
+            else:
+                if apply_modality_transform:
+
+                    if 'ModalityLUTSequence' in self.metadata:
+                        self._modality_lut = LUT.from_dataset(
+                            self.metadata.ModalityLUTSequence[0]
+                        )
+                        frame = self._modality_lut.apply(frame)
+                    else:
+                        slope = None
+                        intercept = None
+                        for ds in datasets:
+                            if (
+                                'RescaleSlope' in ds or
+                                'RescaleIntercept' in ds
+                            ):
+                                slope = float(ds.get('RescaleSlope', 1.0))
+                                intercept = float(
+                                    ds.get('RescaleIntercept', 0.0)
+                                )
+                                break
+
+                        if slope is not None or intercept is not None:
+                            frame = frame * slope + intercept
+
+                if apply_voi_transform:
+
+                    if 'VOILUTSequence' in self.metadata:
+                        self._voi_lut = LUT.from_dataset(
+                            self.metadata.VOILUTSequence[0]
+                        )
+                        frame = self._voi_lut.apply(frame)
+                        # TODO should rescale here?
+                    else:
+                        window_center = None
+                        window_width = None
+                        voi_function = 'LINEAR'
+
+                        for ds in datasets:
+                            if (
+                                'WindowCenter' in ds or
+                                'WindowWidth' in ds
+                            ):
+                                window_center = ds.WindowCenter
+                                window_width = ds.WindowWidth
+
+                                if 'VOILUTFunction' in ds:
+                                    voi_function = ds.VOILUTFunction
+
+                                if isinstance(window_width, list):
+                                    window_width = window_width[
+                                        voi_transform_index
+                                    ]
+                                elif voi_transform_index not in (0, -1):
+                                    raise IndexError(
+                                        "Requested 'voi_transform_index' is "
+                                        "not present."
+                                    )
+
+                                if isinstance(window_center, list):
+                                    window_center = window_center[
+                                        voi_transform_index
+                                    ]
+                                elif voi_transform_index not in (0, -1):
+                                    raise IndexError(
+                                        "Requested 'voi_transform_index' is "
+                                        "not present."
+ ) + break + + if ( + window_center is not None and + window_width is not None + ): + if voi_function in ('LINEAR', 'LINEAR_EXACT'): + window_min = window_center - window_width / 2.0 + output_range = ( + window_output_max - window_output_min + ) + if voi_function == 'LINEAR': + # LINEAR uses the range + # from c - 0.5w to c + 0.5w - 1 + scale_factor = ( + output_range / (window_width - 1) + ) + else: + # LINEAR_EXACT uses the full range + # from c - 0.5w to c + 0.5w + scale_factor = output_range / window_width + + frame = ( + (frame - window_min) * scale_factor + + window_output_min + ) + elif voi_function == 'SIGMOID': + exp_term = np.exp( + -4.0 * (frame - window_center) / + window_width + ) + frame = ( + (window_output_max - window_output_min) / + (1.0 + exp_term) + ) + else: + raise ValueError( + 'Unrecognized value for VOILUTFunction: ' + f"'{voi_function}'" + ) + + frame = np.clip( + frame, + window_output_min, + window_output_max, + ) + + if ensure_monochrome_2: + if self.metadata.PhotometricInterpretation == 'MONOCHROME1': + # Flip pixel intensities within the same range + frame = frame.min() + frame.max() - frame + + # We don't use the color_correct_frame() function here, since we cache + # the ICC transform on the reader instance for improved performance. + if correct_color and frame.shape[-1] == 3: + if self._color_manager is not None: + frame = self._color_manager.transform_frame(frame) + + return frame @property def number_of_frames(self) -> int: diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index 2821edd0..dd853226 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -1851,7 +1851,7 @@ def from_image_series( transform is included in the datasets. apply_palette_color_lut: bool, optional Whether to apply the palette color LUT if a dataset has photometric - interpretation ``'PALETTE_COLOR'``. + interpretation ``'PALETTE COLOR'``. apply_icc_transform: bool, optional Whether to apply an ICC color profile, if present in the datasets. 
convert_color_space: bool, optional @@ -1925,7 +1925,7 @@ def from_image_series( frame = apply_voi_lut(frame, ds, voi_transform_index) if ( apply_palette_color_lut and - ds.PhotometricInterpretation == 'PALETTE_COLOR' + ds.PhotometricInterpretation == 'PALETTE COLOR' ): frame = apply_color_lut(frame, ds) if apply_icc_transform and 'ICCProfile' in ds: diff --git a/tests/test_content.py b/tests/test_content.py index e65b4276..920ec8f1 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -152,6 +152,11 @@ def test_construction_16bit(self): assert np.array_equal(lut.lut_data, self._lut_data_16) assert not hasattr(lut, 'LUTExplanation') + arr = np.array([0, 1, 0, 89, 1]) + expected = np.array([510, 511, 510, 599, 511]) + output = lut.apply(arr) + assert np.array_equal(output, expected) + def test_construction_explanation(self): first_value = 0 lut = LUT( @@ -203,6 +208,11 @@ def test_construction_16bit(self): assert np.array_equal(lut.lut_data, self._lut_data_16) assert not hasattr(lut, 'LUTExplanation') + arr = np.array([0, 1, 0, 89, 1]) + expected = np.array([510, 511, 510, 599, 511]) + output = lut.apply(arr) + assert np.array_equal(output, expected) + def test_construction_string_type(self): first_value = 0 lut_type = 'MY_MAPPING' @@ -1186,6 +1196,11 @@ def test_construction_16bit(self): assert lut.lut_data.dtype == np.uint16 np.array_equal(lut.lut_data, lut_data) + arr = np.array([32, 33, 32, 132]) + expected = np.array([10, 11, 10, 110]) + output = lut.apply(arr) + assert np.array_equal(output, expected) + # Commented out until 8 bit LUTs are reimplemented def test_construction_8bit(self): lut_data = np.arange(0, 256, dtype=np.uint8) @@ -1209,6 +1224,11 @@ def test_construction_8bit(self): assert lut.lut_data.dtype == np.uint8 np.array_equal(lut.lut_data, lut_data) + arr = np.array([0, 1, 0, 255]) + expected = np.array([0, 1, 0, 255]) + output = lut.apply(arr) + assert np.array_equal(output, expected) + class TestPaletteColorLUTTransformation(TestCase): @@ -1258,6 +1278,17 @@ def test_construction(self): assert np.array_equal(instance.green_lut.lut_data, g_lut_data) assert np.array_equal(instance.blue_lut.lut_data, b_lut_data) + arr = np.array([32, 33, 32, 132]) + expected = np.array( + [ + [10, 11, 10, 110], + [20, 21, 20, 120], + [30, 31, 30, 130], + ] + ).T + output = instance.apply(arr) + assert np.array_equal(output, expected) + def test_construction_no_uid(self): r_lut_data = np.arange(10, 120, dtype=np.uint16) g_lut_data = np.arange(20, 130, dtype=np.uint16) From e6f4e0ab747175a11ef24d44f268ffc438ec70d3 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sat, 23 Nov 2024 20:43:09 +0100 Subject: [PATCH 76/93] Implement apply methods for voi and modaiity transforms --- src/highdicom/content.py | 525 ++++++++++++++++++++++++-- src/highdicom/io.py | 32 +- tests/test_content.py | 785 ++++++++++++++++++++++++++++++++++++--- 3 files changed, 1252 insertions(+), 90 deletions(-) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index ea1a2dc9..76a71859 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -3,11 +3,14 @@ import datetime from copy import deepcopy from typing import cast, Dict, List, Optional, Union, Sequence, Tuple +from pydicom.multival import MultiValue from typing_extensions import Self import numpy as np +from PIL import ImageColor from pydicom.dataset import Dataset from pydicom import DataElement +from pydicom.multival import MultiValue from pydicom.sequence import Sequence as DataElementSequence from pydicom.sr.coding import 
Code
 from pydicom.sr.codedict import codes
 
@@ -1865,7 +1868,7 @@ def __init__(
             Pixel value that will be mapped to the first value in the
             lookup-table.
         lut_data: numpy.ndarray
-            Lookup table data. Must be of type uint16.
+            Lookup table data. Must be of type uint8 or uint16.
         lut_explanation: Union[str, None], optional
             Free-form text explanation of the meaning of the LUT.
 
@@ -1906,16 +1909,16 @@ def __init__(
         elif len_data == 2**16:
             # Per the standard, this is recorded as 0
             len_data = 0
-        # Note 8 bit LUT data is unsupported pending clarification on the
-        # standard
         if lut_data.dtype.type == np.uint16:
             bits_per_entry = 16
+        elif lut_data.dtype.type == np.uint8:
+            bits_per_entry = 8
         else:
             raise ValueError(
-                "Numpy array must have dtype uint16."
+                "Numpy array must have dtype uint8 or uint16."
             )
         # The LUT data attribute has VR OW (16-bit other words)
-        self.LUTData = lut_data.astype(np.uint16).tobytes()
+        self.LUTData = lut_data.tobytes()
 
         self.LUTDescriptor = [
             len_data,
@@ -1930,18 +1933,22 @@ def __init__(
     @property
     def lut_data(self) -> np.ndarray:
         """numpy.ndarray: LUT data"""
-        if self.bits_per_entry == 8:
-            raise RuntimeError("8 bit LUTs are currently unsupported.")
-        elif self.bits_per_entry == 16:
+        bits_per_entry = self.bits_per_entry
+        if bits_per_entry == 8:
+            dtype = np.uint8
+        elif bits_per_entry == 16:
             dtype = np.uint16
         else:
             raise RuntimeError("Invalid LUT descriptor.")
         length = self.LUTDescriptor[0]
         data = self.LUTData
+
+        # Account for a zero-padding byte in the case of an 8 bit LUT
+        if bits_per_entry == 8 and length % 2 == 1 and len(data) == length + 1:
+            data = data[:-1]
+
         # The LUT data attributes have VR OW (16-bit other words)
-        array = np.frombuffer(data, dtype=np.uint16)
-        # Needs to be casted according to third descriptor value.
-        array = array.astype(dtype)
+        array = np.frombuffer(data, dtype=dtype)
         if len(array) != length:
             raise RuntimeError(
                 'Length of LUTData does not match the value expected from the '
@@ -2017,18 +2024,94 @@ def from_dataset(
         dataset_copy.__class__ = cls
         return cast(cls, dataset_copy)
 
-    def apply(self, array: np.ndarray) -> np.ndarray:
+    def get_scaled_lut_data(
+        self,
+        output_range: Tuple[float, float] = (0.0, 1.0),
+        dtype: Union[type, str, np.dtype, None] = np.float64,
+        invert: bool = False,
+    ) -> np.ndarray:
+        """Get LUT data array with output values scaled to a given range.
+
+        Parameters
+        ----------
+        output_range: Tuple[float, float], optional
+            Tuple containing (lower, upper) value of the range into which to
+            scale the output values. The lowest value in the LUT data will be
+            mapped to the lower limit, and the highest value will be mapped to
+            the upper limit, with a linear scaling used elsewhere.
+        dtype: Union[type, str, numpy.dtype, None], optional
+            Data type of the returned array (must be a floating point NumPy
+            data type).
+        invert: bool, optional
+            Invert the returned array such that the lowest original value in
+            the LUT is mapped to the upper limit and the highest original value
+            is mapped to the lower limit. This may be used to efficiently
+            combine a LUT with a presentation transform that inverts the range.
+
+        Returns
+        -------
+        numpy.ndarray:
+            Rescaled LUT data array.
+
+        """
+        dtype = np.dtype(dtype)
+
+        # Check dtype is suitable
+        if dtype.kind != 'f':
+            raise ValueError(
+                f'Data type "{dtype}" is not suitable.'
+            )
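+
+        # Determine the range spanned by the LUT entries; this input range
+        # is mapped linearly onto the requested output range below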
+        lut_data = self.lut_data
+        lut_min = lut_data.min()
+        lut_max = lut_data.max()
+        output_min, output_max = output_range
+        output_scale = output_max - output_min
+        if output_scale <= 0.0:
+            raise ValueError('Invalid output range.')
+
+        input_scale = lut_max - lut_min
+        scale_factor = output_scale / input_scale
+
+        scale_factor = dtype.type(scale_factor)
+        output_min = dtype.type(output_min)
+
+        lut_data = lut_data.astype(dtype)
+        if invert:
+            lut_data = -lut_data
+            lut_min = -lut_max.astype(dtype)
+
+        if lut_min != 0:
+            lut_data = lut_data - lut_min
+
+        lut_data = lut_data * scale_factor
+
+        if output_min != 0.0:
+            lut_data = lut_data + output_min
+
+        return lut_data
+
+    def apply(
+        self,
+        array: np.ndarray,
+        dtype: Union[type, str, np.dtype, None] = None,
+    ) -> np.ndarray:
         """Apply the LUT to a pixel array.
 
         Parameters
         ----------
-        apply: np.ndarray
+        array: numpy.ndarray
             Pixel array to which the LUT should be applied. Can be of any shape
             but must have an integer datatype.
+        dtype: Union[type, str, numpy.dtype, None], optional
+            Datatype of the output array. If ``None``, an unsigned integer
+            datatype corresponding to the number of bits in the LUT will be
+            used (either ``numpy.uint8`` or ``numpy.uint16``). Only safe casts
+            are permitted.
 
         Returns
         -------
-        np.ndarray
+        numpy.ndarray
             Array with LUT applied.
 
         """
@@ -2036,6 +2119,21 @@ def apply(self, array: np.ndarray) -> np.ndarray:
             raise ValueError(
                 "Array must have an integer datatype."
             )
+
+        lut_data = self.lut_data
+        if dtype is None:
+            dtype = lut_data.dtype
+        dtype = np.dtype(dtype)
+
+        # Check dtype is suitable
+        if dtype.kind not in ('u', 'i', 'f'):
+            raise ValueError(
+                f'Data type "{dtype}" is not suitable.'
+            )
+
+        if dtype != lut_data.dtype:
+            lut_data = lut_data.astype(dtype, casting='safe')
+
         last_mapped_value = self.first_mapped_value + self.number_of_entries - 1
         if (
             array.min() < self.first_mapped_value or
@@ -2045,7 +2143,7 @@ def apply(self, array: np.ndarray) -> np.ndarray:
                 "Array contains values not in the LUT."
             )
 
-        return self.lut_data[array - self.first_mapped_value]
+        return lut_data[array - self.first_mapped_value]
 
 
 class ModalityLUT(LUT):
@@ -2259,6 +2357,188 @@ def __init__(
             'provided.'
         )
 
+    def apply(
+        self,
+        array: np.ndarray,
+        output_range: Tuple[float, float] = (0.0, 1.0),
+        voi_transform_index: int = 0,
+        dtype: Union[type, str, np.dtype, None] = None,
+        invert: bool = False,
+    ) -> np.ndarray:
+        """Apply the transformation to an array.
+
+        Parameters
+        ----------
+        array: numpy.ndarray
+            Pixel array to which the transformation should be applied. Can be
+            of any shape but must have an integer datatype if the
+            transformation uses a LUT.
+        output_range: Tuple[float, float], optional
+            Range of output values to which the VOI range is mapped.
+        voi_transform_index: int, optional
+            Index (zero-based) of the VOI transform to apply if multiple are
+            included in the dataset. May be a negative integer, following
+            standard Python indexing convention.
+        dtype: Union[type, str, numpy.dtype, None], optional
+            Data type of the output array. Should be a floating point data
+            type. If not specified, ``numpy.float64`` is used.
+        invert: bool, optional
+            Invert the returned array such that the lowest original value in
+            the LUT or input window is mapped to the upper limit and the
+            highest original value is mapped to the lower limit. This may be
+            used to efficiently combine a VOI LUT transformation with a
+            presentation transform that inverts the range.
+
+        Returns
+        -------
+        numpy.ndarray
+            Array with transformation applied.
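+
+        For example (values chosen for illustration): with a ``LINEAR`` window
+        of center 40 and width 400, the window spans ``c - 0.5*w`` to
+        ``c + 0.5*w - 1``, so a stored value of -160 maps to the lower output
+        limit and 239 maps to the upper limit.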
+ + """ + # TODO what if both window and LUT are present (explicitky possible + # within the standard)? + if dtype is None: + dtype = np.dtype(np.float64) + else: + dtype = np.dtype(dtype) + + # Check dtype is suitable + if dtype.kind != 'f': + raise ValueError( + f'Data type "{dtype}" is not suitable.' + ) + + output_min, output_max = output_range + if output_min >= output_max: + raise ValueError( + "Second value of 'output_range' must be higher than the first." + ) + + if 'VOILUTSequence' in self: + if array.dtype.kind not in ('i', 'u'): + raise ValueError( + "Array must have an integer data type if a LUT is used." + ) + + try: + voi_lut = self.VOILUTSequence[voi_transform_index] + except IndexError as e: + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." + ) from e + scaled_lut_data = voi_lut.get_scaled_lut_data( + output_range=output_range, + dtype=dtype, + invert=invert, + ) + array = scaled_lut_data[array - voi_lut.first_mapped_value] + else: + window_center = None + window_width = None + voi_function = 'LINEAR' + + if ( + 'WindowCenter' in self or + 'WindowWidth' in self + ): + window_center = self.WindowCenter + window_width = self.WindowWidth + + if 'VOILUTFunction' in self: + voi_function = self.VOILUTFunction + + if isinstance(window_width, (list, MultiValue)): + try: + window_width = window_width[ + voi_transform_index + ] + except IndexError as e: + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." + ) from e + elif voi_transform_index not in (0, -1): + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." + ) + + if isinstance(window_center, (list, MultiValue)): + try: + window_center = window_center[ + voi_transform_index + ] + except IndexError as e: + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." + ) from e + elif voi_transform_index not in (0, -1): + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." + ) + + if ( + window_center is not None and + window_width is not None + ): + window_width = dtype.type(window_width) + window_center = dtype.type(window_center) + if array.dtype != dtype: + array = array.astype(dtype) + + if voi_function in ('LINEAR', 'LINEAR_EXACT'): + output_scale = ( + output_max - output_min + ) + if voi_function == 'LINEAR': + # LINEAR uses the range + # from c - 0.5w to c + 0.5w - 1 + scale_factor = ( + output_scale / (window_width - 1) + ) + else: + # LINEAR_EXACT uses the full range + # from c - 0.5w to c + 0.5w + scale_factor = output_scale / window_width + + window_min = window_center - window_width / 2.0 + if invert: + array = ( + (window_min - array) * scale_factor + + output_max + ) + else: + array = ( + (array - window_min) * scale_factor + + output_min + ) + + array = np.clip(array, output_min, output_max) + + elif voi_function == 'SIGMOID': + if invert: + offset_array = window_center - array + else: + offset_array = array - window_center + exp_term = np.exp( + -4.0 * offset_array / + window_width + ) + array = ( + (output_max - output_min) / + (1.0 + exp_term) + ) + output_min + else: + raise ValueError( + 'Unrecognized value for VOILUTFunction: ' + f"'{voi_function}'" + ) + + return array + class ModalityLUTTransformation(Dataset): @@ -2345,6 +2625,95 @@ def __init__( _check_long_string(rescale_type) self.RescaleType = rescale_type + def apply( + self, + array: np.ndarray, + dtype: Union[type, str, np.dtype, None] = None, + ) -> np.ndarray: + """Apply the transformation to a pixel array. 
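+
+        When no Modality LUT is present, the transformation is the linear
+        rescale ``output = array * RescaleSlope + RescaleIntercept``;
+        otherwise the Modality LUT is applied as a lookup operation.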
+ + Parameters + ---------- + apply: numpy.ndarray + Pixel array to which the transformation should be applied. Can be + of any shape but must have an integer datatype if the + transformation uses a LUT. + dtype: Union[type, str, numpy.dtype, None], optional + Ensure the output type has this value. By default, this will have + type ``numpy.float64`` if the transformation uses a rescale + operation, or the datatype of the Modality LUT (``numpy.uint8`` or + ``numpy.uint16``) if it uses a LUT. An integer datatype may be + specified if a rescale operation is used, however if Rescale Slope + or Rescale Intecept are non-integer values an error will be raised. + + Returns + ------- + numpy.ndarray + Array with transformation applied. + + """ + if 'ModalityLUTSequence' in self: + return self.ModalityLUTSequence[0].apply(array, dtype=dtype) + else: + slope = np.float64(self.get('RescaleSlope', 1.0)) + intercept = np.float64( + self.get('RescaleIntercept', 0.0) + ) + + if dtype is None: + dtype = np.dtype(np.float64) + dtype = np.dtype(dtype) + + # Check dtype is suitable + if dtype.kind not in ('u', 'i', 'f'): + raise ValueError( + f'Data type "{dtype}" is not suitable.' + ) + if dtype.kind in ('u', 'i'): + if not (slope.is_integer() and intercept.is_integer()): + raise ValueError( + 'An integer data type cannot be used if the slope ' + 'or intercept is a non-integer value.' + ) + if array.dtype.kind not in ('u', 'i'): + raise ValueError( + 'An integer data type cannot be used if the input ' + 'array is floating point.' + ) + + if dtype.kind == 'u' and intercept < 0.0: + raise ValueError( + 'An unsigned integer data type cannot be used if the ' + 'intercept is negative.' + ) + + output_max = np.iinfo(array.dtype).max * slope + intercept + output_type_max = np.iinfo(dtype).max + output_min = np.iinfo(array.dtype).min * slope + intercept + output_type_min = np.iinfo(dtype).min + + if output_max > output_type_max or output_min < output_type_min: + raise ValueError( + f'Datatype {dtype} does not have capacity for values ' + f'with slope {slope:.2f} and intercept {intercept:.2f}.' + ) + + if dtype != np.float64: + slope = slope.astype(dtype) + intercept = intercept.astype(dtype) + + # Avoid unnecessary array operations for efficiency + if slope != 1.0 or intercept != 0.0: + if slope != 1.0: + array = array * slope + if intercept != 0.0: + array = array + intercept + else: + if array.dtype != dtype: + array = array.astype(dtype) + + return array + class PresentationLUT(LUT): @@ -2531,14 +2900,20 @@ def __init__( @property def lut_data(self) -> np.ndarray: """numpy.ndarray: lookup table data""" - if self.bits_per_entry == 8: + bits_per_entry = self.bits_per_entry + if bits_per_entry == 8: dtype = np.uint8 - elif self.bits_per_entry == 16: + elif bits_per_entry == 16: dtype = np.uint16 else: raise RuntimeError("Invalid LUT descriptor.") length = self.number_of_entries data = getattr(self, f'{self._attr_name_prefix}Data') + + # Account for a zero-padding byte in the case of an 8 bit LUT + if bits_per_entry == 8 and length % 2 == 1 and len(data) == length + 1: + data = data[:-1] + # The LUT data attributes have VR OW (16-bit other words) array = np.frombuffer(data, dtype=dtype) # Needs to be casted according to third descriptor value. 
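The zero-padding handling added in the two ``lut_data`` properties above follows from the OW value representation used for LUT data: OW values must contain an even number of bytes, so an 8 bit LUT with an odd number of entries is stored with a single trailing zero byte. A standalone NumPy sketch of that round trip (array contents are illustrative, not taken from the patch):

```python
import numpy as np

# An 8 bit LUT with an odd number of entries occupies an odd number of
# bytes, so a single zero byte is appended to satisfy the even-length
# requirement of the OW value representation
lut = np.arange(255, dtype=np.uint8)
encoded = lut.tobytes() + b'\x00' if lut.size % 2 else lut.tobytes()
assert len(encoded) % 2 == 0

# When reading, drop the pad byte if the descriptor records an odd
# number of entries but the stored data is one byte longer
decoded = np.frombuffer(encoded, dtype=np.uint8)[:lut.size]
assert np.array_equal(decoded, lut)
```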
@@ -2574,18 +2949,27 @@ def bits_per_entry(self) -> int:
         descriptor = getattr(self, f'{self._attr_name_prefix}Descriptor')
         return int(descriptor[2])
 
-    def apply(self, array: np.ndarray) -> np.ndarray:
+    def apply(
+        self,
+        array: np.ndarray,
+        dtype: Union[type, str, np.dtype, None] = None,
+    ) -> np.ndarray:
         """Apply the LUT to a pixel array.
 
         Parameters
         ----------
-        apply: np.ndarray
+        array: numpy.ndarray
             Pixel array to which the LUT should be applied. Can be of any shape
             but must have an integer datatype.
+        dtype: Union[type, str, numpy.dtype, None], optional
+            Datatype of the output array. If ``None``, an unsigned integer
+            datatype corresponding to the number of bits in the LUT will be
+            used (either ``numpy.uint8`` or ``numpy.uint16``). Only safe casts
+            are permitted.
 
         Returns
         -------
-        np.ndarray
+        numpy.ndarray
             Array with LUT applied.
 
         """
@@ -2593,6 +2977,21 @@ def apply(self, array: np.ndarray) -> np.ndarray:
             raise ValueError(
                 "Array must have an integer datatype."
             )
+
+        lut_data = self.lut_data
+        if dtype is None:
+            dtype = lut_data.dtype
+        dtype = np.dtype(dtype)
+
+        # Check dtype is suitable
+        if dtype.kind not in ('u', 'i', 'f'):
+            raise ValueError(
+                f'Data type "{dtype}" is not suitable.'
+            )
+
+        if dtype != lut_data.dtype:
+            lut_data = lut_data.astype(dtype, casting='safe')
+
         last_mapped_value = self.first_mapped_value + self.number_of_entries - 1
         if (
             array.min() < self.first_mapped_value or
@@ -2602,7 +3001,7 @@ def apply(self, array: np.ndarray) -> np.ndarray:
                 "Array contains values not in the LUT."
             )
 
-        return self.lut_data[array - self.first_mapped_value]
+        return lut_data[array - self.first_mapped_value]
 
     @classmethod
     def from_dataset(cls, dataset: Dataset, color: str) -> Self:
@@ -2911,7 +3310,6 @@ def __init__(
         palette_color_lut_uid: Union[highdicom.UID, str, None], optional
             Unique identifier for the palette color lookup table.
 
-
     Examples
     --------
@@ -3010,6 +3408,85 @@ def __init__(
         # To cache the array
         self._lut_data = None
 
+    @classmethod
+    def from_colors(
+        cls,
+        colors: Sequence[str],
+        first_mapped_value: int = 0,
+        palette_color_lut_uid: Union[UID, str, None] = None
+    ) -> Self:
+        """Create a palette color lookup table from a list of colors.
+
+        Parameters
+        ----------
+        colors: Sequence[str]
+            List of colors. Item ``i`` of the list will be used as the color
+            for input value ``first_mapped_value + i``. Each color should be a
+            string understood by PIL's ``getrgb()`` function (see `here
+            `_ for the documentation of that function, or `here
+            `_ for the original list of color names).
+            This includes many case-insensitive color names (e.g. ``"red"``,
+            ``"Blue"``, or ``"YELLOW"``), hex codes (e.g. ``"#ff7733"``) or
+            decimal integers in the format of this example: ``"RGB(255, 255,
+            0)"``.
+        first_mapped_value: int
+            Pixel value that will be mapped to the first value in the
+            lookup table.
+        palette_color_lut_uid: Union[highdicom.UID, str, None], optional
+            Unique identifier for the palette color lookup table.
+
+        Examples
+        --------
+
+        Create a ``PaletteColorLUTTransformation`` for a small number of values
+        (4 in this case). This would be typical for a labelmap segmentation.
+
+        >>> import highdicom as hd
+        >>>
+        >>> lut = hd.PaletteColorLUTTransformation.from_colors(
+        ...     colors=['black', 'red', 'orange', 'yellow'],
+        ...     palette_color_lut_uid=hd.UID(),
+        ... )
+
+        Returns
+        -------
+        highdicom.PaletteColorLUTTransformation:
+            Palette Color Lookup table created from the given colors. This will
+            always be an 8 bit LUT.
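+
+        Note that each color is interpreted as an opaque RGB value; alpha
+        channels are not represented in a palette color LUT.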
+
+        """  # noqa: E501
+        if len(colors) == 0:
+            raise ValueError("List 'colors' may not be empty.")
+
+        r_list, g_list, b_list = zip(
+            *[ImageColor.getrgb(c) for c in colors]
+        )
+
+        red_lut = PaletteColorLUT(
+            first_mapped_value=first_mapped_value,
+            lut_data=np.array(r_list, dtype=np.uint8),
+            color='red'
+        )
+        green_lut = PaletteColorLUT(
+            first_mapped_value=first_mapped_value,
+            lut_data=np.array(g_list, dtype=np.uint8),
+            color='green'
+        )
+        blue_lut = PaletteColorLUT(
+            first_mapped_value=first_mapped_value,
+            lut_data=np.array(b_list, dtype=np.uint8),
+            color='blue'
+        )
+
+        return cls(
+            red_lut=red_lut,
+            green_lut=green_lut,
+            blue_lut=blue_lut,
+            palette_color_lut_uid=palette_color_lut_uid,
+        )
+
     @property
     def red_lut(self) -> Union[PaletteColorLUT, SegmentedPaletteColorLUT]:
         """Union[highdicom.PaletteColorLUT, highdicom.SegmentedPaletteColorLUT]:
@@ -3039,13 +3516,13 @@ def apply(self, array: np.ndarray) -> np.ndarray:
 
         Parameters
         ----------
-        array: np.ndarray
+        array: numpy.ndarray
             Pixel array to which the LUT should be applied. Can be of any shape
             but must have an integer datatype.
 
         Returns
         -------
-        np.ndarray
+        numpy.ndarray
             Array with LUT applied. The RGB channels will be stacked along a
             new final dimension.
 
diff --git a/src/highdicom/io.py b/src/highdicom/io.py
index 12ab2deb..715d47aa 100644
--- a/src/highdicom/io.py
+++ b/src/highdicom/io.py
@@ -722,8 +722,7 @@ def _transform_frame(
         voi_transform_index: int = 0,
         apply_palette_color_lut: bool = True,
         ensure_monochrome_2: bool = True,
-        window_output_min: float = 0.0,
-        window_output_max: float = 1.0,
+        output_range: Tuple[float, float] = (0.0, 1.0),
         correct_color: bool = True,
     ) -> np.ndarray:
         """Apply pixel transformation to a frame.
@@ -758,10 +757,10 @@ def _transform_frame(
             pixels so that their range corresponds to MONOCHROME2 (in which
             high values represent white and low values represent black).
             Ignored if PhotometricInterpretation is not MONOCHROME1.
-        window_output_min: float, optional
-            Value to which the lower edge of the window is mapped.
-        window_output_max: float, optional
-            Value to which the upper edge of the window is mapped.
+        output_range: Tuple[float, float], optional
+            Range of output values to which the VOI range is mapped. Only
+            relevant if ``apply_voi_transform`` is True and a VOI transform is
+            present.
         correct_color: bool, optional
             Whether colors should be corrected by applying an ICC
             transformation. Will only be performed if the metadata contains an
             ICC Profile.
@@ -777,6 +776,7 @@ def _transform_frame(
         """
         # TODO: real world value map
         # TODO: what if modality LUT outputs non-integer and there is a VOI LUT?
+        # TODO: specify that code should error if no transform found?
         # TODO: output range for VOI LUT and monochrome1
         # TODO: how to combine with multiframe?
         if apply_voi_transform and not apply_modality_transform:
@@ -785,6 +785,12 @@ def _transform_frame(
                 "'apply_modality_transform'."
             )
 
+        output_min, output_max = output_range
+        if output_min >= output_max:
+            raise ValueError(
+                "Second value of 'output_range' must be higher than the first."
+ ) + # Crrate a list of all datasets to check for transforms for this frame datasets = [self.metadata] @@ -884,9 +890,7 @@ def _transform_frame( ): if voi_function in ('LINEAR', 'LINEAR_EXACT'): window_min = window_center - window_width / 2.0 - output_range = ( - window_output_max - window_output_min - ) + output_range = output_max - output_min if voi_function == 'LINEAR': # LINEAR uses the range # from c - 0.5w to c + 0.5w - 1 @@ -900,7 +904,7 @@ def _transform_frame( frame = ( (frame - window_min) * scale_factor + - window_output_min + output_min ) elif voi_function == 'SIGMOID': exp_term = np.exp( @@ -908,7 +912,7 @@ def _transform_frame( window_width ) frame = ( - (window_output_max - window_output_min) / + (output_max - output_min) / (1.0 + exp_term) ) else: @@ -917,11 +921,7 @@ def _transform_frame( f"'{voi_function}'" ) - frame = np.clip( - frame, - window_output_min, - window_output_max, - ) + frame = np.clip(frame, output_min, output_max) if ensure_monochrome_2: if self.metadata.PhotometricInterpretation == 'MONOCHROME1': diff --git a/tests/test_content.py b/tests/test_content.py index 920ec8f1..aba26ee0 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -17,6 +17,7 @@ PaletteColorLUT, ContentCreatorIdentificationCodeSequence, ModalityLUT, + ModalityLUTTransformation, LUT, PaletteColorLUTTransformation, PixelMeasuresSequence, @@ -127,18 +128,74 @@ def setUp(self): self._lut_data_16 = np.arange(510, 600, dtype=np.uint16) self._explanation = 'My LUT' - # Commented out until 8 bit LUTs are reimplemented - # def test_construction(self): - # first_value = 0 - # lut = LUT( - # first_mapped_value=first_value, - # lut_data=self._lut_data, - # ) - # assert lut.LUTDescriptor == [len(self._lut_data), first_value, 8] - # assert lut.bits_per_entry == 8 - # assert lut.first_mapped_value == first_value - # assert np.array_equal(lut.lut_data, self._lut_data) - # assert not hasattr(lut, 'LUTExplanation') + def test_construction(self): + first_value = 0 + lut = LUT( + first_mapped_value=first_value, + lut_data=self._lut_data, + ) + assert lut.LUTDescriptor == [len(self._lut_data), first_value, 8] + assert lut.bits_per_entry == 8 + assert lut.first_mapped_value == first_value + assert np.array_equal(lut.lut_data, self._lut_data) + assert not hasattr(lut, 'LUTExplanation') + + arr = np.array([0, 1, 0, 89, 1]) + expected = np.array([10, 11, 10, 99, 11]) + output = lut.apply(arr) + assert output.dtype == np.uint8 + assert np.array_equal(output, expected) + + for dtype in [ + np.uint8, + np.uint16, + np.uint32, + np.float32, + np.float64, + np.int16, + np.int32, + np.int64, + ]: + output = lut.apply(arr, dtype=dtype) + assert output.dtype == dtype + assert np.array_equal(output, expected.astype(dtype)) + + for dtype in [ + np.int8, + ]: + with pytest.raises(TypeError): + lut.apply(arr, dtype=dtype) + + for out_min, out_max in [ + (0.0, 1.0), + (-10, -5), + (100.0, 150.0), + ]: + for dtype in [ + np.float16, + np.float32, + np.float64, + ]: + scaled_data = lut.get_scaled_lut_data( + output_range=(out_min, out_max), + dtype=dtype, + ) + assert scaled_data.min() == out_min + assert scaled_data.max() == out_max + assert scaled_data[0] == out_min + assert scaled_data[-1] == out_max + assert scaled_data.dtype == dtype + + scaled_data = lut.get_scaled_lut_data( + output_range=(out_min, out_max), + dtype=dtype, + invert=True, + ) + assert scaled_data.min() == out_min + assert scaled_data.max() == out_max + assert scaled_data[-1] == out_min + assert scaled_data[0] == out_max + assert 
scaled_data.dtype == dtype def test_construction_16bit(self): first_value = 0 @@ -155,8 +212,60 @@ def test_construction_16bit(self): arr = np.array([0, 1, 0, 89, 1]) expected = np.array([510, 511, 510, 599, 511]) output = lut.apply(arr) + assert output.dtype == np.uint16 assert np.array_equal(output, expected) + for dtype in [ + np.uint16, + np.uint32, + np.float32, + np.float64, + np.int32, + np.int64, + ]: + output = lut.apply(arr, dtype=dtype) + assert output.dtype == dtype + assert np.array_equal(output, expected.astype(dtype)) + + for dtype in [ + np.uint8, + np.int8, + np.int16, + ]: + with pytest.raises(TypeError): + lut.apply(arr, dtype=dtype) + + for out_min, out_max in [ + (0.0, 1.0), + (-10, -5), + (100.0, 150.0), + ]: + for dtype in [ + np.float16, + np.float32, + np.float64, + ]: + scaled_data = lut.get_scaled_lut_data( + output_range=(out_min, out_max), + dtype=dtype, + ) + assert scaled_data.min() == out_min + assert scaled_data.max() == out_max + assert scaled_data[0] == out_min + assert scaled_data[-1] == out_max + assert scaled_data.dtype == dtype + + scaled_data = lut.get_scaled_lut_data( + output_range=(out_min, out_max), + dtype=dtype, + invert=True, + ) + assert scaled_data.min() == out_min + assert scaled_data.max() == out_max + assert scaled_data[-1] == out_min + assert scaled_data[0] == out_max + assert scaled_data.dtype == dtype + def test_construction_explanation(self): first_value = 0 lut = LUT( @@ -179,20 +288,45 @@ def setUp(self): self._lut_data_16 = np.arange(510, 600, dtype=np.uint16) self._explanation = 'My LUT' - # Commented out until 8 bit LUTs are reimplemented - # def test_construction(self): - # first_value = 0 - # lut = ModalityLUT( - # lut_type=RescaleTypeValues.HU, - # first_mapped_value=first_value, - # lut_data=self._lut_data, - # ) - # assert lut.ModalityLUTType == RescaleTypeValues.HU.value - # assert lut.LUTDescriptor == [len(self._lut_data), first_value, 8] - # assert lut.bits_per_entry == 8 - # assert lut.first_mapped_value == first_value - # assert np.array_equal(lut.lut_data, self._lut_data) - # assert not hasattr(lut, 'LUTExplanation') + def test_construction(self): + first_value = 0 + lut = ModalityLUT( + lut_type=RescaleTypeValues.HU, + first_mapped_value=first_value, + lut_data=self._lut_data, + ) + assert lut.ModalityLUTType == RescaleTypeValues.HU.value + assert lut.LUTDescriptor == [len(self._lut_data), first_value, 8] + assert lut.bits_per_entry == 8 + assert lut.first_mapped_value == first_value + assert np.array_equal(lut.lut_data, self._lut_data) + assert not hasattr(lut, 'LUTExplanation') + + arr = np.array([0, 1, 0, 89, 1]) + expected = np.array([10, 11, 10, 99, 11]) + output = lut.apply(arr) + assert output.dtype == np.uint8 + assert np.array_equal(output, expected) + + for dtype in [ + np.uint16, + np.uint32, + np.int16, + np.float16, + np.float32, + np.float64, + np.int32, + np.int64, + ]: + output = lut.apply(arr, dtype=dtype) + assert output.dtype == dtype + assert np.array_equal(output, expected.astype(dtype)) + + for dtype in [ + np.int8, + ]: + with pytest.raises(TypeError): + lut.apply(arr, dtype=dtype) def test_construction_16bit(self): first_value = 0 @@ -211,8 +345,30 @@ def test_construction_16bit(self): arr = np.array([0, 1, 0, 89, 1]) expected = np.array([510, 511, 510, 599, 511]) output = lut.apply(arr) + assert output.dtype == np.uint16 assert np.array_equal(output, expected) + for dtype in [ + np.uint16, + np.uint32, + np.float32, + np.float64, + np.int32, + np.int64, + ]: + output = lut.apply(arr, 
dtype=dtype) + assert output.dtype == dtype + assert np.array_equal(output, expected.astype(dtype)) + + for dtype in [ + np.uint8, + np.int8, + np.int16, + np.float16, + ]: + with pytest.raises(TypeError): + lut.apply(arr, dtype=dtype) + def test_construction_string_type(self): first_value = 0 lut_type = 'MY_MAPPING' @@ -259,9 +415,217 @@ def test_construction_wrong_dtype(self): with pytest.raises(ValueError): ModalityLUT( lut_type=RescaleTypeValues.HU, - first_mapped_value=0, # invalid - lut_data=np.array([0, 1, 2], dtype=np.int16), + first_mapped_value=0, + lut_data=np.array([0, 1, 2], dtype=np.int16), # invalid + ) + + +class TestModalityLUTTransformation(TestCase): + + def setUp(self): + super().setUp() + self._lut = ModalityLUT( + lut_type=RescaleTypeValues.HU, + first_mapped_value=0, + lut_data=np.array([11, 22, 33, 44], np.uint8), + ) + self._input_array = np.array( + [ + [0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3], + ], + dtype=np.uint8, + ) + + def test_with_lut(self): + transf = ModalityLUTTransformation(modality_lut=self._lut) + + out = transf.apply(self._input_array) + + expected = np.array( + [ + [11, 22, 33, 44], + [11, 22, 33, 44], + [11, 22, 33, 44], + ] + ) + + assert np.array_equal(out, expected) + assert out.dtype == np.uint8 + + def test_with_scale_1(self): + transf = ModalityLUTTransformation( + rescale_type=RescaleTypeValues.HU, + rescale_slope=1.0, + rescale_intercept=0.0, + ) + + out = transf.apply(self._input_array) + + expected = self._input_array + + assert np.array_equal(out, expected) + assert out.dtype == np.float64 + + for dtype in [ + np.uint8, + np.uint16, + np.uint32, + np.uint64, + np.float16, + np.float32, + np.float64, + np.int16, + np.int32, + np.int64, + ]: + out = transf.apply(self._input_array, dtype=dtype) + assert np.array_equal(out, expected) + assert out.dtype == dtype + + for dtype in [np.int8]: + msg = ( + f'Datatype {np.dtype(dtype)} does not have capacity for ' + 'values with slope 1.00 and intercept 0.00.' + ) + with pytest.raises(ValueError, match=msg): + transf.apply(self._input_array, dtype=dtype) + + msg = ( + 'An integer data type cannot be used if the input ' + 'array is floating point.' + ) + with pytest.raises(ValueError, match=msg): + transf.apply(self._input_array.astype(np.float32), dtype=np.int64) + + def test_with_scale_2(self): + transf = ModalityLUTTransformation( + rescale_type=RescaleTypeValues.HU, + rescale_slope=10.0, + rescale_intercept=0.0, + ) + + out = transf.apply(self._input_array) + + expected = self._input_array * 10 + + assert np.array_equal(out, expected) + assert out.dtype == np.float64 + + for dtype in [ + np.uint16, + np.uint32, + np.uint64, + np.float32, + np.float64, + np.int16, + np.int32, + np.int64, + ]: + out = transf.apply(self._input_array, dtype=dtype) + assert np.array_equal(out, expected) + assert out.dtype == dtype + + for dtype in [ + np.int8, + np.uint8, + ]: + msg = ( + f'Datatype {np.dtype(dtype)} does not have capacity for ' + 'values with slope 10.00 and intercept 0.00.' 
) + with pytest.raises(ValueError, match=msg): + transf.apply(self._input_array, dtype=dtype) + + def test_with_scale_3(self): + transf = ModalityLUTTransformation( + rescale_type=RescaleTypeValues.HU, + rescale_slope=2.0, + rescale_intercept=-1000.0, + ) + + out = transf.apply(self._input_array) + + expected = self._input_array.astype(np.float64) * 2 - 1000 + + assert np.array_equal(out, expected) + assert out.dtype == np.float64 + + for dtype in [ + np.float16, + np.float32, + np.float64, + np.int16, + np.int32, + np.int64, + ]: + out = transf.apply(self._input_array, dtype=dtype) + assert np.array_equal(out, expected) + assert out.dtype == dtype + + for dtype in [ + np.uint8, + np.uint16, + np.uint32, + np.uint64, + ]: + msg = ( + 'An unsigned integer data type cannot be used if the ' + 'intercept is negative.' + ) + with pytest.raises(ValueError, match=msg): + transf.apply(self._input_array, dtype=dtype) + + for dtype in [ + np.int8, + ]: + msg = ( + f'Datatype {np.dtype(dtype)} does not have capacity for ' + 'values with slope 2.00 and intercept -1000.00.' + ) + with pytest.raises(ValueError, match=msg): + transf.apply(self._input_array, dtype=dtype) + + def test_with_scale_4(self): + transf = ModalityLUTTransformation( + rescale_type=RescaleTypeValues.HU, + rescale_slope=3.14159, + rescale_intercept=0.0, + ) + + out = transf.apply(self._input_array) + + expected = self._input_array.astype(np.float64) * 3.14159 + assert np.array_equal(out, expected) + assert out.dtype == np.float64 + + for dtype in [ + np.float16, + np.float32, + np.float64, + ]: + expected = self._input_array.astype(np.float64) * dtype(3.14159) + out = transf.apply(self._input_array, dtype=dtype) + assert np.array_equal(out, expected) + assert out.dtype == dtype + + for dtype in [ + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + ]: + msg = ( + 'An integer data type cannot be used if the slope ' + 'or intercept is a non-integer value.' + ) + with pytest.raises(ValueError, match=msg): + transf.apply(self._input_array, dtype=dtype) class TestPlanePositionSequence(TestCase): @@ -943,6 +1307,63 @@ def test_construction_basic(self): assert lut.WindowWidth == 400.0 assert not hasattr(lut, 'VOILUTSequence') + input_array = np.array( + [ + [-200, -200, -200], + [-160, -160, -160], + [39.5, 39.5, 39.5], + [239, 239, 239], + [300, 300, 300], + ] + ) + + expected = np.array( + [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.5, 0.5, 0.5], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ) + inverted_expected = np.flipud(expected) + + out = lut.apply(array=input_array) + assert np.array_equal(expected, out) + assert out.dtype == np.float64 + + # Check with a different dtype + for dtype in [np.float16, np.float32, np.float64]: + out = lut.apply(array=input_array, dtype=dtype) + assert np.allclose(expected, out) + assert out.dtype == dtype + + out = lut.apply(array=input_array, dtype=dtype, invert=True) + assert np.allclose(inverted_expected, out) + assert out.dtype == dtype + + for dtype in [np.int16, np.uint8, np.int64]: + msg = f'Data type "{np.dtype(dtype)}" is not suitable.' 
+ with pytest.raises(ValueError, match=msg): + lut.apply(array=input_array, dtype=dtype) + + # Check with a different window + expected = np.array( + [ + [-0.5, -0.5, -0.5], + [-0.5, -0.5, -0.5], + [0.0, 0.0, 0.0], + [0.5, 0.5, 0.5], + [0.5, 0.5, 0.5], + ] + ) + out = lut.apply( + array=input_array, + output_range=(-0.5, 0.5), + ) + assert np.array_equal(expected, out) + assert out.dtype == np.float64 + def test_construction_explanation(self): lut = VOILUTTransformation( window_center=40.0, @@ -954,12 +1375,54 @@ def test_construction_explanation(self): def test_construction_multiple(self): lut = VOILUTTransformation( - window_center=[40.0, 600.0], - window_width=[400.0, 1500.0], - window_explanation=['Soft Tissue Window', 'Lung Window'], + window_center=[600.0, 40.0], + window_width=[1500.0, 400.0], + window_explanation=['Lung Window', 'Soft Tissue Window'], + ) + assert lut.WindowCenter == [600.0, 40.0] + assert lut.WindowWidth == [1500.0, 400.0] + + input_array_lung = np.array( + [ + [-200, -200, -200], + [-150, -150, -150], + [599.5, 599.5, 599.5], + [1350, 1350, 1350], + [1400, 1400, 1400], + ] + ) + + input_array_soft_tissue = np.array( + [ + [-200, -200, -200], + [-160, -160, -160], + [39.5, 39.5, 39.5], + [239, 239, 239], + [300, 300, 300], + ] + ) + + expected = np.array( + [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.5, 0.5, 0.5], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] ) - assert lut.WindowCenter == [40.0, 600.0] - assert lut.WindowWidth == [400.0, 1500.0] + + out = lut.apply(array=input_array_lung, voi_transform_index=0) + assert np.allclose(expected, out) + assert out.dtype == np.float64 + + out = lut.apply(array=input_array_soft_tissue, voi_transform_index=1) + assert np.array_equal(expected, out) + assert out.dtype == np.float64 + + out = lut.apply(array=input_array_soft_tissue, voi_transform_index=-1) + assert np.array_equal(expected, out) + assert out.dtype == np.float64 def test_construction_multiple_mismatch1(self): with pytest.raises(ValueError): @@ -998,7 +1461,7 @@ def test_construction_explanation_mismatch2(self): window_explanation=['Soft Tissue Window', 'Lung Window'], ) - def test_construction_lut_function(self): + def test_construction_sigmoid(self): window_center = 40.0 window_width = 400.0 voi_lut_function = VOILUTFunctionValues.SIGMOID @@ -1011,12 +1474,186 @@ def test_construction_lut_function(self): assert lut.WindowWidth == 400.0 assert lut.VOILUTFunction == voi_lut_function.value + input_array = np.array( + [ + [-2000, -20000, -2000], + [40, 40, 40], + [3000, 3000, 3000], + ] + ) + + expected = np.array( + [ + [0.0, 0.0, 0.0], + [0.5, 0.5, 0.5], + [1.0, 1.0, 1.0], + ] + ) + + out = lut.apply(array=input_array) + assert np.allclose(expected, out) + assert out.dtype == np.float64 + + # Check with a different dtype + for dtype in [np.float16, np.float32, np.float64]: + out = lut.apply(array=input_array, dtype=dtype) + assert np.allclose(expected, out) + assert out.dtype == dtype + + expected = np.array( + [ + [10.0, 10.0, 10.0], + [15.0, 15.0, 15.0], + [20.0, 20.0, 20.0], + ] + ) + + out = lut.apply( + array=input_array, + output_range=(10.0, 20.0), + ) + assert np.allclose(expected, out) + assert out.dtype == np.float64 + + # test with invert + inverted_expected = np.flipud(expected) + out = lut.apply( + array=input_array, + output_range=(10.0, 20.0), + invert=True, + ) + assert np.allclose(inverted_expected, out) + assert out.dtype == np.float64 + + def test_construction_linear_exact(self): + window_center = 40.0 + window_width = 400.0 + voi_lut_function 
= VOILUTFunctionValues.LINEAR_EXACT + lut = VOILUTTransformation( + window_center=window_center, + window_width=window_width, + voi_lut_function=voi_lut_function, + ) + assert lut.WindowCenter == 40.0 + assert lut.WindowWidth == 400.0 + assert lut.VOILUTFunction == voi_lut_function.value + + input_array = np.array( + [ + [-200, -200, -200], + [-160, -160, -160], + [40, 40, 40], + [240, 240, 240], + [300, 300, 300], + ], + np.int16 + ) + + expected = np.array( + [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.5, 0.5, 0.5], + [1.0, 1.0, 1.0], + [1.0, 1.0, 1.0], + ] + ) + + out = lut.apply(array=input_array) + assert np.array_equal(expected, out) + assert out.dtype == np.float64 + + # Check with a different window + expected = np.array( + [ + [-20.0, -20.0, -20.0], + [-20.0, -20.0, -20.0], + [-15.0, -15.0, -15.0], + [-10.0, -10.0, -10.0], + [-10.0, -10.0, -10.0], + ] + ) + out = lut.apply( + array=input_array, + output_range=(-20.0, -10.0), + ) + assert np.array_equal(expected, out) + assert out.dtype == np.float64 + + # Check inverted + expected = np.array( + [ + [-10.0, -10.0, -10.0], + [-10.0, -10.0, -10.0], + [-15.0, -15.0, -15.0], + [-20.0, -20.0, -20.0], + [-20.0, -20.0, -20.0], + ] + ) + out = lut.apply( + array=input_array, + output_range=(-20.0, -10.0), + invert=True, + dtype=np.float32 + ) + assert np.array_equal(expected, out) + assert out.dtype == np.float32 + def test_construction_luts(self): lut = VOILUTTransformation(voi_luts=[self._lut]) assert len(lut.VOILUTSequence) == 1 assert not hasattr(lut, 'WindowWidth') assert not hasattr(lut, 'WindowCenter') + input_array = np.array( + [ + [0, 0, 0], + [1, 1, 1], + [2, 2, 2], + ], + dtype=np.uint8 + ) + + expected = np.array( + [ + [0.0, 0.0, 0.0], + [0.5, 0.5, 0.5], + [1.0, 1.0, 1.0], + ], + ) + out = lut.apply(input_array) + assert np.array_equal(expected, out) + + expected = np.array( + [ + [1.0, 1.0, 1.0], + [0.5, 0.5, 0.5], + [0.0, 0.0, 0.0], + ], + ) + out = lut.apply(input_array, invert=True) + assert np.array_equal(expected, out) + + expected = np.array( + [ + [1.0, 1.0, 1.0], + [3.5, 3.5, 3.5], + [6.0, 6.0, 6.0], + ], + ) + out = lut.apply(input_array, output_range=(1.0, 6.0)) + assert np.array_equal(expected, out) + + expected = np.array( + [ + [6.0, 6.0, 6.0], + [3.5, 3.5, 3.5], + [1.0, 1.0, 1.0], + ], + ) + out = lut.apply(input_array, output_range=(1.0, 6.0), invert=True) + assert np.array_equal(expected, out) + def test_construction_both(self): lut = VOILUTTransformation( window_center=40.0, @@ -1199,9 +1836,29 @@ def test_construction_16bit(self): arr = np.array([32, 33, 32, 132]) expected = np.array([10, 11, 10, 110]) output = lut.apply(arr) + assert output.dtype == np.uint16 assert np.array_equal(output, expected) - # Commented out until 8 bit LUTs are reimplemented + for dtype in [ + np.uint16, + np.uint32, + np.float32, + np.float64, + np.int32, + np.int64, + ]: + output = lut.apply(arr, dtype=dtype) + assert output.dtype == dtype + assert np.array_equal(output, expected.astype(dtype)) + + for dtype in [ + np.uint8, + np.int8, + np.int16, + ]: + with pytest.raises(TypeError): + lut.apply(arr, dtype=dtype) + def test_construction_8bit(self): lut_data = np.arange(0, 256, dtype=np.uint8) first_mapped_value = 0 @@ -1227,8 +1884,28 @@ def test_construction_8bit(self): arr = np.array([0, 1, 0, 255]) expected = np.array([0, 1, 0, 255]) output = lut.apply(arr) + assert output.dtype == np.uint8 assert np.array_equal(output, expected) + for dtype in [ + np.uint16, + np.uint32, + np.int16, + np.float32, + np.float64, + np.int32, 
+ np.int64, + ]: + output = lut.apply(arr, dtype=dtype) + assert output.dtype == dtype + assert np.array_equal(output, expected.astype(dtype)) + + for dtype in [ + np.int8, + ]: + with pytest.raises(TypeError): + lut.apply(arr, dtype=dtype) + class TestPaletteColorLUTTransformation(TestCase): @@ -1319,21 +1996,20 @@ def test_construction_different_lengths(self): blue_lut=b_lut, ) - # Commented out until 8 bit LUTs are reimplemented - # def test_construction_different_dtypes(self): - # r_lut_data = np.arange(10, 120, dtype=np.uint8) - # g_lut_data = np.arange(20, 130, dtype=np.uint16) - # b_lut_data = np.arange(30, 140, dtype=np.uint16) - # first_mapped_value = 32 - # r_lut = PaletteColorLUT(first_mapped_value, r_lut_data, color='red') - # g_lut = PaletteColorLUT(first_mapped_value, g_lut_data, color='green') - # b_lut = PaletteColorLUT(first_mapped_value, b_lut_data, color='blue') - # with pytest.raises(ValueError): - # PaletteColorLUTTransformation( - # red_lut=r_lut, - # green_lut=g_lut, - # blue_lut=b_lut, - # ) + def test_construction_different_dtypes(self): + r_lut_data = np.arange(10, 120, dtype=np.uint8) + g_lut_data = np.arange(20, 130, dtype=np.uint16) + b_lut_data = np.arange(30, 140, dtype=np.uint16) + first_mapped_value = 32 + r_lut = PaletteColorLUT(first_mapped_value, r_lut_data, color='red') + g_lut = PaletteColorLUT(first_mapped_value, g_lut_data, color='green') + b_lut = PaletteColorLUT(first_mapped_value, b_lut_data, color='blue') + with pytest.raises(ValueError): + PaletteColorLUTTransformation( + red_lut=r_lut, + green_lut=g_lut, + blue_lut=b_lut, + ) def test_construction_different_first_values(self): r_lut_data = np.arange(10, 120, dtype=np.uint16) @@ -1352,6 +2028,15 @@ def test_construction_different_first_values(self): blue_lut=b_lut, ) + def test_construction_from_colors(self): + lut = PaletteColorLUTTransformation.from_colors( + ['black', 'red', 'green', 'blue', 'white'], + ) + + assert np.array_equal(lut.red_lut.lut_data, [0, 255, 0, 0, 255]) + assert np.array_equal(lut.green_lut.lut_data, [0, 0, 128, 0, 255]) + assert np.array_equal(lut.blue_lut.lut_data, [0, 0, 0, 255, 255]) + class TestSpecimenDescription(TestCase): def test_construction(self): From 5bc14aa066c9490d057fcada5f640f5df70eb8e2 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sat, 23 Nov 2024 21:52:52 +0100 Subject: [PATCH 77/93] Implement apply for real world mappings --- src/highdicom/content.py | 30 +++++++++++++- src/highdicom/pm/content.py | 68 ++++++++++++++++++++++++++++++++ tests/test_content.py | 12 ++++++ tests/test_pm.py | 78 ++++++++++++++++++++++++++++++++----- 4 files changed, 178 insertions(+), 10 deletions(-) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 76a71859..770a99aa 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -2143,7 +2143,9 @@ def apply( "Array contains values not in the LUT." ) - return lut_data[array - self.first_mapped_value] + if self.first_mapped_value != 0: + array = array - self.first_mapped_value + return lut_data[array] class ModalityLUT(LUT): @@ -2357,6 +2359,19 @@ def __init__( 'provided.' ) + def has_lut(self) -> bool: + """Determine whether the transformation contains a lookup table. + + Returns + ------- + bool: + True if the transformation contains a look-up table. False + otherwise, when the mapping is represented by window center and + width defining a linear relationship. 
+
+        """
+        return 'VOILUTSequence' in self
+
     def apply(
         self,
         array: np.ndarray,
@@ -2625,6 +2640,19 @@ def __init__(
             _check_long_string(rescale_type)
             self.RescaleType = rescale_type
 
+    def has_lut(self) -> bool:
+        """Determine whether the transformation contains a lookup table.
+
+        Returns
+        -------
+        bool:
+            True if the transformation contains a look-up table. False
+            otherwise, when the mapping is represented by slope and intercept
+            defining a linear relationship.
+
+        """
+        return 'ModalityLUTSequence' in self
+
     def apply(
         self,
         array: np.ndarray,

diff --git a/src/highdicom/pm/content.py b/src/highdicom/pm/content.py
index aea90057..5710ad11 100644
--- a/src/highdicom/pm/content.py
+++ b/src/highdicom/pm/content.py
@@ -152,6 +152,74 @@ def __init__(
             )
             self.QuantityDefinitionSequence = [quantity_item]
 
+    def has_lut(self) -> bool:
+        """Determine whether the mapping contains a non-linear lookup table.
+
+        Returns
+        -------
+        bool:
+            True if the mapping contains a look-up table. False otherwise,
+            when the mapping is represented by a slope and intercept defining
+            a linear relationship.
+
+        """
+        return 'RealWorldValueLUTData' in self
+
+    @property
+    def lut_data(self) -> Optional[np.ndarray]:
+        """Union[numpy.ndarray, None] LUT data, if present."""
+        if self.has_lut():
+            return np.array(self.RealWorldValueLUTData)
+        return None
+
+    def apply(
+        self,
+        array: np.ndarray,
+    ) -> np.ndarray:
+        """Apply the mapping to a pixel array.
+
+        Parameters
+        ----------
+        array: numpy.ndarray
+            Pixel array to which the transform should be applied. Can be of
+            any shape but must have an integer datatype if the mapping uses a
+            LUT.
+
+        Returns
+        -------
+        numpy.ndarray
+            Array with the mapping applied, which will have data type
+            ``numpy.float64``.
+
+        """
+        lut_data = self.lut_data
+        if lut_data is not None:
+            if array.dtype.kind not in ('u', 'i'):
+                raise ValueError(
+                    'Array must have an integer data type if the mapping '
+                    'contains a LUT.'
+                )
+            first = self.RealWorldValueFirstValueMapped
+            last = self.RealWorldValueLastValueMapped
+            if len(lut_data) != last + 1 - first:
+                raise RuntimeError(
+                    "LUT data is stored with the incorrect number of elements."
+                )
+
+            if array.min() < first or array.max() > last:
+                raise RuntimeError(
+                    "Array contains values not in the LUT."
+                )
+
+            if first != 0:
+                array = array - first
+
+            return lut_data[array]
+        else:
+            slope = self.RealWorldValueSlope
+            intercept = self.RealWorldValueIntercept
+
+            # TODO should we check values are within range here?
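+            # Worked example: with slope 10 and intercept 200 (values taken
+            # from the tests in this patch), a stored value of 5 maps to
+            # 5 * 10 + 200 = 250.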
+            return array * slope + intercept
 
 
 class DimensionIndexSequence(DataElementSequence):

diff --git a/tests/test_content.py b/tests/test_content.py
index aba26ee0..42c76950 100644
--- a/tests/test_content.py
+++ b/tests/test_content.py
@@ -440,6 +440,7 @@ def setUp(self):
 
     def test_with_lut(self):
         transf = ModalityLUTTransformation(modality_lut=self._lut)
+        assert transf.has_lut()
 
         out = transf.apply(self._input_array)
 
@@ -460,6 +461,7 @@ def test_with_scale_1(self):
             rescale_slope=1.0,
             rescale_intercept=0.0,
         )
+        assert not transf.has_lut()
 
         out = transf.apply(self._input_array)
 
@@ -505,6 +507,7 @@ def test_with_scale_2(self):
             rescale_slope=10.0,
             rescale_intercept=0.0,
         )
+        assert not transf.has_lut()
 
         out = transf.apply(self._input_array)
 
@@ -544,6 +547,7 @@ def test_with_scale_3(self):
             rescale_slope=2.0,
             rescale_intercept=-1000.0,
         )
+        assert not transf.has_lut()
 
         out = transf.apply(self._input_array)
 
@@ -593,6 +597,7 @@ def test_with_scale_4(self):
             rescale_slope=3.14159,
             rescale_intercept=0.0,
         )
+        assert not transf.has_lut()
 
         out = transf.apply(self._input_array)
 
@@ -1306,6 +1311,7 @@ def test_construction_basic(self):
         assert lut.WindowCenter == 40.0
         assert lut.WindowWidth == 400.0
         assert not hasattr(lut, 'VOILUTSequence')
+        assert not lut.has_lut()
 
         input_array = np.array(
             [
@@ -1372,6 +1378,7 @@ def test_construction_explanation(self):
         )
         assert lut.WindowCenter == 40.0
         assert lut.WindowWidth == 400.0
+        assert not lut.has_lut()
 
     def test_construction_multiple(self):
         lut = VOILUTTransformation(
@@ -1381,6 +1388,7 @@ def test_construction_multiple(self):
         )
         assert lut.WindowCenter == [600.0, 40.0]
         assert lut.WindowWidth == [1500.0, 400.0]
+        assert not lut.has_lut()
 
         input_array_lung = np.array(
             [
@@ -1473,6 +1481,7 @@ def test_construction_sigmoid(self):
         assert lut.WindowCenter == 40.0
         assert lut.WindowWidth == 400.0
         assert lut.VOILUTFunction == voi_lut_function.value
+        assert not lut.has_lut()
 
         input_array = np.array(
             [
@@ -1537,6 +1546,7 @@ def test_construction_linear_exact(self):
         assert lut.WindowCenter == 40.0
         assert lut.WindowWidth == 400.0
         assert lut.VOILUTFunction == voi_lut_function.value
+        assert not lut.has_lut()
 
         input_array = np.array(
             [
@@ -1604,6 +1614,7 @@ def test_construction_luts(self):
         assert len(lut.VOILUTSequence) == 1
         assert not hasattr(lut, 'WindowWidth')
         assert not hasattr(lut, 'WindowCenter')
+        assert lut.has_lut()
 
         input_array = np.array(
             [
@@ -1663,6 +1674,7 @@ def test_construction_both(self):
         assert len(lut.VOILUTSequence) == 1
         assert lut.WindowCenter == 40.0
         assert lut.WindowWidth == 400.0
+        assert lut.has_lut()
 
     def test_construction_neither(self):
         with pytest.raises(TypeError):

diff --git a/tests/test_pm.py b/tests/test_pm.py
index 3538f300..d4843ef5 100644
--- a/tests/test_pm.py
+++ b/tests/test_pm.py
@@ -40,7 +40,7 @@ def test_failed_construction_missing_or_unnecessary_parameters(self):
         lut_label = '1'
         lut_explanation = 'Feature 1'
         unit = codes.UCUM.NoUnits
-        value_range = [0, 255]
+        value_range = (0, 255)
         lut_data = [v**2 for v in range(256)]
         intercept = 0
         slope = 1
@@ -92,9 +92,9 @@ def test_construction_integer_linear_relationship(self):
         lut_label = '1'
         lut_explanation = 'Feature 1'
         unit = codes.UCUM.NoUnits
-        value_range = [0, 255]
-        intercept = 0
-        slope = 1
+        value_range = (0, 255)
+        intercept = 200
+        slope = 10
         quantity_definition = Code('130402', 'DCM', 'Class activation')
         m = RealWorldValueMapping(
             lut_label=lut_label,
@@ -126,12 +126,32 @@ def test_construction_integer_linear_relationship(self):
         quantity_item = m.QuantityDefinitionSequence[0]
         assert quantity_item.name == codes.SCT.Quantity
         assert quantity_item.value == quantity_definition
+        assert not m.has_lut()
+
+        array = np.array(
+            [
+                [0, 0, 0],
+                [5, 5, 5],
+                [10, 10, 10],
+            ],
+        )
+        expected = np.array(
+            [
+                [200, 200, 200],
+                [250, 250, 250],
+                [300, 300, 300],
+            ],
+        )
+
+        out = m.apply(array)
+        assert np.array_equal(out, expected)
+        assert out.dtype == np.float64
 
     def test_construction_integer_nonlinear_relationship(self):
         lut_label = '1'
         lut_explanation = 'Feature 1'
         unit = codes.UCUM.NoUnits
-        value_range = [0, 255]
+        value_range = (0, 255)
         lut_data = [v**2 for v in range(256)]
         m = RealWorldValueMapping(
             lut_label=lut_label,
@@ -157,14 +177,34 @@ def test_construction_integer_nonlinear_relationship(self):
             m.RealWorldValueSlope  # noqa: B018
         with pytest.raises(AttributeError):
             m.RealWorldValueIntercept  # noqa: B018
+        assert m.has_lut()
+
+        array = np.array(
+            [
+                [0, 0, 0],
+                [5, 5, 5],
+                [10, 10, 10],
+            ],
+        )
+        expected = np.array(
+            [
+                [0, 0, 0],
+                [25, 25, 25],
+                [100, 100, 100],
+            ],
+        )
+
+        out = m.apply(array)
+        assert np.array_equal(out, expected)
+        assert out.dtype == np.float64
 
     def test_construction_floating_point_linear_relationship(self):
         lut_label = '1'
         lut_explanation = 'Feature 1'
         unit = codes.UCUM.NoUnits
-        value_range = [0.0, 1.0]
-        intercept = 0
-        slope = 1
+        value_range = (0.0, 1.0)
+        intercept = -23.13
+        slope = 5.0
         m = RealWorldValueMapping(
             lut_label=lut_label,
             lut_explanation=lut_explanation,
@@ -190,12 +230,32 @@ def test_construction_floating_point_linear_relationship(self):
             m.RealWorldValueLastValueMapped  # noqa: B018
         with pytest.raises(AttributeError):
             m.RealWorldValueLUTData  # noqa: B018
+        assert not m.has_lut()
+
+        array = np.array(
+            [
+                [0, 0, 0],
+                [5, 5, 5],
+                [10, 10, 10],
+            ],
+        )
+        expected = np.array(
+            [
+                [-23.13, -23.13, -23.13],
+                [1.87, 1.87, 1.87],
+                [26.87, 26.87, 26.87],
+            ],
+        )
+
+        out = m.apply(array)
+        assert np.allclose(out, expected)
+        assert out.dtype == np.float64
 
     def test_failed_construction_floating_point_nonlinear_relationship(self):
         lut_label = '1'
         lut_explanation = 'Feature 1'
         unit = codes.UCUM.NoUnits
-        value_range = [0.0, 1.0]
+        value_range = (0.0, 1.0)
         lut_data = [
             v**2
             for v in np.arange(value_range[0], value_range[1], 0.1)

From 4e8365758d83e5b775ce8f2cdca3dcf3f830f9e7 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Sun, 24 Nov 2024 08:34:59 -0500
Subject: [PATCH 78/93] Progress on pixel transform parsing

---
 src/highdicom/content.py    | 660 +++++++++++++++++++++++++-----------
 src/highdicom/pm/content.py |  21 +-
 src/highdicom/pr/content.py |   6 +-
 src/highdicom/pr/sop.py     |   6 +-
 src/highdicom/seg/sop.py    |   6 +
 tests/test_content.py       | 124 ++++---
 tests/test_pm.py            |  16 +-
 tests/test_pr.py            |   2 +-
 8 files changed, 584 insertions(+), 257 deletions(-)

diff --git a/src/highdicom/content.py b/src/highdicom/content.py
index 770a99aa..62f182d5 100644
--- a/src/highdicom/content.py
+++ b/src/highdicom/content.py
@@ -2135,13 +2135,9 @@ def apply(
             lut_data = lut_data.astype(dtype, casting='safe')
 
         last_mapped_value = self.first_mapped_value + self.number_of_entries - 1
-        if (
-            array.min() < self.first_mapped_value or
-            array.max() > last_mapped_value
-        ):
-            raise RuntimeError(
-                "Array contains values not in the LUT."
-            )
+        # Clip because values outside the range should be mapped to the
+        # first/last value
+        array = np.clip(array, self.first_mapped_value, last_mapped_value)
 
         if self.first_mapped_value != 0:
             array = array - self.first_mapped_value
@@ -2367,11 +2363,136 @@ def has_lut(self) -> bool:
         bool:
             True if the transformation contains a look-up table. False
             otherwise, when the mapping is represented by window center and
-            width defining a linear relationship.
+            width defining a linear relationship. Note that it is possible for
+            a transformation to contain both a LUT and window parameters.
 
         """
         return 'VOILUTSequence' in self
 
+    def has_window(self) -> bool:
+        """Determine whether the transformation contains window parameters.
+
+        Returns
+        -------
+        bool:
+            True if the transformation contains one or more sets of window
+            parameters defining a linear relationship. False otherwise, when
+            the mapping is represented by a lookup table. Note that it is
+            possible for a transformation to contain both a LUT and window
+            parameters.
+
+        """
+        return 'WindowCenter' in self
+
+    @staticmethod
+    def voi_window_function(
+        array: np.ndarray,
+        window_center: float,
+        window_width: float,
+        voi_lut_function: Union[
+            str,
+            VOILUTFunctionValues
+        ] = VOILUTFunctionValues.LINEAR,
+        output_range: Tuple[float, float] = (0.0, 1.0),
+        dtype: Union[type, str, np.dtype, None] = np.float64,
+        invert: bool = False,
+    ) -> np.ndarray:
+        """Functional implementation of the DICOM VOI windowing function.
+
+        Parameters
+        ----------
+        array: numpy.ndarray
+            Pixel array to which the windowing should be applied. Can be of
+            any shape.
+        window_center: float
+            Center of the window.
+        window_width: float
+            Width of the window.
+        voi_lut_function: Union[str, highdicom.VOILUTFunctionValues], optional
+            Type of VOI LUT function.
+        output_range: Tuple[float, float], optional
+            Range of output values to which the VOI range is mapped.
+        dtype: Union[type, str, numpy.dtype, None], optional
+            Data type of the output array. Should be a floating point data
+            type.
+        invert: bool, optional
+            Invert the returned array such that the lowest original value in
+            the LUT or input window is mapped to the upper limit and the
+            highest original value is mapped to the lower limit. This may be
+            used to efficiently combine a VOI LUT transformation with a
+            presentation transform that inverts the range.
+
+        Returns
+        -------
+        numpy.ndarray:
+            Array with the VOI window function applied.
+
+        """
+        voi_lut_function = VOILUTFunctionValues(voi_lut_function)
+        output_min, output_max = output_range
+        if output_min >= output_max:
+            raise ValueError(
+                "Second value of 'output_range' must be higher than the first."
+            )
+
+        if dtype is None:
+            dtype = np.dtype(np.float64)
+        else:
+            dtype = np.dtype(dtype)
+
+        window_width = dtype.type(window_width)
+        window_center = dtype.type(window_center)
+        if array.dtype != dtype:
+            array = array.astype(dtype)
+
+        if voi_lut_function in (
+            VOILUTFunctionValues.LINEAR,
+            VOILUTFunctionValues.LINEAR_EXACT,
+        ):
+            output_scale = (
+                output_max - output_min
+            )
+            if voi_lut_function == VOILUTFunctionValues.LINEAR:
+                # LINEAR uses the range
+                # from c - 0.5w to c + 0.5w - 1
+                scale_factor = (
+                    output_scale / (window_width - 1)
+                )
+            else:
+                # LINEAR_EXACT uses the full range
+                # from c - 0.5w to c + 0.5w
+                scale_factor = output_scale / window_width
+
+            window_min = window_center - window_width / 2.0
+            if invert:
+                array = (
+                    (window_min - array) * scale_factor +
+                    output_max
+                )
+            else:
+                array = (
+                    (array - window_min) * scale_factor +
+                    output_min
+                )
+
+            array = np.clip(array, output_min, output_max)
+
+        elif voi_lut_function == VOILUTFunctionValues.SIGMOID:
+            if invert:
+                offset_array = window_center - array
+            else:
+                offset_array = array - window_center
+            exp_term = np.exp(
+                -4.0 * offset_array /
+                window_width
+            )
+            array = (
+                (output_max - output_min) /
+                (1.0 + exp_term)
+            ) + output_min
+
+        return array
+
     def apply(
         self,
         array: np.ndarray,
@@ -2379,6 +2500,7 @@ def apply(
         voi_transform_index: int = 0,
         dtype: Union[type, str, np.dtype, None] = None,
         invert: bool = False,
+        prefer_lut: bool = False,
     ) -> np.ndarray:
         """Apply the transformation to an array.
 
@@ -2403,6 +2525,10 @@ def apply(
             highest original value is mapped to the lower limit. This may be
             used to efficiently combine a VOI LUT transformation with a
             presentation transform that inverts the range.
+        prefer_lut: bool, optional
+            If True and the transformation contains both a LUT and window
+            parameters, apply the LUT. If False and both a LUT and window
+            parameters are present, apply the window.
 
         Returns
         -------
@@ -2410,8 +2536,6 @@ def apply(
             Array with transformation applied.
 
         """
-        # TODO what if both window and LUT are present (explicitky possible
-        # within the standard)?
         if dtype is None:
             dtype = np.dtype(np.float64)
         else:
@@ -2423,13 +2547,7 @@ def apply(
                 f'Data type "{dtype}" is not suitable.'
             )
 
-        output_min, output_max = output_range
-        if output_min >= output_max:
-            raise ValueError(
-                "Second value of 'output_range' must be higher than the first."
-            )
-
-        if 'VOILUTSequence' in self:
+        if not self.has_window() or (self.has_lut() and prefer_lut):
             if array.dtype.kind not in ('i', 'u'):
                 raise ValueError(
                     "Array must have an integer data type if a LUT is used."
                 )
             array = scaled_lut_data[array - voi_lut.first_mapped_value]
         else:
-            window_center = None
-            window_width = None
-            voi_function = 'LINEAR'
+            voi_lut_function = 'LINEAR'
 
-            if (
-                'WindowCenter' in self or
-                'WindowWidth' in self
-            ):
-                window_center = self.WindowCenter
-                window_width = self.WindowWidth
-
-                if 'VOILUTFunction' in self:
-                    voi_function = self.VOILUTFunction
-
-                if isinstance(window_width, (list, MultiValue)):
-                    try:
-                        window_width = window_width[
-                            voi_transform_index
-                        ]
-                    except IndexError as e:
-                        raise IndexError(
-                            "Requested 'voi_transform_index' is "
-                            "not present."
-                        ) from e
-                elif voi_transform_index not in (0, -1):
+            window_center = self.WindowCenter
+            window_width = self.WindowWidth
+
+            if 'VOILUTFunction' in self:
+                voi_lut_function = self.VOILUTFunction
+
+            if isinstance(window_width, (list, MultiValue)):
+                try:
+                    window_width = window_width[
+                        voi_transform_index
+                    ]
+                except IndexError as e:
                     raise IndexError(
                         "Requested 'voi_transform_index' is "
                         "not present."
                     ) from e
+            elif voi_transform_index not in (0, -1):
+                raise IndexError(
+                    "Requested 'voi_transform_index' is "
+                    "not present."
+                )
 
-                if isinstance(window_center, (list, MultiValue)):
-                    try:
-                        window_center = window_center[
-                            voi_transform_index
-                        ]
-                    except IndexError as e:
-                        raise IndexError(
-                            "Requested 'voi_transform_index' is "
-                            "not present."
-                        ) from e
-                elif voi_transform_index not in (0, -1):
+            if isinstance(window_center, (list, MultiValue)):
+                try:
+                    window_center = window_center[
+                        voi_transform_index
+                    ]
+                except IndexError as e:
                     raise IndexError(
                         "Requested 'voi_transform_index' is "
                         "not present."
                     ) from e
+            elif voi_transform_index not in (0, -1):
+                raise IndexError(
+                    "Requested 'voi_transform_index' is "
+                    "not present."
+                )
 
-            if (
-                window_center is not None and
-                window_width is not None
-            ):
-                window_width = dtype.type(window_width)
-                window_center = dtype.type(window_center)
-                if array.dtype != dtype:
-                    array = array.astype(dtype)
-
-                if voi_function in ('LINEAR', 'LINEAR_EXACT'):
-                    output_scale = (
-                        output_max - output_min
-                    )
-                    if voi_function == 'LINEAR':
-                        # LINEAR uses the range
-                        # from c - 0.5w to c + 0.5w - 1
-                        scale_factor = (
-                            output_scale / (window_width - 1)
-                        )
-                    else:
-                        # LINEAR_EXACT uses the full range
-                        # from c - 0.5w to c + 0.5w
-                        scale_factor = output_scale / window_width
-
-                    window_min = window_center - window_width / 2.0
-                    if invert:
-                        array = (
-                            (window_min - array) * scale_factor +
-                            output_max
-                        )
-                    else:
-                        array = (
-                            (array - window_min) * scale_factor +
-                            output_min
-                        )
-
-                    array = np.clip(array, output_min, output_max)
-
-                elif voi_function == 'SIGMOID':
-                    if invert:
-                        offset_array = window_center - array
-                    else:
-                        offset_array = array - window_center
-                    exp_term = np.exp(
-                        -4.0 * offset_array /
-                        window_width
-                    )
-                    array = (
-                        (output_max - output_min) /
-                        (1.0 + exp_term)
-                    ) + output_min
-                else:
-                    raise ValueError(
-                        'Unrecognized value for VOILUTFunction: '
-                        f"'{voi_function}'"
-                    )
+            array = self.voi_window_function(
+                array,
+                window_center=cast(float, window_center),
+                window_width=cast(float, window_width),
+                voi_lut_function=voi_lut_function,
+                output_range=output_range,
+                dtype=dtype,
+                invert=invert,
+            )
 
         return array
 
@@ -2960,7 +3025,7 @@ def number_of_entries(self) -> int:
         descriptor = getattr(self, f'{self._attr_name_prefix}Descriptor')
         value = int(descriptor[0])
         if value == 0:
-            return 2 ** self.bits_per_entry
+            return 2 ** 16
         return value
 
     @property
@@ -3021,21 +3086,17 @@ def apply(
             lut_data = lut_data.astype(dtype, casting='safe')
 
         last_mapped_value = self.first_mapped_value + self.number_of_entries - 1
-        if (
-            array.min() < self.first_mapped_value or
-            array.max() > last_mapped_value
-        ):
-            raise RuntimeError(
-                "Array contains values not in the LUT."
-            )
+        # Clip because values outside the range should be mapped to the
+        # first/last value
+        array = np.clip(array, self.first_mapped_value, last_mapped_value)
 
         return lut_data[array - self.first_mapped_value]
 
     @classmethod
-    def from_dataset(cls, dataset: Dataset, color: str) -> Self:
+    def extract_from_dataset(cls, dataset: Dataset, color: str) -> Self:
         """Construct from an existing dataset.
 
-        Note that unlike many other from_dataset() methods, this method
+        Note that unlike many other ``from_dataset()`` methods, this method
         extracts only the attributes it needs from the original dataset, and
         always returns a new object.
 
@@ -3255,7 +3316,7 @@ def number_of_entries(self) -> int:
         # That's because the descriptor attributes have VR US, which cannot
         # encode the value of 2^16, but only values in the range [0, 2^16 - 1].
         if value == 0:
-            return 2 ** self.bits_per_entry
+            return 2 ** 16
         else:
             return value
 
@@ -3274,10 +3335,10 @@ def bits_per_entry(self) -> int:
         return int(descriptor[2])
 
     @classmethod
-    def from_dataset(cls, dataset: Dataset, color: str) -> Self:
+    def extract_from_dataset(cls, dataset: Dataset, color: str) -> Self:
         """Construct from an existing dataset.
 
-        Note that unlike many other from_dataset() methods, this method
+        Note that unlike many other ``from_dataset()`` methods, this method
        extracts only the attributes it needs from the original dataset, and
        always returns a new object.
 
@@ -3364,12 +3425,12 @@ def __init__(
         super().__init__()
 
         # Checks on inputs
-        self._color_luts = {
+        _color_luts = {
             'Red': red_lut,
             'Green': green_lut,
             'Blue': blue_lut
         }
-        for lut in self._color_luts.values():
+        for lut in _color_luts.values():
             if not isinstance(lut, (PaletteColorLUT, SegmentedPaletteColorLUT)):
                 raise TypeError(
                     'Arguments "red_lut", "green_lut", and "blue_lut" must be '
@@ -3413,7 +3474,7 @@ def __init__(
                 'first mapped value.'
             )
 
-        for name, lut in self._color_luts.items():
+        for name, lut in _color_luts.items():
             desc_attr = f'{name}PaletteColorLookupTableDescriptor'
             setattr(
                 self,
@@ -3454,11 +3515,10 @@ def from_colors(
             `_ for the documentation of that function or `here
             `_) for the
-            original list).
-            This includes many case-insensitive color names (e.g. ``"red"``,
-            ``"Blue"``, or ``"YELLOW"``), hex codes (e.g. ``"#ff7733"``) or
-            decimal integers in the format of this example: ``"RGB(255, 255,
-            0)"``.
+            original list of colors). This includes many case-insensitive color
+            names (e.g. ``"red"``, ``"Crimson"``, or ``"INDIGO"``), hex codes
+            (e.g. ``"#ff7733"``) or decimal integers in the format of this
+            example: ``"RGB(255, 255, 0)"``.
         first_mapped_value: int
             Pixel value that will be mapped to the first value in the lookup
             table.
@@ -3515,13 +3575,179 @@ def from_colors(
             palette_color_lut_uid=palette_color_lut_uid,
         )
 
+    @staticmethod
+    def _parse_attributes(dataset: Dataset) -> Tuple[
+        bool,
+        Tuple[int, int, int],
+        Tuple[bytes, bytes, bytes],
+    ]:
+        """Extract information about palette color lookup table from a dataset.
+
+        Performs various checks that the information retrieved is valid.
+
+        Parameters
+        ----------
+        dataset: pydicom.Dataset
+            Dataset containing Palette Color LUT information. Note that any
+            number of other attributes may be included and will be ignored (for
+            example allowing an entire image with Palette Color LUT information
+            at the top level to be passed).
+
+        Returns
+        -------
+        is_segmented: bool
+            True if the LUT is segmented. False otherwise.
+        descriptor: Tuple[int, int, int]
+            Lookup table descriptor containing in this order the number of
+            entries, first mapped value, and bits per entry. These values are
+            shared between the three color LUTs.
+        lut_data: Tuple[bytes, bytes, bytes]
+            Raw bytes data for the red, green and blue LUTs.
+
+        """
+        is_segmented = 'SegmentedRedPaletteColorLookupTableData' in dataset
+
+        if not is_segmented:
+            if 'RedPaletteColorLookupTableData' not in dataset:
+                raise AttributeError(
+                    'Dataset does not contain palette color lookup table '
+                    'attributes.'
+                )
+
+        descriptor = dataset.RedPaletteColorLookupTableDescriptor
+        if len(descriptor) != 3:
+            raise RuntimeError(
+                'Invalid Palette Color LUT Descriptor'
+            )
+        number_of_entries, _, bits_per_entry = descriptor
+
+        if number_of_entries == 0:
+            number_of_entries = 2 ** 16
+
+        if bits_per_entry == 8:
+            expected_num_bytes = number_of_entries
+            if number_of_entries % 2 == 1:
+                # Account for padding byte
+                expected_num_bytes += 1
+        elif bits_per_entry == 16:
+            expected_num_bytes = number_of_entries * 2
+        else:
+            raise RuntimeError(
+                'Invalid number of bits per entry found in Palette Color '
+                'LUT Descriptor.'
+            )
+
+        lut_data = []
+        for color in ['Red', 'Green', 'Blue']:
+            desc_kw = f'{color}PaletteColorLookupTableDescriptor'
+            if desc_kw not in dataset:
+                raise AttributeError(
+                    f"Dataset has no attribute '{desc_kw}'."
+                )
+
+            color_descriptor = getattr(dataset, desc_kw)
+            if color_descriptor != descriptor:
+                # Descriptors must match between all three colors
+                raise RuntimeError(
+                    'Dataset has mismatched palette color LUT '
+                    'descriptors.'
+                )
+
+            segmented_kw = f'Segmented{color}PaletteColorLookupTableData'
+            standard_kw = f'{color}PaletteColorLookupTableData'
+            if is_segmented:
+                data_kw = segmented_kw
+                wrong_data_kw = standard_kw
+            else:
+                data_kw = standard_kw
+                wrong_data_kw = segmented_kw
+
+            if data_kw not in dataset:
+                raise AttributeError(
+                    f"Dataset has no attribute '{data_kw}'."
+                )
+            if wrong_data_kw in dataset:
+                raise AttributeError(
+                    "Mismatch of segmented LUT and standard LUT found."
+                )
+
+            lut_bytes = getattr(dataset, data_kw)
+            if len(lut_bytes) != expected_num_bytes:
+                raise RuntimeError(
+                    "LUT data has incorrect length"
+                )
+            lut_data.append(lut_bytes)
+
+        return (
+            is_segmented,
+            tuple(descriptor),
+            tuple(lut_data)
+        )
+
+    @property
+    def is_segmented(self) -> bool:
+        """bool: True if the transformation is a segmented LUT.
+        False otherwise."""
+        return 'SegmentedRedPaletteColorLookupTableData' in self
+
+    @property
+    def number_of_entries(self) -> int:
+        """int: Number of entries in the lookup table."""
+        value = int(self.RedPaletteColorLookupTableDescriptor[0])
+        # Part 3 Section C.7.6.3.1.5 Palette Color Lookup Table Descriptor
+        # "When the number of table entries is equal to 2^16
+        # then this value shall be 0".
+        # That's because the descriptor attributes have VR US, which cannot
+        # encode the value of 2^16, but only values in the range [0, 2^16 - 1].
+        if value == 0:
+            return 2**16
+        else:
+            return value
+
+    @property
+    def first_mapped_value(self) -> int:
+        """int: Pixel value that will be mapped to the first value in the
+        lookup table.
+        """
+        return int(self.RedPaletteColorLookupTableDescriptor[1])
+
+    @property
+    def bits_per_entry(self) -> int:
+        """int: Bits allocated for the lookup table data. 8 or 16."""
+        return int(self.RedPaletteColorLookupTableDescriptor[2])
+
+    def _get_lut(self, color: str):
+        """Get a LUT for a single given color channel.
+
+        Parameters
+        ----------
+        color: str
+            Name of the color, either ``'red'``, ``'green'``, or ``'blue'``.
+
+        Returns
+        -------
+        Union[highdicom.PaletteColorLUT, highdicom.SegmentedPaletteColorLUT]:
+            Lookup table for the given output color channel
+
+        """
+        if self.is_segmented:
+            return SegmentedPaletteColorLUT.extract_from_dataset(
+                self,
+                color=color.lower(),
+            )
+        else:
+            return PaletteColorLUT.extract_from_dataset(
+                self,
+                color=color.lower(),
+            )
+
     @property
     def red_lut(self) -> Union[PaletteColorLUT, SegmentedPaletteColorLUT]:
         """Union[highdicom.PaletteColorLUT, highdicom.SegmentedPaletteColorLUT]:
 
         Lookup table for the red output color channel
 
         """
-        return self._color_luts['Red']
+        return self._get_lut('red')
 
     @property
     def green_lut(self) -> Union[PaletteColorLUT, SegmentedPaletteColorLUT]:
@@ -3529,7 +3755,7 @@ def green_lut(self) -> Union[PaletteColorLUT, SegmentedPaletteColorLUT]:
 
         Lookup table for the green output color channel
 
         """
-        return self._color_luts['Green']
+        return self._get_lut('green')
 
     @property
     def blue_lut(self) -> Union[PaletteColorLUT, SegmentedPaletteColorLUT]:
@@ -3537,48 +3763,90 @@ def blue_lut(self) -> Union[PaletteColorLUT, SegmentedPaletteColorLUT]:
 
         Lookup table for the blue output color channel
 
         """
-        return self._color_luts['Blue']
+        return self._get_lut('blue')
 
-    def apply(self, array: np.ndarray) -> np.ndarray:
-        """Apply the LUT to a pixel array.
+    @property
+    def combined_lut_data(self) -> np.ndarray:
+        """numpy.ndarray:
+
+        A NumPy array of shape (number_of_entries, 3) containing the red,
+        green and blue lut data stacked along the final dimension of the
+        array. Data type will be 8 or 16 bit unsigned integer depending on
+        the number of bits per entry in the LUT.
+
+        """
+        if self._lut_data is None:
+            _, self._lut_data = self._get_combined_lut_data(self)
+        return cast(np.ndarray, self._lut_data)
+
+    @classmethod
+    def _get_combined_lut_data(
+        cls,
+        dataset: Dataset,
+    ) -> Tuple[int, np.ndarray]:
+        """Get a LUT array with three color channels from a dataset.
 
         Parameters
         ----------
-        apply: numpy.ndarray
-            Pixel array to which the LUT should be applied. Can be of any shape
-            but must have an integer datatype.
+        dataset: pydicom.Dataset
+            Dataset containing Palette Color LUT information. Note that any
+            number of other attributes may be included and will be ignored (for
+            example allowing an entire image with Palette Color LUT information
+            at the top level to be passed).
 
         Returns
         -------
-        numpy.ndarray
-            Array with LUT applied. The RGB channels will be stacked along a
-            new final dimension.
+        first_mapped_value: int
+            The first input value included in the LUT.
+        lut_data: numpy.ndarray
+            A NumPy array of shape (number_of_entries, 3) containing the red,
+            green and blue lut data stacked along the final dimension of the
+            array. Data type will be 8 or 16 bit unsigned integer depending on
+            the number of bits per entry in the LUT.
 
         """
-        if isinstance(self.red_lut, SegmentedPaletteColorLUT):
+        (
+            is_segmented,
+            (number_of_entries, first_mapped_value, bits_per_entry),
+            lut_data,
+        ) = cls._parse_attributes(dataset)
+
+        if is_segmented:
             raise RuntimeError(
-                "The 'apply' method is not implemented for segmented LUTs."
+                'Combined LUT data is not supported for segmented LUTs'
             )
 
-        red_plane = self.red_lut.apply(array)
-        green_plane = self.green_lut.apply(array)
-        blue_plane = self.blue_lut.apply(array)
+        if bits_per_entry == 8:
+            dtype = np.uint8
+        else:
+            dtype = np.uint16
 
-        return np.stack([red_plane, green_plane, blue_plane], -1)
+        combined_array = np.stack(
+            [np.frombuffer(buf, dtype=dtype) for buf in lut_data],
+            axis=-1
+        )
+
+        # Account for padding byte
+        if combined_array.shape[0] == number_of_entries + 1:
+            combined_array = combined_array[:-1]
+
+        return first_mapped_value, combined_array
 
     @classmethod
-    def from_dataset(cls, dataset: Dataset) -> Self:
+    def extract_from_dataset(cls, dataset: Dataset) -> Self:
         """Construct from an existing dataset.
 
-        Note that unlike many other from_dataset() methods, this method
+        Note that unlike many other ``from_dataset()`` methods, this method
         extracts only the attributes it needs from the original dataset, and
         always returns a new object.
 
         Parameters
         ----------
         dataset: pydicom.Dataset
-            Dataset containing the attributes of the Palette Color Lookup Table
-            Transformation.
+            Dataset containing Palette Color LUT information. Note that any
+            number of other attributes may be included and will be ignored (for
+            example allowing an entire image with Palette Color LUT information
+            at the top level to be passed).
 
         Returns
         -------
@@ -3588,57 +3856,55 @@ def from_dataset(cls, dataset: Dataset) -> Self:
 
         """
         new_dataset = Dataset()
 
-        is_segmented = 'SegmentedRedPaletteColorLookupTableData' in dataset
+        (
+            is_segmented,
+            descriptor,
+            lut_data,
+        ) = cls._parse_attributes(dataset)
 
-        new_dataset._color_luts = {}
-
-        for color in ['Red', 'Green', 'Blue']:
-            desc_attr = f'{color}PaletteColorLookupTableDescriptor'
-
-            if desc_attr not in dataset:
-                raise AttributeError(
-                    f"Dataset has no attribute '{desc_attr}'."
-                )
-            setattr(
-                new_dataset,
-                desc_attr,
-                getattr(dataset, desc_attr)
-            )
+        for color, data in zip(['Red', 'Green', 'Blue'], lut_data):
+            desc_kw = f'{color}PaletteColorLookupTableDescriptor'
+            setattr(new_dataset, desc_kw, list(descriptor))
 
             if is_segmented:
-                data_attr = f'Segmented{color}PaletteColorLookupTableData'
-                wrong_attr = f'{color}PaletteColorLookupTableData'
+                data_kw = f'Segmented{color}PaletteColorLookupTableData'
             else:
-                data_attr = f'{color}PaletteColorLookupTableData'
-                wrong_attr = f'Segmented{color}PaletteColorLookupTableData'
+                data_kw = f'{color}PaletteColorLookupTableData'
+            setattr(new_dataset, data_kw, data)
 
-            if data_attr not in dataset:
-                raise AttributeError(
-                    f"Dataset has no attribute '{desc_attr}'."
-                )
-            if wrong_attr in dataset:
-                raise AttributeError(
-                    "Mismatch of segmented LUT and standard LUT found."
-                )
-
-            setattr(
-                new_dataset,
-                data_attr,
-                getattr(dataset, data_attr)
-            )
-
-            if is_segmented:
-                new_dataset._color_luts[color] = (
-                    SegmentedPaletteColorLUT.from_dataset(
-                        new_dataset,
-                        color=color.lower(),
-                    )
-                )
-            else:
-                new_dataset._color_luts[color] = PaletteColorLUT.from_dataset(
-                    new_dataset,
-                    color=color.lower(),
-                )
-
         new_dataset.__class__ = cls
         return cast(cls, new_dataset)
 
+    def apply(self, array: np.ndarray) -> np.ndarray:
+        """Apply the LUT to a pixel array.
+
+        Parameters
+        ----------
+        array: numpy.ndarray
+            Pixel array to which the LUT should be applied. Can be of any shape
+            but must have an integer datatype.
+
+        Returns
+        -------
+        numpy.ndarray
+            Array with LUT applied. The RGB channels will be stacked along a
+            new final dimension.
+
+        """
+        if isinstance(self.red_lut, SegmentedPaletteColorLUT):
+            raise RuntimeError(
+                "The 'apply' method is not implemented for segmented LUTs."
+            )
+
+        last_mapped_value = (
+            self.first_mapped_value + self.number_of_entries - 1
+        )
+
+        # Clip because values outside the region are mapped to the first/last
+        # values in the LUT
+        array = np.clip(array, self.first_mapped_value, last_mapped_value)
+
+        if self.first_mapped_value != 0:
+            array = array - self.first_mapped_value
+
+        return self.combined_lut_data[array, :]

diff --git a/src/highdicom/pm/content.py b/src/highdicom/pm/content.py
index 5710ad11..9f0b8432 100644
--- a/src/highdicom/pm/content.py
+++ b/src/highdicom/pm/content.py
@@ -172,6 +172,19 @@ def lut_data(self) -> Optional[np.ndarray]:
             return np.array(self.RealWorldValueLUTData)
         return None
 
+    @property
+    def value_range(self) -> Tuple[float, float]:
+        """Tuple[float, float]: Range of valid input values."""
+        if 'DoubleFloatRealWorldValueFirstValueMapped' in self:
+            return (
+                self.DoubleFloatRealWorldValueFirstValueMapped,
+                self.DoubleFloatRealWorldValueLastValueMapped,
+            )
+        return (
+            float(self.RealWorldValueFirstValueMapped),
+            float(self.RealWorldValueLastValueMapped),
+        )
+
     def apply(
         self,
         array: np.ndarray,
@@ -217,7 +230,13 @@ def apply(
             slope = self.RealWorldValueSlope
             intercept = self.RealWorldValueIntercept
 
-            # TODO should we check values are within range here?
+            first, last = self.value_range
+
+            if array.min() < first or array.max() > last:
+                raise ValueError(
+                    'Array contains value outside the valid range.'
+                )
+
             # Worked example: with slope 10 and intercept 200 (values taken
             # from the tests in this patch), a stored value of 5 maps to
             # 5 * 10 + 200 = 250.
             return array * slope + intercept

diff --git a/src/highdicom/pr/content.py b/src/highdicom/pr/content.py
index aefc3e5c..78129abd 100644
--- a/src/highdicom/pr/content.py
+++ b/src/highdicom/pr/content.py
@@ -1203,7 +1203,7 @@ def _get_modality_lut_transformation(
         if intercept is None:
             rescale_type = None
         else:
-            rescale_type = RescaleTypeValues.HU.value
+            rescale_type = RescaleTypeValues.US.value
 
     if intercept is None:
         return None
@@ -1332,7 +1332,7 @@ def _add_softcopy_voi_lut_attributes(
     dataset.SoftcopyVOILUTSequence = voi_lut_transformations
 
 
-def _get_softcopy_voi_lut_transformations(
+def _get_voi_lut_transformations(
     referenced_images: Sequence[Dataset]
 ) -> Sequence[SoftcopyVOILUTTransformation]:
     """Get Softcopy VOI LUT Transformation from referenced images.
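The range check added to ``RealWorldValueMapping.apply`` above means that
out-of-range inputs now raise an error rather than being extrapolated
silently. A minimal sketch of the resulting behavior (constructor values are
assumed here, mirroring those used in the tests in this patch):

    import numpy as np
    from pydicom.sr.codedict import codes
    from highdicom.pm import RealWorldValueMapping

    m = RealWorldValueMapping(
        lut_label='1',
        lut_explanation='Feature 1',
        unit=codes.UCUM.NoUnits,
        value_range=(0, 255),
        intercept=200,
        slope=10,
    )
    assert not m.has_lut()
    assert m.value_range == (0.0, 255.0)

    out = m.apply(np.array([[0, 5, 10]]))  # [[200., 250., 300.]], float64
    m.apply(np.array([300]))  # raises ValueError: outside the valid range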
@@ -1828,7 +1828,7 @@ def __init__(
                 voi_lut_transformations=voi_lut_transformations
             )
         else:
-            voi_lut_transformations = _get_softcopy_voi_lut_transformations(
+            voi_lut_transformations = _get_voi_lut_transformations(
                 referenced_images
             )
             if len(voi_lut_transformations) > 0:

diff --git a/src/highdicom/pr/sop.py b/src/highdicom/pr/sop.py
index 85086ed8..885eb7fa 100644
--- a/src/highdicom/pr/sop.py
+++ b/src/highdicom/pr/sop.py
@@ -35,7 +35,7 @@
     _add_softcopy_presentation_lut_attributes,
     _add_softcopy_voi_lut_attributes,
     _get_modality_lut_transformation,
-    _get_softcopy_voi_lut_transformations,
+    _get_voi_lut_transformations,
     _get_icc_profile,
     AdvancedBlending,
     BlendingDisplay,
@@ -292,7 +292,7 @@ def __init__(
                 voi_lut_transformations=voi_lut_transformations
             )
         else:
-            voi_lut_transformations = _get_softcopy_voi_lut_transformations(
+            voi_lut_transformations = _get_voi_lut_transformations(
                 referenced_images
             )
             if len(voi_lut_transformations) > 0:
@@ -564,7 +564,7 @@ def __init__(
                 voi_lut_transformations=voi_lut_transformations
             )
         else:
-            voi_lut_transformations = _get_softcopy_voi_lut_transformations(
+            voi_lut_transformations = _get_voi_lut_transformations(
                 referenced_images
             )
             if len(voi_lut_transformations) > 0:

diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index c4f321e7..9baacfb4 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -843,6 +843,12 @@ def __init__(
                     'of type highdicom.PaletteColorLUTTransformation.'
                 )
 
+            if palette_color_lut_transformation.is_segmented:
+                raise ValueError(
+                    'Palette Color LUT Transformations must not be '
+                    'segmented when included in a Segmentation.'
+                )
+
             lut = palette_color_lut_transformation.red_lut
             lut_entries = lut.number_of_entries
             lut_start = lut.first_mapped_value

diff --git a/tests/test_content.py b/tests/test_content.py
index 42c76950..8a2a0edf 100644
--- a/tests/test_content.py
+++ b/tests/test_content.py
@@ -1925,58 +1925,80 @@ def setUp(self):
         super().setUp()
 
     def test_construction(self):
-        dtype = np.uint16
-        r_lut_data = np.arange(10, 120, dtype=dtype)
-        g_lut_data = np.arange(20, 130, dtype=dtype)
-        b_lut_data = np.arange(30, 140, dtype=dtype)
-        first_mapped_value = 32
-        lut_uid = UID()
-        r_lut = PaletteColorLUT(first_mapped_value, r_lut_data, color='red')
-        g_lut = PaletteColorLUT(first_mapped_value, g_lut_data, color='green')
-        b_lut = PaletteColorLUT(first_mapped_value, b_lut_data, color='blue')
-        instance = PaletteColorLUTTransformation(
-            red_lut=r_lut,
-            green_lut=g_lut,
-            blue_lut=b_lut,
-            palette_color_lut_uid=lut_uid,
-        )
-        assert instance.PaletteColorLookupTableUID == lut_uid
-        red_desc = [len(r_lut_data), first_mapped_value, 16]
-        r_lut_data_retrieved = np.frombuffer(
-            instance.RedPaletteColorLookupTableData,
-            dtype=np.uint16
-        )
-        assert np.array_equal(r_lut_data, r_lut_data_retrieved)
-        assert instance.RedPaletteColorLookupTableDescriptor == red_desc
-        green_desc = [len(g_lut_data), first_mapped_value, 16]
-        g_lut_data_retrieved = np.frombuffer(
-            instance.GreenPaletteColorLookupTableData,
-            dtype=np.uint16
-        )
-        assert np.array_equal(g_lut_data, g_lut_data_retrieved)
-        assert instance.GreenPaletteColorLookupTableDescriptor == green_desc
-        blue_desc = [len(b_lut_data), first_mapped_value, 16]
-        b_lut_data_retrieved = np.frombuffer(
-            instance.BluePaletteColorLookupTableData,
-            dtype=np.uint16
-        )
-        assert np.array_equal(b_lut_data, b_lut_data_retrieved)
-        assert instance.BluePaletteColorLookupTableDescriptor == blue_desc
-
-        assert np.array_equal(instance.red_lut.lut_data, r_lut_data)
-        assert np.array_equal(instance.green_lut.lut_data, g_lut_data)
-        assert np.array_equal(instance.blue_lut.lut_data, b_lut_data)
-
-        arr = np.array([32, 33, 32, 132])
-        expected = np.array(
-            [
-                [10, 11, 10, 110],
-                [20, 21, 20, 120],
-                [30, 31, 30, 130],
-            ]
-        ).T
-        output = instance.apply(arr)
-        assert np.array_equal(output, expected)
+        for bits, dtype in [(8, np.uint8), (16, np.uint16)]:
+            r_lut_data = np.arange(10, 120, dtype=dtype)
+            g_lut_data = np.arange(20, 130, dtype=dtype)
+            b_lut_data = np.arange(30, 140, dtype=dtype)
+            first_mapped_value = 32
+            lut_uid = UID()
+            r_lut = PaletteColorLUT(
+                first_mapped_value,
+                r_lut_data,
+                color='red',
+            )
+            g_lut = PaletteColorLUT(
+                first_mapped_value,
+                g_lut_data,
+                color='green',
+            )
+            b_lut = PaletteColorLUT(
+                first_mapped_value,
+                b_lut_data,
+                color='blue',
+            )
+            instance = PaletteColorLUTTransformation(
+                red_lut=r_lut,
+                green_lut=g_lut,
+                blue_lut=b_lut,
+                palette_color_lut_uid=lut_uid,
+            )
+            assert instance.PaletteColorLookupTableUID == lut_uid
+            red_desc = [len(r_lut_data), first_mapped_value, bits]
+            r_lut_data_retrieved = np.frombuffer(
+                instance.RedPaletteColorLookupTableData,
+                dtype=dtype,
+            )
+            assert np.array_equal(r_lut_data, r_lut_data_retrieved)
+            assert (
+                instance.RedPaletteColorLookupTableDescriptor == red_desc
+            )
+            green_desc = [len(g_lut_data), first_mapped_value, bits]
+            g_lut_data_retrieved = np.frombuffer(
+                instance.GreenPaletteColorLookupTableData,
+                dtype=dtype,
+            )
+            assert np.array_equal(g_lut_data, g_lut_data_retrieved)
+            assert (
+                instance.GreenPaletteColorLookupTableDescriptor == green_desc
+            )
+            blue_desc = [len(b_lut_data), first_mapped_value, bits]
+            b_lut_data_retrieved = np.frombuffer(
+                instance.BluePaletteColorLookupTableData,
+                dtype=dtype,
+            )
+            assert np.array_equal(b_lut_data, b_lut_data_retrieved)
+            assert (
+                instance.BluePaletteColorLookupTableDescriptor == blue_desc
+            )
 
+            assert np.array_equal(instance.red_lut.lut_data, r_lut_data)
+            assert np.array_equal(instance.green_lut.lut_data, g_lut_data)
+            assert np.array_equal(instance.blue_lut.lut_data, b_lut_data)
+
+            assert instance.first_mapped_value == first_mapped_value
+            assert instance.number_of_entries == len(r_lut_data)
+            assert instance.bits_per_entry == bits
+
+            arr = np.array([32, 33, 32, 132])
+            expected = np.array(
+                [
+                    [10, 11, 10, 110],
+                    [20, 21, 20, 120],
+                    [30, 31, 30, 130],
+                ]
+            ).T
+            output = instance.apply(arr)
+            assert np.array_equal(output, expected)
 
     def test_construction_no_uid(self):
         r_lut_data = np.arange(10, 120, dtype=np.uint16)

diff --git a/tests/test_pm.py b/tests/test_pm.py
index d4843ef5..37d00708 100644
--- a/tests/test_pm.py
+++ b/tests/test_pm.py
@@ -127,6 +127,7 @@ def test_construction_integer_linear_relationship(self):
         assert quantity_item.name == codes.SCT.Quantity
         assert quantity_item.value == quantity_definition
         assert not m.has_lut()
+        assert m.value_range == value_range
 
         array = np.array(
             [
@@ -179,6 +180,7 @@ def test_construction_integer_nonlinear_relationship(self):
         with pytest.raises(AttributeError):
             m.RealWorldValueIntercept  # noqa: B018
         assert m.has_lut()
+        assert m.value_range == value_range
 
         array = np.array(
             [
@@ -204,7 +206,7 @@ def test_construction_floating_point_linear_relationship(self):
         lut_label = '1'
         lut_explanation = 'Feature 1'
         unit = codes.UCUM.NoUnits
-        value_range = (0.0, 1.0)
+        value_range = (-1000.0, 1000.0)
         intercept = -23.13
         slope = 5.0
         m = RealWorldValueMapping(
@@ -233,6 +235,7 @@ def test_construction_floating_point_linear_relationship(self):
         with pytest.raises(AttributeError):
             m.RealWorldValueLUTData  # noqa: B018
         assert not m.has_lut()
+        assert m.value_range == value_range
 
         array = np.array(
             [
@@ -251,6 +254,17 @@ def test_construction_floating_point_linear_relationship(self):
         assert np.allclose(out, expected)
         assert out.dtype == np.float64
 
+        invalid_array = np.array(
+            [
+                [1200, 0, 0],
+                [5, 5, 5],
+                [10, 10, 10],
+            ],
+        )
+        msg = 'Array contains value outside the valid range.'
+        with pytest.raises(ValueError, match=msg):
+            m.apply(invalid_array)
+
     def test_failed_construction_floating_point_nonlinear_relationship(self):
         lut_label = '1'
         lut_explanation = 'Feature 1'

diff --git a/tests/test_pr.py b/tests/test_pr.py
index b77007da..402a0162 100644
--- a/tests/test_pr.py
+++ b/tests/test_pr.py
@@ -1222,7 +1222,7 @@ def test_construction_with_copy_modality_lut(self):
         )
         assert gsps.RescaleIntercept == self._ct_series[0].RescaleIntercept
         assert gsps.RescaleSlope == self._ct_series[0].RescaleSlope
-        assert gsps.RescaleType == 'HU'
+        assert gsps.RescaleType == 'US'
 
     def test_construction_with_copy_modality_lut_multiframe(self):
         gsps = GrayscaleSoftcopyPresentationState(

From 7d7c03ee2a133ffd8de2d415e754af23e9a14abb Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Sun, 24 Nov 2024 12:58:49 -0500
Subject: [PATCH 79/93] Factor out pixel transform code

---
 src/highdicom/content.py          | 380 ++++++--------------
 src/highdicom/pixel_transforms.py | 358 ++++++++++++++++++
 src/highdicom/pm/content.py       |  16 +-
 3 files changed, 406 insertions(+), 348 deletions(-)
 create mode 100644 src/highdicom/pixel_transforms.py

diff --git a/src/highdicom/content.py b/src/highdicom/content.py
index 62f182d5..f92ea709 100644
--- a/src/highdicom/content.py
+++ b/src/highdicom/content.py
@@ -24,6 +24,12 @@
     UniversalEntityIDTypeValues,
     VOILUTFunctionValues,
 )
+from highdicom.pixel_transforms import (
+    _get_combined_palette_color_lut,
+    _parse_palette_color_lut_attributes,
+    apply_lut,
+    voi_window_function,
+)
 from highdicom.sr.enum import ValueTypeValues
 from highdicom.sr.coding import CodedConcept
 from highdicom.sr.value_types import (
@@ -2115,33 +2121,12 @@ def apply(
             Array with LUT applied.
 
         """
-        if array.dtype.kind not in ('i', 'u'):
-            raise ValueError(
-                "Array must have an integer datatype."
-            )
-
-        lut_data = self.lut_data
-        if dtype is None:
-            dtype = lut_data.dtype
-        dtype = np.dtype(dtype)
-
-        # Check dtype is suitable
-        if dtype.kind not in ('u', 'i', 'f'):
-            raise ValueError(
-                f'Data type "{dtype}" is not suitable.'
-            )
-
-        if dtype != np.dtype:
-            lut_data = lut_data.astype(dtype, casting='safe')
-
-        last_mapped_value = self.first_mapped_value + self.number_of_entries - 1
-        # Clip because values outside the range should be mapped to the
-        # first/last value
-        array = np.clip(array, self.first_mapped_value, last_mapped_value)
-
-        if self.first_mapped_value != 0:
-            array = array - self.first_mapped_value
-        return lut_data[array]
+        return apply_lut(
+            array=array,
+            lut_data=self.lut_data,
+            first_mapped_value=self.first_mapped_value,
+            dtype=dtype,
+        )
 
 
 class ModalityLUT(LUT):
@@ -2384,115 +2369,6 @@ def has_window(self) -> bool:
         """
         return 'WindowCenter' in self
 
-    @staticmethod
-    def voi_window_function(
-        array: np.ndarray,
-        window_center: float,
-        window_width: float,
-        voi_lut_function: Union[
-            str,
-            VOILUTFunctionValues
-        ] = VOILUTFunctionValues.LINEAR,
-        output_range: Tuple[float, float] = (0.0, 1.0),
-        dtype: Union[type, str, np.dtype, None] = np.float64,
-        invert: bool = False,
-    ) -> np.ndarray:
-        """Functional implementation of the DICOM VOI windowing function.
-
-        Parameters
-        ----------
-        array: numpy.ndarray
-            Pixel array to which the windowing should be applied. Can be of
-            any shape.
-        window_center: float
-            Center of the window.
-        window_width: float
-            Width of the window.
-        voi_lut_function: Union[str, highdicom.VOILUTFunctionValues], optional
-            Type of VOI LUT function.
-        output_range: Tuple[float, float], optional
-            Range of output values to which the VOI range is mapped.
-        dtype: Union[type, str, numpy.dtype, None], optional
-            Data type of the output array. Should be a floating point data
-            type.
-        invert: bool, optional
-            Invert the returned array such that the lowest original value in
-            the LUT or input window is mapped to the upper limit and the
-            highest original value is mapped to the lower limit. This may be
-            used to efficiently combine a VOI LUT transformation with a
-            presentation transform that inverts the range.
-
-        Returns
-        -------
-        numpy.ndarray:
-            Array with the VOI window function applied.
-
-        """
-        voi_lut_function = VOILUTFunctionValues(voi_lut_function)
-        output_min, output_max = output_range
-        if output_min >= output_max:
-            raise ValueError(
-                "Second value of 'output_range' must be higher than the first."
-            )
-
-        if dtype is None:
-            dtype = np.dtype(np.float64)
-        else:
-            dtype = np.dtype(dtype)
-
-        window_width = dtype.type(window_width)
-        window_center = dtype.type(window_center)
-        if array.dtype != dtype:
-            array = array.astype(dtype)
-
-        if voi_lut_function in (
-            VOILUTFunctionValues.LINEAR,
-            VOILUTFunctionValues.LINEAR_EXACT,
-        ):
-            output_scale = (
-                output_max - output_min
-            )
-            if voi_lut_function == VOILUTFunctionValues.LINEAR:
-                # LINEAR uses the range
-                # from c - 0.5w to c + 0.5w - 1
-                scale_factor = (
-                    output_scale / (window_width - 1)
-                )
-            else:
-                # LINEAR_EXACT uses the full range
-                # from c - 0.5w to c + 0.5w
-                scale_factor = output_scale / window_width
-
-            window_min = window_center - window_width / 2.0
-            if invert:
-                array = (
-                    (window_min - array) * scale_factor +
-                    output_max
-                )
-            else:
-                array = (
-                    (array - window_min) * scale_factor +
-                    output_min
-                )
-
-            array = np.clip(array, output_min, output_max)
-
-        elif voi_lut_function == VOILUTFunctionValues.SIGMOID:
-            if invert:
-                offset_array = window_center - array
-            else:
-                offset_array = array - window_center
-            exp_term = np.exp(
-                -4.0 * offset_array /
-                window_width
-            )
-            array = (
-                (output_max - output_min) /
-                (1.0 + exp_term)
-            ) + output_min
-
-        return array
-
     def apply(
         self,
         array: np.ndarray,
@@ -2565,7 +2441,11 @@ def apply(
                 dtype=dtype,
                 invert=invert,
             )
-            array = scaled_lut_data[array - voi_lut.first_mapped_value]
+            array = apply_lut(
+                array=array,
+                lut_data=scaled_lut_data,
+                first_mapped_value=voi_lut.first_mapped_value,
+            )
         else:
             voi_lut_function = 'LINEAR'
 
@@ -2607,7 +2487,7 @@ def apply(
                     "not present."
                 )
 
-            array = self.voi_window_function(
+            array = voi_window_function(
                 array,
                 window_center=cast(float, window_center),
                 window_width=cast(float, window_width),
                 voi_lut_function=voi_lut_function,
                 output_range=output_range,
                 dtype=dtype,
                 invert=invert,
             )
 
         return array
 
@@ -3066,31 +2946,12 @@ def apply(
             Array with LUT applied.
 
         """
-        if array.dtype.kind not in ('i', 'u'):
-            raise ValueError(
-                "Array must have an integer datatype."
-            )
-
-        lut_data = self.lut_data
-        if dtype is None:
-            dtype = lut_data.dtype
-        dtype = np.dtype(dtype)
-
-        # Check dtype is suitable
-        if dtype.kind not in ('u', 'i', 'f'):
-            raise ValueError(
-                f'Data type "{dtype}" is not suitable.'
-            )
-
-        if dtype != np.dtype:
-            lut_data = lut_data.astype(dtype, casting='safe')
-
-        last_mapped_value = self.first_mapped_value + self.number_of_entries - 1
-        # Clip because values outside the range should be mapped to the
-        # first/last value
-        array = np.clip(array, self.first_mapped_value, last_mapped_value)
-
-        return lut_data[array - self.first_mapped_value]
+        return apply_lut(
+            array=array,
+            lut_data=self.lut_data,
+            first_mapped_value=self.first_mapped_value,
+            dtype=dtype,
+        )
 
     @classmethod
     def extract_from_dataset(cls, dataset: Dataset, color: str) -> Self:
@@ -3575,179 +3436,6 @@ def from_colors(
             palette_color_lut_uid=palette_color_lut_uid,
         )
 
-    @staticmethod
-    def _parse_attributes(dataset: Dataset) -> Tuple[
-        bool,
-        Tuple[int, int, int],
-        Tuple[bytes, bytes, bytes],
-    ]:
-        """Extract information about palette color lookup table from a dataset.
-
-        Performs various checks that the information retrieved is valid.
-
-        Parameters
-        ----------
-        dataset: pydicom.Dataset
-            Dataset containing Palette Color LUT information. Note that any
-            number of other attributes may be included and will be ignored (for
-            example allowing an entire image with Palette Color LUT information
-            at the top level to be passed).
-
-        Returns
-        -------
-        is_segmented: bool
-            True if the LUT is segmented. False otherwise.
-        descriptor: Tuple[int, int, int]
-            Lookup table descriptor containing in this order the number of
-            entries, first mapped value, and bits per entry. These values are
-            shared between the three color LUTs.
-        lut_data: Tuple[bytes, bytes, bytes]
-            Raw bytes data for the red, green and blue LUTs.
-
-        """
-        is_segmented = 'SegmentedRedPaletteColorLookupTableData' in dataset
-
-        if not is_segmented:
-            if 'RedPaletteColorLookupTableData' not in dataset:
-                raise AttributeError(
-                    'Dataset does not contain palette color lookup table '
-                    'attributes.'
-                )
-
-        descriptor = dataset.RedPaletteColorLookupTableDescriptor
-        if len(descriptor) != 3:
-            raise RuntimeError(
-                'Invalid Palette Color LUT Descriptor'
-            )
-        number_of_entries, _, bits_per_entry = descriptor
-
-        if number_of_entries == 0:
-            number_of_entries = 2 ** 16
-
-        if bits_per_entry == 8:
-            expected_num_bytes = number_of_entries
-            if number_of_entries % 2 == 1:
-                # Account for padding byte
-                expected_num_bytes += 1
-        elif bits_per_entry == 16:
-            expected_num_bytes = number_of_entries * 2
-        else:
-            raise RuntimeError(
-                'Invalid number of bits per entry found in Palette Color '
-                'LUT Descriptor.'
-            )
-
-        lut_data = []
-        for color in ['Red', 'Green', 'Blue']:
-            desc_kw = f'{color}PaletteColorLookupTableDescriptor'
-            if desc_kw not in dataset:
-                raise AttributeError(
-                    f"Dataset has no attribute '{desc_kw}'."
-                )
-
-            color_descriptor = getattr(dataset, desc_kw)
-            if color_descriptor != descriptor:
-                # Descriptors must match between all three colors
-                raise RuntimeError(
-                    'Dataset has mismatched palette color LUT '
-                    'descriptors.'
-                )
-
-            segmented_kw = f'Segmented{color}PaletteColorLookupTableData'
-            standard_kw = f'{color}PaletteColorLookupTableData'
-            if is_segmented:
-                data_kw = segmented_kw
-                wrong_data_kw = standard_kw
-            else:
-                data_kw = standard_kw
-                wrong_data_kw = segmented_kw
-
-            if data_kw not in dataset:
-                raise AttributeError(
-                    f"Dataset has no attribute '{data_kw}'."
-                )
-            if wrong_data_kw in dataset:
-                raise AttributeError(
-                    "Mismatch of segmented LUT and standard LUT found."
-                )
-
-            lut_bytes = getattr(dataset, data_kw)
-            if len(lut_bytes) != expected_num_bytes:
-                raise RuntimeError(
-                    "LUT data has incorrect length"
-                )
-            lut_data.append(lut_bytes)
-
-        return (
-            is_segmented,
-            tuple(descriptor),
-            tuple(lut_data)
-        )
-
     @property
     def is_segmented(self) -> bool:
         """bool: True if the transformation is a segmented LUT.
@@ -3528,7 +3528,7 @@ def combined_lut_data(self) -> np.ndarray:
 
         """
         if self._lut_data is None:
-            _, self._lut_data = self._get_combined_lut_data(self)
+            _, self._lut_data = _get_combined_palette_color_lut(self)
         return cast(np.ndarray, self._lut_data)
 
-    @classmethod
-    def _get_combined_lut_data(
-        cls,
-        dataset: Dataset,
-    ) -> Tuple[int, np.ndarray]:
-        """Get a LUT array with three color channels from a dataset.
-
-        Parameters
-        ----------
-        dataset: pydicom.Dataset
-            Dataset containing Palette Color LUT information. Note that any
-            number of other attributes may be included and will be ignored (for
-            example allowing an entire image with Palette Color LUT information
-            at the top level to be passed).
-
-        Returns
-        -------
-        first_mapped_value: int
-            The first input value included in the LUT.
-        lut_data: numpy.ndarray
-            A NumPy array of shape (number_of_entries, 3) containing the red,
-            green and blue lut data stacked along the final dimension of the
-            array. Data type will be 8 or 16 bit unsigned integer depending on
-            the number of bits per entry in the LUT.
-
-        """
-        (
-            is_segmented,
-            (number_of_entries, first_mapped_value, bits_per_entry),
-            lut_data,
-        ) = cls._parse_attributes(dataset)
-
-        if is_segmented:
-            raise RuntimeError(
-                'Combined LUT data is not supported for segmented LUTs'
-            )
-
-        if bits_per_entry == 8:
-            dtype = np.uint8
-        else:
-            dtype = np.uint16
-
-        combined_array = np.stack(
-            [np.frombuffer(buf, dtype=dtype) for buf in lut_data],
-            axis=-1
-        )
-
-        # Account for padding byte
-        if combined_array.shape[0] == number_of_entries + 1:
-            combined_array = combined_array[:-1]
-
-        return first_mapped_value, combined_array
-
     @classmethod
     def extract_from_dataset(cls, dataset: Dataset) -> Self:
         """Construct from an existing dataset.
@@ -3559,7 +3559,7 @@ def extract_from_dataset(cls, dataset: Dataset) -> Self:
         (
             is_segmented,
             descriptor,
             lut_data,
-        ) = cls._parse_attributes(dataset)
+        ) = _parse_palette_color_lut_attributes(dataset)
 
         for color, data in zip(['Red', 'Green', 'Blue'], lut_data):
             desc_kw = f'{color}PaletteColorLookupTableDescriptor'
@@ -3575,7 +3574,11 @@ def extract_from_dataset(cls, dataset: Dataset) -> Self:
         new_dataset.__class__ = cls
         return cast(cls, new_dataset)
 
-    def apply(self, array: np.ndarray) -> np.ndarray:
+    def apply(
+        self,
+        array: np.ndarray,
+        dtype: Union[type, str, np.dtype, None] = None,
+    ) -> np.ndarray:
         """Apply the LUT to a pixel array.
 
         Parameters
         ----------
         array: numpy.ndarray
             Pixel array to which the LUT should be applied. Can be of any shape
             but must have an integer datatype.
+        dtype: Union[type, str, numpy.dtype, None], optional
+            Datatype of the output array. If ``None``, an unsigned integer
+            datatype corresponding to the number of bits in the LUT will be
+            used (either ``numpy.uint8`` or ``numpy.uint16``). Only safe casts
+            are permitted.
 
         Returns
         -------
@@ -3596,15 +3604,9 @@ def apply(
         if isinstance(self.red_lut, SegmentedPaletteColorLUT):
             raise RuntimeError(
                 "The 'apply' method is not implemented for segmented LUTs."
             )
 
-        last_mapped_value = (
-            self.first_mapped_value + self.number_of_entries - 1
-        )
-
-        # Clip because values outside the region are mapped to the first/last
-        # values in the LUT
-        array = np.clip(array, self.first_mapped_value, last_mapped_value)
-
-        if self.first_mapped_value != 0:
-            array = array - self.first_mapped_value
-
-        return self.combined_lut_data[array, :]
+        return apply_lut(
+            array=array,
+            lut_data=self.combined_lut_data,
+            first_mapped_value=self.first_mapped_value,
+            dtype=dtype,
+        )

diff --git a/src/highdicom/pixel_transforms.py b/src/highdicom/pixel_transforms.py
new file mode 100644
index 00000000..482f7365
--- /dev/null
+++ b/src/highdicom/pixel_transforms.py
@@ -0,0 +1,358 @@
+"""Functional interface for pixel transformations."""
+from typing import Union, Tuple
+
+import numpy as np
+
+from pydicom import Dataset
+from highdicom.enum import VOILUTFunctionValues
+
+
+def _parse_palette_color_lut_attributes(dataset: Dataset) -> Tuple[
+    bool,
+    Tuple[int, int, int],
+    Tuple[bytes, bytes, bytes],
+]:
+    """Extract information about palette color lookup table from a dataset.
+
+    Performs various checks that the information retrieved is valid.
+
+    Parameters
+    ----------
+    dataset: pydicom.Dataset
+        Dataset containing Palette Color LUT information. Note that any
+        number of other attributes may be included and will be ignored (for
+        example allowing an entire image with Palette Color LUT information
+        at the top level to be passed).
+
+    Returns
+    -------
+    is_segmented: bool
+        True if the LUT is segmented. False otherwise.
+    descriptor: Tuple[int, int, int]
+        Lookup table descriptor containing in this order the number of
+        entries, first mapped value, and bits per entry. These values are
+        shared between the three color LUTs.
+    lut_data: Tuple[bytes, bytes, bytes]
+        Raw bytes data for the red, green and blue LUTs.
+
+    """
+    is_segmented = 'SegmentedRedPaletteColorLookupTableData' in dataset
+
+    if not is_segmented:
+        if 'RedPaletteColorLookupTableData' not in dataset:
+            raise AttributeError(
+                'Dataset does not contain palette color lookup table '
+                'attributes.'
+            )
+
+    descriptor = dataset.RedPaletteColorLookupTableDescriptor
+    if len(descriptor) != 3:
+        raise RuntimeError(
+            'Invalid Palette Color LUT Descriptor'
+        )
+    number_of_entries, _, bits_per_entry = descriptor
+
+    if number_of_entries == 0:
+        number_of_entries = 2 ** 16
+
+    if bits_per_entry == 8:
+        expected_num_bytes = number_of_entries
+        if number_of_entries % 2 == 1:
+            # Account for padding byte
+            number_of_entries += 1
+    elif bits_per_entry == 16:
+        expected_num_bytes = number_of_entries * 2
+    else:
+        raise RuntimeError(
+            'Invalid number of bits per entry found in Palette Color '
+            'LUT Descriptor.'
+        )
+
+    lut_data = []
+    for color in ['Red', 'Green', 'Blue']:
+        desc_kw = f'{color}PaletteColorLookupTableDescriptor'
+        if desc_kw not in dataset:
+            raise AttributeError(
+                f"Dataset has no attribute '{desc_kw}'."
+            )
+
+        color_descriptor = getattr(dataset, desc_kw)
+        if color_descriptor != descriptor:
+            # Descriptors must match between all three colors
+            raise RuntimeError(
+                'Dataset has mismatched palette color LUT '
+                'descriptors.'
+            )
+
+        segmented_kw = f'Segmented{color}PaletteColorLookupTableData'
+        standard_kw = f'{color}PaletteColorLookupTableData'
+        if is_segmented:
+            data_kw = segmented_kw
+            wrong_data_kw = standard_kw
+        else:
+            data_kw = standard_kw
+            wrong_data_kw = segmented_kw
+
+        if data_kw not in dataset:
+            raise AttributeError(
+                f"Dataset has no attribute '{data_kw}'."
+            )
+        if wrong_data_kw in dataset:
+            raise AttributeError(
+                "Mismatch of segmented LUT and standard LUT found."
+            )
+
+        lut_bytes = getattr(dataset, data_kw)
+        if len(lut_bytes) != expected_num_bytes:
+            raise RuntimeError(
+                "LUT data has incorrect length"
+            )
+        lut_data.append(lut_bytes)
+
+    return (
+        is_segmented,
+        tuple(descriptor),
+        tuple(lut_data)
+    )
+
+
+def _get_combined_palette_color_lut(
+    dataset: Dataset,
+) -> Tuple[int, np.ndarray]:
+    """Get a LUT array with three color channels from a dataset.
+
+    Parameters
+    ----------
+    dataset: pydicom.Dataset
+        Dataset containing Palette Color LUT information. Note that any
+        number of other attributes may be included and will be ignored (for
+        example allowing an entire image with Palette Color LUT information
+        at the top level to be passed).
+
+    Returns
+    -------
+    first_mapped_value: int
+        The first input value included in the LUT.
+    lut_data: numpy.ndarray
+        A NumPy array of shape (number_of_entries, 3) containing the red,
+        green and blue lut data stacked along the final dimension of the
+        array. Data type will be 8 or 16 bit unsigned integer depending on
+        the number of bits per entry in the LUT.
+
+    """
+    (
+        is_segmented,
+        (number_of_entries, first_mapped_value, bits_per_entry),
+        lut_data,
+    ) = _parse_palette_color_lut_attributes(dataset)
+
+    if is_segmented:
+        raise RuntimeError(
+            'Combined LUT data is not supported for segmented LUTs'
+        )
+
+    if bits_per_entry == 8:
+        dtype = np.uint8
+    else:
+        dtype = np.uint16
+
+    combined_array = np.stack(
+        [np.frombuffer(buf, dtype=dtype) for buf in lut_data],
+        axis=-1
+    )
+
+    # Account for padding byte
+    if combined_array.shape[0] == number_of_entries + 1:
+        combined_array = combined_array[:-1]
+
+    return first_mapped_value, combined_array
+
+
+def voi_window_function(
+    array: np.ndarray,
+    window_center: float,
+    window_width: float,
+    voi_lut_function: Union[
+        str,
+        VOILUTFunctionValues
+    ] = VOILUTFunctionValues.LINEAR,
+    output_range: Tuple[float, float] = (0.0, 1.0),
+    dtype: Union[type, str, np.dtype, None] = np.float64,
+    invert: bool = False,
+) -> np.ndarray:
+    """DICOM VOI windowing function.
+
+    This function applies a "value-of-interest" window, defined by a window
+    center and width, to a pixel array. Values within the window are rescaled
+    to the output range, while values outside the window are clipped to the
+    upper or lower value of the output range.
+
+    Parameters
+    ----------
+    array: numpy.ndarray
+        Pixel array to which the transformation should be applied. Can be
+        of any shape and have any numeric datatype; it will be cast to
+        the floating point output data type before windowing.
+    window_center: float
+        Center of the window.
+    window_width: float
+        Width of the window.
+    voi_lut_function: Union[str, highdicom.VOILUTFunctionValues], optional
+        Type of VOI LUT function.
+    output_range: Tuple[float, float], optional
+        Range of output values to which the VOI range is mapped.
+    dtype: Union[type, str, numpy.dtype, None], optional
+        Data type of the output array. Should be a floating point data type.
+    invert: bool, optional
+        Invert the returned array such that the lowest original value in
+        the LUT or input window is mapped to the upper limit and the
+        highest original value is mapped to the lower limit. This may be
+        used to efficiently combine a VOI LUT transformation with a
+        presentation transform that inverts the range.
+
+    Returns
+    -------
+    numpy.ndarray:
+        Array with the VOI window function applied.
+
+    """
+    voi_lut_function = VOILUTFunctionValues(voi_lut_function)
+    output_min, output_max = output_range
+    if output_min >= output_max:
+        raise ValueError(
+            "Second value of 'output_range' must be higher than the first."
+        )
+
+    if dtype is None:
+        dtype = np.dtype(np.float64)
+    else:
+        dtype = np.dtype(dtype)
+
+    window_width = dtype.type(window_width)
+    window_center = dtype.type(window_center)
+    if array.dtype != dtype:
+        array = array.astype(dtype)
+
+    if voi_lut_function in (
+        VOILUTFunctionValues.LINEAR,
+        VOILUTFunctionValues.LINEAR_EXACT,
+    ):
+        output_scale = (
+            output_max - output_min
+        )
+        if voi_lut_function == VOILUTFunctionValues.LINEAR:
+            # LINEAR uses the range
+            # from c - 0.5w to c + 0.5w - 1
+            scale_factor = (
+                output_scale / (window_width - 1)
+            )
+        else:
+            # LINEAR_EXACT uses the full range
+            # from c - 0.5w to c + 0.5w
+            scale_factor = output_scale / window_width
+
+        window_min = window_center - window_width / 2.0
+        if invert:
+            array = (
+                (window_min - array) * scale_factor +
+                output_max
+            )
+        else:
+            array = (
+                (array - window_min) * scale_factor +
+                output_min
+            )
+
+        array = np.clip(array, output_min, output_max)
+
+    elif voi_lut_function == VOILUTFunctionValues.SIGMOID:
+        if invert:
+            offset_array = window_center - array
+        else:
+            offset_array = array - window_center
+        exp_term = np.exp(
+            -4.0 * offset_array /
+            window_width
+        )
+        array = (
+            (output_max - output_min) /
+            (1.0 + exp_term)
+        ) + output_min
+
+    return array
+
+
+def apply_lut(
+    array: np.ndarray,
+    lut_data: np.ndarray,
+    first_mapped_value: int,
+    dtype: Union[type, str, np.dtype, None] = None,
+    clip: bool = True,
+) -> np.ndarray:
+    """Apply a LUT to a pixel array.
+
+    Parameters
+    ----------
+    array: numpy.ndarray
+        Pixel array to which the LUT should be applied. Can be of any shape
+        but must have an integer datatype.
+    lut_data: numpy.ndarray
+        Lookup table data. The items in the LUT will be indexed down axis 0,
+        but additional dimensions may optionally be included.
+    first_mapped_value: int
+        Input value that should be mapped to the first item in the LUT.
+    dtype: Union[type, str, numpy.dtype, None], optional
+        Datatype of the output array. If ``None``, the output data type will
+        match that of the input ``lut_data``. Only safe casts are permitted.
+    clip: bool, optional
+        If True, values in ``array`` outside the range of the LUT (i.e. those
+        below ``first_mapped_value`` or those above ``first_mapped_value +
+        len(lut_data) - 1``) are clipped to lie within the range before
+        applying the LUT, meaning that after the LUT is applied they will take
+        the first or last value in the LUT. If False, values outside the range
+        of the LUT will raise an error.
+
+    Returns
+    -------
+    numpy.ndarray
+        Array with LUT applied.
+
+    """
+    if array.dtype.kind not in ('i', 'u'):
+        raise ValueError(
+            "Array must have an integer datatype."
+        )
+
+    if dtype is None:
+        dtype = lut_data.dtype
+    dtype = np.dtype(dtype)
+
+    # Check dtype is suitable
+    if dtype.kind not in ('u', 'i', 'f'):
+        raise ValueError(
+            f'Data type "{dtype}" is not suitable.'
+        )
+
+    if dtype != lut_data.dtype:
+        # Casting the LUT data once up front is more efficient on average
+        # than applying the LUT first and casting the larger output array
+        lut_data = lut_data.astype(dtype, casting='safe')
+
+    last_mapped_value = first_mapped_value + len(lut_data) - 1
+
+    if clip:
+        # Clip because values outside the range should be mapped to the
+        # first/last value
+        array = np.clip(array, first_mapped_value, last_mapped_value)
+    else:
+        if array.min() < first_mapped_value or array.max() > last_mapped_value:
+            raise ValueError(
+                'Array contains values outside the range of the LUT.'
+ ) + + if first_mapped_value != 0: + # This is a common case and the subtraction may be slow, so avoid it if + # not needed + array = array - first_mapped_value + + return lut_data[array, ...] diff --git a/src/highdicom/pm/content.py b/src/highdicom/pm/content.py index 9f0b8432..bf660295 100644 --- a/src/highdicom/pm/content.py +++ b/src/highdicom/pm/content.py @@ -9,6 +9,7 @@ from highdicom.content import PlanePositionSequence from highdicom.enum import CoordinateSystemNames +from highdicom.pixel_transforms import apply_lut from highdicom.sr.coding import CodedConcept from highdicom.sr.value_types import CodeContentItem from highdicom.uid import UID @@ -217,15 +218,12 @@ def apply( "LUT data is stored with the incorrect number of elements." ) - if array.min() < first or array.max() > last: - raise RuntimeError( - "Array contains values not in the LUT." - ) - - if first != 0: - array = array - first - - return lut_data[array] + return apply_lut( + array=array, + lut_data=lut_data, + first_mapped_value=first, + clip=False, # values outside the range are undefined + ) else: slope = self.RealWorldValueSlope intercept = self.RealWorldValueIntercept From 4caa018e69656679b97de81745f33d9031e04bc6 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 24 Nov 2024 13:22:40 -0500 Subject: [PATCH 80/93] Add palette lut from combined array --- src/highdicom/content.py | 120 +++++++++++++++++++++++++++++---------- tests/test_content.py | 17 ++++++ 2 files changed, 108 insertions(+), 29 deletions(-) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index f92ea709..0bbdd6ef 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -2424,11 +2424,6 @@ def apply( ) if not self.has_window() or (self.has_lut() and prefer_lut): - if array.dtype.kind not in ('i', 'u'): - raise ValueError( - "Array must have an integer data type if a LUT is used." - ) - try: voi_lut = self.VOILUTSequence[voi_transform_index] except IndexError as e: @@ -3026,8 +3021,6 @@ def __init__( """ super().__init__() - # Note 8 bit LUT data is unsupported for presentation states pending - # clarification on the standard, but is valid for segmentations if segmented_lut_data.dtype.type == np.uint8: bits_per_entry = 8 elif segmented_lut_data.dtype.type == np.uint16: @@ -3260,28 +3253,6 @@ def __init__( palette_color_lut_uid: Union[highdicom.UID, str, None], optional Unique identifier for the palette color lookup table. - Examples - -------- - - Create a ``PaletteColorLUTTransformation`` from a built-in colormap - from the ``matplotlib`` python package. - - >>> from matplotlib import colormaps - >>> import highdicom as hd - >>> - >>> # Use the built-in 'gist_rainbow_r' colormap - >>> cmap = colormaps['gist_rainbow_r'] - >>> # Create an 8-bit RGBA LUT array from the colormap - >>> num_entries = 10 # e.g. 
number of classes in a segmentation
-        >>> lut_data = cmap(np.arange(num_entries) / (num_entries + 1), bytes=True)
-        >>>
-        >>> lut = hd.PaletteColorLUTTransformation(
-        >>>     red_lut=hd.PaletteColorLUT(0, lut_data[:, 0], 'red'),
-        >>>     green_lut=hd.PaletteColorLUT(0, lut_data[:, 1], 'green'),
-        >>>     blue_lut=hd.PaletteColorLUT(0, lut_data[:, 2], 'blue'),
-        >>>     palette_color_lut_uid=hd.UID(),
-        >>> )
-
         """  # noqa: E501
         super().__init__()

@@ -3436,6 +3407,97 @@ def from_colors(
             palette_color_lut_uid=palette_color_lut_uid,
         )
 
+    @classmethod
+    def from_combined_lut(
+        cls,
+        lut_data: np.ndarray,
+        first_mapped_value: int = 0,
+        palette_color_lut_uid: Union[UID, str, None] = None
+    ) -> Self:
+        """Create a palette color lookup table from a combined LUT array.
+
+        Parameters
+        ----------
+        lut_data: numpy.ndarray
+            LUT array with shape ``(number_of_entries, 3)`` where the entries
+            are stacked as rows and the 3 columns represent the red, green, and
+            blue channels (in that order). Data type must be ``numpy.uint8`` or
+            ``numpy.uint16``.
+        first_mapped_value: int, optional
+            Input pixel value that will be mapped to the first value in the
+            lookup table.
+        palette_color_lut_uid: Union[highdicom.UID, str, None], optional
+            Unique identifier for the palette color lookup table.
+
+        Returns
+        -------
+        highdicom.PaletteColorLUTTransformation:
+            Palette Color Lookup table created from the given LUT data. This
+            will be an 8-bit or 16-bit LUT depending on the data type of the
+            input ``lut_data``.
+
+
+        Examples
+        --------
+
+        Create a ``PaletteColorLUTTransformation`` from a built-in colormap
+        from the well-known ``matplotlib`` python package (must be installed
+        separately).
+
+        >>> import numpy as np
+        >>> from matplotlib import colormaps
+        >>> import highdicom as hd
+        >>>
+        >>> # Use matplotlib's built-in 'gist_rainbow_r' colormap as an example
+        >>> cmap = colormaps['gist_rainbow_r']
+        >>>
+        >>> # Create an 8-bit RGBA LUT array from the colormap
+        >>> num_entries = 10  # e.g. number of classes in a segmentation
+        >>> lut_data = cmap(np.arange(num_entries) / (num_entries + 1), bytes=True)
+        >>>
+        >>> # Remove the alpha channel (at index 3)
+        >>> lut_data = lut_data[:, :3]
+        >>>
+        >>> lut = hd.PaletteColorLUTTransformation.from_combined_lut(
+        ...     lut_data,
+        ...     palette_color_lut_uid=hd.UID(),
+        ... )
+
+        """  # noqa: E501
+        if lut_data.ndim != 2 or lut_data.shape[1] != 3:
+            raise ValueError(
+                "Argument 'lut_data' must have shape (number_of_entries, 3)."
+            )
+
+        if lut_data.dtype not in (np.uint8, np.uint16):
+            raise ValueError(
+                "Argument 'lut_data' must have data type numpy.uint8 or "
+                'numpy.uint16.'
+            )
+
+        red_lut = PaletteColorLUT(
+            first_mapped_value=first_mapped_value,
+            lut_data=lut_data[:, 0],
+            color='red'
+        )
+        green_lut = PaletteColorLUT(
+            first_mapped_value=first_mapped_value,
+            lut_data=lut_data[:, 1],
+            color='green'
+        )
+        blue_lut = PaletteColorLUT(
+            first_mapped_value=first_mapped_value,
+            lut_data=lut_data[:, 2],
+            color='blue'
+        )
+
+        return cls(
+            red_lut=red_lut,
+            green_lut=green_lut,
+            blue_lut=blue_lut,
+            palette_color_lut_uid=palette_color_lut_uid,
+        )
+
     @property
     def is_segmented(self) -> bool:
         """bool: True if the transformation is a segmented LUT.
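
A note on the layout shared by these helpers: the combined array accepted by
from_combined_lut is the same (number_of_entries, 3) arrangement returned by
_get_combined_palette_color_lut and consumed by apply_lut, with entries indexed
down axis 0 and the color channels along the final axis. As a rough sketch of
how the pieces compose (illustrative only, not part of the patch; the
four-entry LUT and index frame below are made up):

    import numpy as np
    from highdicom.pixel_transforms import apply_lut

    # Hypothetical 4-entry combined RGB LUT mapping stored values 0-3
    lut_data = np.array(
        [
            [0, 0, 0],      # 0 -> black
            [255, 0, 0],    # 1 -> red
            [0, 255, 0],    # 2 -> green
            [0, 0, 255],    # 3 -> blue
        ],
        dtype=np.uint8,
    )

    frame = np.array([[0, 1], [2, 3]], dtype=np.uint8)

    # Entries are indexed down axis 0 and the trailing color axis is carried
    # through, so a (2, 2) index frame becomes a (2, 2, 3) RGB frame
    rgb = apply_lut(array=frame, lut_data=lut_data, first_mapped_value=0)
    assert rgb.shape == (2, 2, 3)
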
diff --git a/tests/test_content.py b/tests/test_content.py
index 8a2a0edf..c8826391 100644
--- a/tests/test_content.py
+++ b/tests/test_content.py
@@ -2071,6 +2071,23 @@ def test_construction_from_colors(self):
         assert np.array_equal(lut.green_lut.lut_data, [0, 0, 128, 0, 255])
         assert np.array_equal(lut.blue_lut.lut_data, [0, 0, 0, 255, 255])
 
+    def test_construction_from_combined(self):
+
+        r_lut_data = np.arange(10, 120, dtype=np.uint8)
+        g_lut_data = np.arange(20, 130, dtype=np.uint8)
+        b_lut_data = np.arange(30, 140, dtype=np.uint8)
+
+        combined_lut = np.stack(
+            [r_lut_data, g_lut_data, b_lut_data],
+            axis=-1
+        )
+        lut = PaletteColorLUTTransformation.from_combined_lut(
+            combined_lut,
+        )
+
+        assert np.array_equal(lut.red_lut.lut_data, r_lut_data)
+        assert np.array_equal(lut.blue_lut.lut_data, b_lut_data)
+        assert np.array_equal(lut.green_lut.lut_data, g_lut_data)
 
 class TestSpecimenDescription(TestCase):
     def test_construction(self):

From befdec2c95fe5a6778371fef5c28c6950951f776 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Thu, 19 Dec 2024 17:08:16 -0500
Subject: [PATCH 81/93] Fixes to multiframe, add various pixel transforms

---
 src/highdicom/_multiframe.py      | 490 ++++++++++++++++++++++++++++--
 src/highdicom/content.py          |  56 +---
 src/highdicom/pixel_transforms.py |  76 ++++-
 src/highdicom/volume.py           |   1 -
 4 files changed, 558 insertions(+), 65 deletions(-)

diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py
index 06284cd8..bee4be17 100644
--- a/src/highdicom/_multiframe.py
+++ b/src/highdicom/_multiframe.py
@@ -31,9 +31,17 @@
 from highdicom._module_utils import is_multiframe_image
 from highdicom.base import SOPClass, _check_little_endian
+from highdicom.color import ColorManager
+from highdicom.content import LUT
 from highdicom.enum import (
     CoordinateSystemNames,
 )
+from highdicom.pixel_transforms import (
+    _check_rescale_dtype,
+    _get_combined_palette_color_lut,
+    apply_lut,
+    voi_window,
+)
 from highdicom.seg.enum import SpatialLocationsPreservedValues
 from highdicom.spatial import (
     get_image_coordinate_system,
@@ -93,6 +101,422 @@
 logger = logging.getLogger(__name__)
 
 
+class _CombinedPixelTransformation:
+
+    """Class representing a combined pixel transformation."""
+
+    def __init__(
+        self,
+        image: Dataset,
+        frame_index: int = 0,
+        output_dtype: Union[type, str, np.dtype, None] = np.float64,
+        apply_modality_transform: bool = True,
+        apply_voi_transform: bool = False,
+        voi_transform_index: int = 0,
+        apply_palette_color_lut: bool = True,
+        ensure_monochrome_2: bool = True,
+        real_world_value_map_index: int = 0,
+        output_range: Tuple[float, float] = (0.0, 1.0),
+        correct_color: bool = True,
+    ):
+        """Apply pixel transformation to a frame.
+
+        Parameters
+        ----------
+        image: pydicom.Dataset
+            Image (single frame or multiframe) for which the pixel
+            transformation should be represented.
+        frame_index: int
+            Zero-based index (one less than the frame number).
+        apply_modality_transform: bool, optional
+            Whether to apply the modality transform (if present in the
+            dataset) to the frame. The modality transformation maps stored
+            pixel values to output values, either using a LUT or rescale
+            slope and intercept.
+        apply_voi_transform: bool, optional
+            Apply the value-of-interest (VOI) transformation (if present in the
+            dataset), which limits the range of pixel values to a particular
+            range of interest, using either a windowing operation or a LUT.
+        voi_transform_index: int, optional
+            Index (zero-based) of the VOI transform to apply if multiple are
+            included in the datasets. Ignored if ``apply_voi_transform`` is
+            ``False`` or no VOI transform is included in the datasets. May be a
+            negative integer, following standard Python indexing convention.
+        apply_palette_color_lut: bool, optional
+            Apply the palette color LUT, if present in the dataset. The palette
+            color LUT maps a single sample for each pixel stored in the dataset
+            to a 3 sample-per-pixel color image.
+        ensure_monochrome_2: bool, optional
+            If the Photometric Interpretation is MONOCHROME1, convert the range
+            of the output pixels to correspond to MONOCHROME2 (in which high
+            values represent white and low values represent black). Ignored
+            if PhotometricInterpretation is not MONOCHROME1.
+        real_world_value_map_index: int, optional
+            Index of the real world value map to use (multiple may be stored
+            within the dataset).
+        output_range: Tuple[float, float], optional
+            Range of output values to which the VOI range is mapped. Only
+            relevant if ``apply_voi_transform`` is True and a VOI transform is
+            present.
+        correct_color: bool, optional
+            Whether colors should be corrected by applying an ICC
+            transformation. Will only be performed if metadata contain an
+            ICC Profile.
+
+        """
+        # TODO: real world value map
+        # TODO: specify that code should error if no transform found?
+        # TODO: choose VOI by explanation?
+        # TODO: how to combine with multiframe?
+        if apply_voi_transform and not apply_modality_transform:
+            raise ValueError(
+                "Parameter 'apply_voi_transform' requires "
+                "'apply_modality_transform'."
+            )
+
+        output_min, output_max = output_range
+        if output_min >= output_max:
+            raise ValueError(
+                "Second value of 'output_range' must be higher than the first."
+ ) + + self.output_dtype = np.dtype(output_dtype) + self.is_shared = True + self.is_color_input = image.SamplesPerPixel == 3 + self._input_range_check: Optional[Tuple[int, int]] = None + self._output_range = output_range + self._effective_lut_data: Optional[np.ndarray] = None + self._effective_lut_first_mapped_value = 0 + self._effective_window_center_width: Optional[Tuple[float, float]] = None + self._effective_slope_intercept: Optional[Tuple[float, float]] = None + self._invert = False + + if 'FloatPixelData' in image: + input_dtype = np.float32 + input_range = None + elif 'DoubleFloatPixelData' in image: + input_dtype = np.float64 + input_range = None + else: + if image.BitsAllocated == 16: + self.input_dtype = np.dtype(np.uint16) + elif image.BitsAllocated == 32: + self.input_dtype = np.dtype(np.uint32) + else: + self.input_dtype = np.dtype(np.uint8) + input_range = (0, 2 ** image.BitsStored) + + if not self.is_color_input: + if ( + image.PhotometricInterpretation == 'PALETTE COLOR' and + apply_palette_color_lut + ): + if 'SegmentedRedPaletteColorLookupTableData' in image: + # TODO + raise RuntimeError("Segmented LUTs are not implemented.") + + self._first_mapped_value, self._effective_lut = ( + _get_combined_palette_color_lut(image) + ) + else: + # Create a list of all datasets to check for transforms for + # this frame, and whether they are shared by all frames + datasets = [(image, True)] + + if 'SharedFunctionalGroupsSequence' in image: + datasets.append( + (image.SharedFunctionalGroupsSequence[0], True) + ) + + if 'PerFrameFunctionalGroupsSequence' in image: + datasets.append( + ( + image.PerFrameFunctionalGroupsSequence[frame_index], + False, + ) + ) + + modality_lut: Optional[LUT] = None + modality_slope_intercept: Optional[Tuple[float, float]] = None + + voi_lut: Optional[LUT] = None + voi_scaled_lut_data: Optional[np.ndarray] = None + voi_center_width: Optional[Tuple[float, float]] = None + voi_function = 'LINEAR' + invert = False + + if ensure_monochrome_2: + if image.PhotometricInterpretation == 'MONOCHROME1': + # TODO what about presentation LUT + invert = True + + for ds, is_shared in datasets: + rwvm_seq = ds.get('RealWorldValueMappingSequence') + if rwvm_seq is not None: + try: + rwvm_item = rwvm_seq[real_world_value_map_index] + except IndexError as e: + raise IndexError( + "Requested 'real_world_value_map_index' is " + "not present." 
+ ) from e + if 'RealWorldValueLUTData' in rwvm_item: + self._effective_lut_data = np.array( + rwvm_item.RealWorldValueLUTData + ) + self._effective_lut_first_mapped_value = int( + rwvm_item.RealWorldValueFirstValueMapped + ) + else: + self._effective_slope_intercept = ( + rwvm_item.RealWorldValueSlope, + rwvm_item.RealWorldValueIntercept, + ) + if 'DoubleFloatRealWorldValueFirstValueMapped' in rwvm_item: + self._input_range_check = ( + rwvm_item.DoubleFloatRealWorldValueFirstValueMapped, + rwvm_item.DoubleFloatRealWorldValueLastValueMapped + ) + else: + self._input_range_check = ( + rwvm_item.RealWorldValueFirstValueMapped, + rwvm_item.RealWorldValueLastValueMapped + ) + self.is_shared = self.is_shared and is_shared + # TODO skip pixel transformation + break + + if apply_modality_transform: + + if 'ModalityLUTSequence' in image: + modality_lut = LUT.from_dataset( + image.ModalityLUTSequence[0] + ) + else: + for ds, is_shared in datasets: + if ( + 'RescaleSlope' in ds or + 'RescaleIntercept' in ds + ): + modality_slope_intercept = ( + float(ds.get('RescaleSlope', 1.0)), + float(ds.get('RescaleIntercept', 0.0)) + ) + self.is_shared = self.is_shared and is_shared + break + + if apply_voi_transform: + + if 'VOILUTSequence' in image: + voi_lut = LUT.from_dataset( + image.VOILUTSequence[0] + ) + voi_scaled_lut_data = voi_lut.get_scaled_lut_data( + output_range=output_range, + dtype=output_dtype, + invert=invert, + ) + for ds, is_shared in datasets: + if ( + 'WindowCenter' in ds or + 'WindowWidth' in ds + ): + voi_center = ds.WindowCenter + voi_width = ds.WindowWidth + + if 'VOILUTFunction' in ds: + voi_function = ds.VOILUTFunction + + if isinstance(voi_width, list): + voi_width = voi_width[ + voi_transform_index + ] + elif voi_transform_index not in (0, -1): + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." + ) + + if isinstance(voi_center, list): + voi_center = voi_center[ + voi_transform_index + ] + elif voi_transform_index not in (0, -1): + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." + ) + self.is_shared = self.is_shared and is_shared + voi_center_width = (voi_center, voi_width) + break + + # Determine how to combine modality, voi and presentation + # transforms + if modality_slope_intercept is not None: + intercept, slope = modality_slope_intercept + + if voi_center_width is not None: + # Shift and scale the window to account for the scaling + # and intercept + center, width = voi_center_width + self._effective_window_center_width = ( + (center - intercept) / slope, + width / slope + ) + self._effective_voi_function = voi_function + self._invert = invert + + elif voi_lut is not None and voi_scaled_lut_data is not None: + # Shift and "scale" the LUT to account for the rescale + if not intercept.is_integer() and slope.is_integer(): + raise ValueError( + "Cannot apply a VOI LUT when rescale intercept " + "or slope have non-integer values." + ) + intercept = int(intercept) + slope = int(slope) + self._effective_lut_data = voi_scaled_lut_data[::slope] + adjusted_first_value = ( + (voi_lut.first_mapped_value - intercept) / slope + ) + if not adjusted_first_value.is_integer(): + raise ValueError( + "Cannot apply a VOI LUT when rescale intercept " + "or slope have non-integer values." + ) + self._effective_lut_first_mapped_value = int( + adjusted_first_value + ) + else: + # No VOI LUT transform, so the modality rescale + # operates alone + if invert: + # TODO what do here? 
+                        pass
+                    else:
+                        _check_rescale_dtype(
+                            slope=modality_slope_intercept[0],
+                            intercept=modality_slope_intercept[1],
+                            output_dtype=self.output_dtype,
+                            input_dtype=self.input_dtype,
+                            input_range=input_range,
+                        )
+                        self._effective_slope_intercept = (
+                            modality_slope_intercept
+                        )
+
+            elif modality_lut is not None:
+                if voi_center_width is not None:
+                    # Apply the window function to the modality LUT
+                    self._effective_lut_data = voi_window(
+                        array=modality_lut.lut_data,
+                        window_center=voi_center_width[0],
+                        window_width=voi_center_width[1],
+                        output_range=output_range,
+                        dtype=output_dtype,
+                        invert=invert,
+                    )
+                    self._effective_lut_first_mapped_value = (
+                        modality_lut.first_mapped_value
+                    )
+
+                elif voi_lut is not None and voi_scaled_lut_data is not None:
+                    # "Compose" the two LUTs together by applying the
+                    # second to the first
+                    self._effective_lut_data = voi_lut.apply(
+                        modality_lut.lut_data
+                    )
+                    self._effective_lut_first_mapped_value = (
+                        modality_lut.first_mapped_value
+                    )
+                else:
+                    # No VOI LUT transform so the modality lut operates alone
+                    if invert:
+                        # TODO what do here?
+                        pass
+                    else:
+                        self._effective_lut_data = modality_lut.lut_data
+                        self._effective_lut_first_mapped_value = (
+                            modality_lut.first_mapped_value
+                        )
+
+        if self._effective_lut_data is not None:
+            if self._effective_lut_data.dtype != output_dtype:
+                self._effective_lut_data = (
+                    self._effective_lut_data.astype(output_dtype)
+                )
+
+            if input_dtype.kind == 'f':
+                raise ValueError(
+                    'Images with floating point data may not contain LUTs.'
+                )
+
+            # TODO change type of slope/intercept here?
+
+        # We don't use the color_correct_frame() function here, since we cache
+        # the ICC transform on the instance for improved performance.
+        if correct_color and 'ICCProfile' in image:
+            self._color_manager = ColorManager(image.ICCProfile)
+        else:
+            self._color_manager = None
+
+
+    def __call__(self, frame: np.ndarray) -> np.ndarray:
+        """Apply the combined transformation to a frame.
+
+        Parameters
+        ----------
+        frame: numpy.ndarray
+            Input frame for the transformation.
+
+        Returns
+        -------
+        numpy.ndarray:
+            Output frame after the transformation is applied.
+
+        """
+        if self.is_color_input:
+            if frame.ndim != 3 or frame.shape[2] != 3:
+                raise ValueError(
+                    "Expected an image of shape (R, C, 3)."
+                )
+
+        else:
+            if frame.ndim != 2:
+                raise ValueError(
+                    "Expected an image of shape (R, C)."
+ ) + + if self._effective_lut_data is not None: + frame = apply_lut( + frame, + self._effective_lut_data, + self._effective_lut_first_mapped_value, + ) + + elif self._effective_slope_intercept is not None: + slope, intercept = self._effective_slope_intercept + if slope != 1.0: + frame = frame * slope + if intercept != 0.0: + frame = frame + intercept + + elif self._effective_window_center_width is not None: + frame = voi_window( + frame, + window_center=self._effective_window_center_width[0], + window_width=self._effective_window_center_width[1], + dtype=self.output_dtype, + invert=self._invert, + output_range=self._output_range, + ) + + if self._color_manager is not None: + return self._color_manager.transform_frame(frame) + + return frame + + class MultiFrameImage(SOPClass): """Database manager for frame information in a multiframe image.""" @@ -226,16 +650,23 @@ def _build_luts(self) -> None: self.DimensionOrganizationType == 'TILED_FULL' ) - self._dim_ind_pointers = [ - dim_ind.DimensionIndexPointer - for dim_ind in self.DimensionIndexSequence - ] + self._dim_ind_pointers = [] func_grp_pointers = {} - for dim_ind in self.DimensionIndexSequence: - ptr = dim_ind.DimensionIndexPointer - if ptr in self._dim_ind_pointers: - grp_ptr = getattr(dim_ind, "FunctionalGroupPointer", None) - func_grp_pointers[ptr] = grp_ptr + dim_ind_positions = {} + if 'DimensionIndexSequence' in self: + self._dim_ind_pointers = [ + dim_ind.DimensionIndexPointer + for dim_ind in self.DimensionIndexSequence + ] + for dim_ind in self.DimensionIndexSequence: + ptr = dim_ind.DimensionIndexPointer + if ptr in self._dim_ind_pointers: + grp_ptr = getattr(dim_ind, "FunctionalGroupPointer", None) + func_grp_pointers[ptr] = grp_ptr + dim_ind_positions = { + dim_ind.DimensionIndexPointer: i + for i, dim_ind in enumerate(self.DimensionIndexSequence) + } # We may want to gather additional information that is not one of the # indices @@ -265,15 +696,12 @@ def _build_luts(self) -> None: if hasattr(self, 'PerFrameFunctionalGroupsSequence'): pfg1 = self.PerFrameFunctionalGroupsSequence[0] if hasattr(pfg1, 'PixelMeasuresSequence'): - slice_spacing_hint = pfg1.PixelMeasuresSequence[0].get( + measures = pfg1.PixelMeasuresSequence[0] + slice_spacing_hint = measures.get( 'SpacingBetweenSlices' ) - shared_pixel_spacing = pfg1.get('PixelSpacing') + shared_pixel_spacing = measures.get('PixelSpacing') - dim_ind_positions = { - dim_ind.DimensionIndexPointer: i - for i, dim_ind in enumerate(self.DimensionIndexSequence) - } dim_indices: Dict[int, List[int]] = { ptr: [] for ptr in self._dim_ind_pointers } @@ -371,11 +799,14 @@ def _build_luts(self) -> None: for frame_item in self.PerFrameFunctionalGroupsSequence: # Get dimension indices for this frame - content_seq = frame_item.FrameContentSequence[0] - indices = content_seq.DimensionIndexValues - if not isinstance(indices, (MultiValue, list)): - # In case there is a single dimension index - indices = [indices] + if 'FrameContentSequence' in frame_item: + content_seq = frame_item.FrameContentSequence[0] + indices = content_seq.DimensionIndexValues + if not isinstance(indices, (MultiValue, list)): + # In case there is a single dimension index + indices = [indices] + else: + indices = [] if len(indices) != len(self._dim_ind_pointers): raise RuntimeError( 'Unexpected mismatch between dimension index values in ' @@ -472,6 +903,25 @@ def _build_luts(self) -> None: if iop != shared_image_orientation: shared_image_orientation = None + if hasattr(frame_item, 'PixelMeasuresSequence'): + measures = 
frame_item.PixelMeasuresSequence[0] + + fm_slice_spacing = measures.get( + 'SpacingBetweenSlices' + ) + if ( + slice_spacing_hint is not None and + fm_slice_spacing != slice_spacing_hint + ): + slice_spacing_hint = None + + fm_pixel_spacing = measures.get('PixelSpacing') + if ( + shared_pixel_spacing is not None and + fm_pixel_spacing != shared_pixel_spacing + ): + shared_pixel_spacing = None + # Summarise if any( isinstance(v, SpatialLocationsPreservedValues) and diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 0bbdd6ef..53955352 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -25,10 +25,11 @@ VOILUTFunctionValues, ) from highdicom.pixel_transforms import ( + _check_rescale_dtype, _get_combined_palette_color_lut, _parse_palette_color_lut_attributes, apply_lut, - voi_window_function, + voi_window, ) from highdicom.sr.enum import ValueTypeValues from highdicom.sr.coding import CodedConcept @@ -2011,6 +2012,7 @@ def from_dataset( Constructed object """ + # TODO should this be an extract_from_dataset? attrs = [ 'LUTDescriptor', 'LUTData' @@ -2482,7 +2484,7 @@ def apply( "not present." ) - array = voi_window_function( + array = voi_window( array, window_center=cast(float, window_center), window_width=cast(float, window_width), @@ -2623,52 +2625,22 @@ def apply( if 'ModalityLUTSequence' in self: return self.ModalityLUTSequence[0].apply(array, dtype=dtype) else: - slope = np.float64(self.get('RescaleSlope', 1.0)) - intercept = np.float64( - self.get('RescaleIntercept', 0.0) - ) + slope = self.get('RescaleSlope', 1.0) + intercept = self.get('RescaleIntercept', 0.0) if dtype is None: dtype = np.dtype(np.float64) dtype = np.dtype(dtype) - # Check dtype is suitable - if dtype.kind not in ('u', 'i', 'f'): - raise ValueError( - f'Data type "{dtype}" is not suitable.' - ) - if dtype.kind in ('u', 'i'): - if not (slope.is_integer() and intercept.is_integer()): - raise ValueError( - 'An integer data type cannot be used if the slope ' - 'or intercept is a non-integer value.' - ) - if array.dtype.kind not in ('u', 'i'): - raise ValueError( - 'An integer data type cannot be used if the input ' - 'array is floating point.' - ) - - if dtype.kind == 'u' and intercept < 0.0: - raise ValueError( - 'An unsigned integer data type cannot be used if the ' - 'intercept is negative.' - ) - - output_max = np.iinfo(array.dtype).max * slope + intercept - output_type_max = np.iinfo(dtype).max - output_min = np.iinfo(array.dtype).min * slope + intercept - output_type_min = np.iinfo(dtype).min - - if output_max > output_type_max or output_min < output_type_min: - raise ValueError( - f'Datatype {dtype} does not have capacity for values ' - f'with slope {slope:.2f} and intercept {intercept:.2f}.' 
- ) + _check_rescale_dtype( + input_dtype=array.dtype, + output_dtype=dtype, + intercept=intercept, + slope=slope, + ) - if dtype != np.float64: - slope = slope.astype(dtype) - intercept = intercept.astype(dtype) + intercept = intercept.astype(dtype) + slope = slope.astype(dtype) # Avoid unnecessary array operations for efficiency if slope != 1.0 or intercept != 0.0: diff --git a/src/highdicom/pixel_transforms.py b/src/highdicom/pixel_transforms.py index 482f7365..f1347aed 100644 --- a/src/highdicom/pixel_transforms.py +++ b/src/highdicom/pixel_transforms.py @@ -1,5 +1,5 @@ """Functional interface for pixel transformations.""" -from typing import Union, Tuple +from typing import Optional, Union, Tuple import numpy as np @@ -168,7 +168,79 @@ def _get_combined_palette_color_lut( return first_mapped_value, combined_array -def voi_window_function( +def _check_rescale_dtype( + input_dtype: np.dtype, + output_dtype: np.dtype, + intercept: float, + slope: float, + input_range: Optional[Tuple[float, float]] = None, +) -> None: + """Checks whether it is appropriate to apply a given rescale to an array + with a given dtype. + + Raises an error if not compatible. + + Parameters + ---------- + input_dtype: numpy.dtype + Datatype of the input array of the rescale operation. + output_dtype: numpy.dtype + Datatype of the output array of the rescale operation. + intercept: float + Intercept of the rescale operation. + slope: float + Slope of the rescale operation. + input_range: Optional[Tuple[float, float]], optional + Known limit of values for the input array. This could for example be + deduced by the number of bits stored in an image. If not specified, the + full range of values of the ``input_dtype`` is assumed. + + """ + slope_np = np.float64(slope) + intercept_np = np.float64(intercept) + + # Check dtype is suitable + if output_dtype.kind not in ('u', 'i', 'f'): + raise ValueError( + f'Data type "{output_dtype}" is not suitable.' + ) + if output_dtype.kind in ('u', 'i'): + if not (slope.is_integer() and intercept.is_integer()): + raise ValueError( + 'An integer data type cannot be used if the slope ' + 'or intercept is a non-integer value.' + ) + if input_dtype.kind not in ('u', 'i'): + raise ValueError( + 'An integer data type cannot be used if the input ' + 'array is floating point.' + ) + + if input_dtype.kind == 'u' and intercept < 0.0: + raise ValueError( + 'An unsigned integer data type cannot be used if the ' + 'intercept is negative.' + ) + + if input_range is not None: + input_min, input_max = input_range + else: + input_min = np.iinfo(input_dtype).min + input_max = np.iinfo(input_dtype).max + + output_max = input_max * slope_np + intercept_np + output_min = input_min * slope_np + intercept_np + output_type_max = np.iinfo(output_dtype).max + output_type_min = np.iinfo(output_dtype).min + + if output_max > output_type_max or output_min < output_type_min: + raise ValueError( + f'Datatype {output_dtype} does not have capacity for values ' + f'with slope {slope:.2f} and intercept {intercept:.2f}.' 
+ ) + + +def voi_window( array: np.ndarray, window_center: float, window_width: float, diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index dd853226..bf0bd15f 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -1643,7 +1643,6 @@ def __getitem__( """ _, new_shape, new_affine = self._prepare_getitem_index(index) - self._spatial_shape = new_shape return self.__class__( affine=new_affine, From 4fb9881cc137b5fd75e01ea851e099383c0e238a Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 22 Dec 2024 12:09:35 -0500 Subject: [PATCH 82/93] Fix volume indexing at -1 --- src/highdicom/volume.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/highdicom/volume.py b/src/highdicom/volume.py index bf0bd15f..56781fdb 100644 --- a/src/highdicom/volume.py +++ b/src/highdicom/volume.py @@ -568,7 +568,11 @@ def _check_slice(val: slice, dim: int) -> None: # are retained in the output array. Also make into a tuple of # length 1 to standardize format _check_int(index, 0) - tuple_index = (slice(index, index + 1), ) + if index == -1: + end_index = None + else: + end_index = index + 1 + tuple_index = (slice(index, end_index), ) elif isinstance(index, slice): # Make into a tuple of length one to standardize the format _check_slice(index, 0) @@ -580,7 +584,11 @@ def _check_slice(val: slice, dim: int) -> None: # Change the index to a slice of length one so that all # dimensions are retained in the output array. _check_int(item, dim) - item = slice(item, item + 1) + if item == -1: + end_index = None + else: + end_index = item + 1 + item = slice(item, end_index) index_list.append(item) elif isinstance(item, slice): _check_slice(item, dim) From 3d34cc8f34a2f8cc83491859daad5265f1ee8521 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Sun, 22 Dec 2024 16:03:24 -0500 Subject: [PATCH 83/93] Implement missing parts of pixel transforms --- src/highdicom/_multiframe.py | 144 ++++++++++++++++++++++++++--------- src/highdicom/content.py | 5 +- 2 files changed, 111 insertions(+), 38 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index bee4be17..b255bfe5 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -28,6 +28,7 @@ tag_for_keyword, ) from pydicom.multival import MultiValue +from pydicom.uid import ParametricMapStorage from highdicom._module_utils import is_multiframe_image from highdicom.base import SOPClass, _check_little_endian @@ -164,7 +165,6 @@ def __init__( ICC Profile. """ - # TODO: real world value map # TODO: specify that code should error if no transform found? # TODO: choose VOI by explanation? # TODO: how to combine with multiframe? 
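
The inversion handling introduced in the following hunks leans on a small
identity: if a modality rescale maps stored values x in [imin, imax] to
y = slope * x + intercept, then inverting the outputs within their own range
gives y' = (y_min + y_max) - y = -slope * x + (slope * (imin + imax) +
intercept). Inversion can therefore be folded into an effective slope of
-slope and an effective intercept of slope * (imin + imax) + intercept. A
quick numerical check of this identity (illustrative only; the rescale
parameters and bit depth below are made up):

    import numpy as np

    slope, intercept = 2.0, -1024.0   # hypothetical rescale parameters
    imin, imax = 0, 4095              # e.g. 12 bits stored, unsigned

    x = np.arange(imin, imax + 1)
    y = slope * x + intercept

    inverted = y.min() + y.max() - y
    effective = -slope * x + (slope * (imin + imax) + intercept)
    assert np.allclose(inverted, effective)
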
@@ -190,21 +190,41 @@ def __init__( self._effective_window_center_width: Optional[Tuple[float, float]] = None self._effective_slope_intercept: Optional[Tuple[float, float]] = None self._invert = False + self._clip = True - if 'FloatPixelData' in image: - input_dtype = np.float32 - input_range = None - elif 'DoubleFloatPixelData' in image: - input_dtype = np.float64 - input_range = None + input_range = None + if ( + image.SOPClassUID == ParametricMapStorage + and image.BitsAllocated > 16 + ): + # Parametric Maps are the only SOP Class (currently) that allows + # floating point pixels + if image.BitsAllocated == 32: + self.input_dtype = np.dtype(np.float32) + elif image.BitsAllocated == 64: + self.input_dtype = np.dtype(np.float64) else: - if image.BitsAllocated == 16: - self.input_dtype = np.dtype(np.uint16) - elif image.BitsAllocated == 32: - self.input_dtype = np.dtype(np.uint32) + if image.PixelRepresentation == 1: + if image.BitsAllocated == 8: + self.input_dtype = np.dtype(np.int8) + elif image.BitsAllocated == 16: + self.input_dtype = np.dtype(np.int16) + elif image.BitsAllocated == 32: + self.input_dtype = np.dtype(np.int32) + + # 2's complement to define the range + half_range = 2 ** (image.BitsStored - 1) + input_range = (-half_range, half_range - 1) else: - self.input_dtype = np.dtype(np.uint8) - input_range = (0, 2 ** image.BitsStored) + if image.BitsAllocated == 1: + self.input_dtype = np.dtype(np.uint8) + elif image.BitsAllocated == 8: + self.input_dtype = np.dtype(np.uint8) + elif image.BitsAllocated == 16: + self.input_dtype = np.dtype(np.uint16) + elif image.BitsAllocated == 32: + self.input_dtype = np.dtype(np.uint32) + input_range = (0, 2 ** image.BitsStored) if not self.is_color_input: if ( @@ -244,10 +264,12 @@ def __init__( voi_center_width: Optional[Tuple[float, float]] = None voi_function = 'LINEAR' invert = False + has_rwvm = False if ensure_monochrome_2: - if image.PhotometricInterpretation == 'MONOCHROME1': - # TODO what about presentation LUT + if 'PresentationLUTShape' in image: + invert = image.PresentationLUTShape == 'INVERSE' + elif image.PhotometricInterpretation == 'MONOCHROME1': invert = True for ds, is_shared in datasets: @@ -267,6 +289,7 @@ def __init__( self._effective_lut_first_mapped_value = int( rwvm_item.RealWorldValueFirstValueMapped ) + self._clip = False else: self._effective_slope_intercept = ( rwvm_item.RealWorldValueSlope, @@ -283,10 +306,10 @@ def __init__( rwvm_item.RealWorldValueLastValueMapped ) self.is_shared = self.is_shared and is_shared - # TODO skip pixel transformation + has_rwvm = True break - if apply_modality_transform: + if not has_rwvm and apply_modality_transform: if 'ModalityLUTSequence' in image: modality_lut = LUT.from_dataset( @@ -305,7 +328,7 @@ def __init__( self.is_shared = self.is_shared and is_shared break - if apply_voi_transform: + if not has_rwvm and apply_voi_transform: if 'VOILUTSequence' in image: voi_lut = LUT.from_dataset( @@ -353,7 +376,7 @@ def __init__( # Determine how to combine modality, voi and presentation # transforms if modality_slope_intercept is not None: - intercept, slope = modality_slope_intercept + slope, intercept = modality_slope_intercept if voi_center_width is not None: # Shift and scale the window to account for the scaling @@ -391,16 +414,24 @@ def __init__( # No VOI LUT transform, so the modality rescale # operates alone if invert: - # TODO what do here? 
- pass - else: - _check_rescale_dtype( - slope=modality_slope_intercept[0], - intercept=modality_slope_intercept[1], - output_dtype=self.output_dtype, - input_dtype=self.input_dtype, - input_range=input_range, + # Adjust the parameters to invert the intensities + # within the scaled and offset range + eff_slope = -slope + if input_range is None: + # This situation will be unusual: float valued + # pixels with a rescale transform that needs to + # be inverted. For simplicity, just invert + # the pixel values + eff_intercept = -intercept + else: + imin, imax = input_range + eff_intercept = ( + slope * (imin + imax) + intercept + ) + self._effective_slope_intercept = ( + eff_slope, eff_intercept ) + else: self._effective_slope_intercept = ( modality_slope_intercept ) @@ -432,13 +463,32 @@ def __init__( else: # No VOI LUT transform so the modality lut operates alone if invert: - # TODO what do here? - pass + lut_data = modality_lut.lut_data + inverted_lut_data = ( + lut_data.min() + lut_data.max() - lut_data + ) + self._effective_lut_data = inverted_lut_data else: self._effective_lut_data = modality_lut.lut_data - self._effective_lut_first_mapped_value = ( - modality_lut.first_mapped_value - ) + self._effective_lut_first_mapped_value = ( + modality_lut.first_mapped_value + ) + + else: + # No mdality LUT, but may still require inversion + if invert: + # Use a rescale slope and intercept to invert the + # values within their existing range + if input_range is None: + eff_intercept = 0 + else: + imin, imax = input_range + eff_intercept = imin + imax + + self._effective_slope_intercept = ( + -1, + eff_intercept + ) if self._effective_lut_data is not None: if self._effective_lut_data.dtype != output_dtype: @@ -446,12 +496,24 @@ def __init__( self._effective_lut_data.astype(output_dtype) ) - if input_dtype.kind == 'f': + if self.input_dtype.kind == 'f': raise ValueError( 'Images with floating point data may not contain LUTs.' ) - # TODO change type of slope/intercept here? + if self._effective_slope_intercept is not None: + slope, intercept = self._effective_slope_intercept + _check_rescale_dtype( + slope=slope, + intercept=intercept, + output_dtype=self.output_dtype, + input_dtype=self.input_dtype, + input_range=input_range, + ) + self._effective_slope_intercept = ( + np.float64(slope).astype(self.output_dtype), + np.float64(intercept).astype(self.output_dtype), + ) # We don't use the color_correct_frame() function here, since we cache # the ICC transform on the instance for improved performance. @@ -487,19 +549,31 @@ def __call__(self, frame: np.ndarray) -> np.ndarray: "Expected an image of shape (R, C)." ) + if self._input_range_check is not None: + first, last = self._input_range_check + if frame.min() < first or frame.max() > last: + raise ValueError( + 'Array contains value outside the valid range.' 
+ ) + if self._effective_lut_data is not None: frame = apply_lut( frame, self._effective_lut_data, self._effective_lut_first_mapped_value, + clip=self._clip, ) elif self._effective_slope_intercept is not None: slope, intercept = self._effective_slope_intercept + + # Avoid unnecessary array operations for efficiency if slope != 1.0: frame = frame * slope if intercept != 0.0: frame = frame + intercept + if frame.dtype != self.output_dtype: + frame = frame.astype(self.output_dtype) elif self._effective_window_center_width is not None: frame = voi_window( diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 53955352..f3b88c33 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -2639,14 +2639,13 @@ def apply( slope=slope, ) - intercept = intercept.astype(dtype) - slope = slope.astype(dtype) - # Avoid unnecessary array operations for efficiency if slope != 1.0 or intercept != 0.0: if slope != 1.0: + slope = np.float64(slope).astype(dtype) array = array * slope if intercept != 0.0: + intercept = np.float64(intercept).astype(dtype) array = array + intercept else: if array.dtype != dtype: From 862ab71343d39b94a2a71c2360ca616418b6c65f Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 23 Dec 2024 11:38:23 -0500 Subject: [PATCH 84/93] Fixes to transform class --- src/highdicom/_multiframe.py | 39 +++++++++++++++++++++--------------- src/highdicom/content.py | 21 +++++++++++++++++++ 2 files changed, 44 insertions(+), 16 deletions(-) diff --git a/src/highdicom/_multiframe.py b/src/highdicom/_multiframe.py index b255bfe5..9743fb16 100644 --- a/src/highdicom/_multiframe.py +++ b/src/highdicom/_multiframe.py @@ -117,7 +117,7 @@ def __init__( apply_palette_color_lut: bool = True, ensure_monochrome_2: bool = True, real_world_value_map_index: int = 0, - output_range: Tuple[float, float] = (0.0, 1.0), + voi_output_range: Tuple[float, float] = (0.0, 1.0), correct_color: bool = True, ): """Apply pixel transformation to a frame. @@ -155,7 +155,7 @@ def __init__( real_world_value_map_index: int, optional Index of the real world value map to use (multiple may be stored within the dataset). - output_range: Tuple[float, float], optional + voi_output_range: Tuple[float, float], optional Range of output values to which the VOI range is mapped. Only relevant if ``apply_voi_transform`` is True and a VOI transform is present. @@ -174,17 +174,18 @@ def __init__( "'apply_modality_transform'." ) - output_min, output_max = output_range + output_min, output_max = voi_output_range if output_min >= output_max: raise ValueError( - "Second value of 'output_range' must be higher than the first." + "Second value of 'voi_output_range' must be higher than " + "the first." 
) self.output_dtype = np.dtype(output_dtype) - self.is_shared = True + self.applies_to_all_frames = True self.is_color_input = image.SamplesPerPixel == 3 self._input_range_check: Optional[Tuple[int, int]] = None - self._output_range = output_range + self._voi_output_range = voi_output_range self._effective_lut_data: Optional[np.ndarray] = None self._effective_lut_first_mapped_value = 0 self._effective_window_center_width: Optional[Tuple[float, float]] = None @@ -192,6 +193,7 @@ def __init__( self._invert = False self._clip = True + # Determine input type and range of values input_range = None if ( image.SOPClassUID == ParametricMapStorage @@ -305,7 +307,9 @@ def __init__( rwvm_item.RealWorldValueFirstValueMapped, rwvm_item.RealWorldValueLastValueMapped ) - self.is_shared = self.is_shared and is_shared + self.applies_to_all_frames = ( + self.applies_to_all_frames and is_shared + ) has_rwvm = True break @@ -325,7 +329,9 @@ def __init__( float(ds.get('RescaleSlope', 1.0)), float(ds.get('RescaleIntercept', 0.0)) ) - self.is_shared = self.is_shared and is_shared + self.applies_to_all_frames = ( + self.applies_to_all_frames and is_shared + ) break if not has_rwvm and apply_voi_transform: @@ -335,10 +341,11 @@ def __init__( image.VOILUTSequence[0] ) voi_scaled_lut_data = voi_lut.get_scaled_lut_data( - output_range=output_range, + output_range=voi_output_range, dtype=output_dtype, invert=invert, ) + else: for ds, is_shared in datasets: if ( 'WindowCenter' in ds or @@ -369,7 +376,9 @@ def __init__( "Requested 'voi_transform_index' is " "not present." ) - self.is_shared = self.is_shared and is_shared + self.applies_to_all_frames = ( + self.applies_to_all_frames and is_shared + ) voi_center_width = (voi_center, voi_width) break @@ -443,7 +452,7 @@ def __init__( array=modality_lut.lut_data, window_center=voi_center_width[0], window_width=voi_center_width[1], - output_range=output_range, + output_range=voi_output_range, dtype=output_dtype, invert=invert, ) @@ -463,11 +472,9 @@ def __init__( else: # No VOI LUT transform so the modality lut operates alone if invert: - lut_data = modality_lut.lut_data - inverted_lut_data = ( - lut_data.min() + lut_data.max() - lut_data + self._effective_lut_data = ( + modality_lut.get_inverted_lut_data() ) - self._effective_lut_data = inverted_lut_data else: self._effective_lut_data = modality_lut.lut_data self._effective_lut_first_mapped_value = ( @@ -582,7 +589,7 @@ def __call__(self, frame: np.ndarray) -> np.ndarray: window_width=self._effective_window_center_width[1], dtype=self.output_dtype, invert=self._invert, - output_range=self._output_range, + output_range=self._voi_output_range, ) if self._color_manager is not None: diff --git a/src/highdicom/content.py b/src/highdicom/content.py index f3b88c33..0fdc9cae 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -2099,6 +2099,27 @@ def get_scaled_lut_data( return lut_data + def get_inverted_lut_data( + self, + ) -> np.ndarray: + """Get LUT data array with output values inverted within the same range. + + This returns the LUT data inverted within its original range. So if the + original LUT data has output values in the range 10-20 inclusive, then + the entries with output value 10 will be mapped to 20, the entries with + output value 11 will be mapped to value 19, and so on until the entries + with value 20 are mapped to 10. + + Returns + ------- + numpy.ndarray: + Inverted LUT data array, with the same size and data type as the + original array. 
+
+        """
+        lut_data = self.lut_data
+        return lut_data.min() + lut_data.max() - lut_data
+
     def apply(
         self,
         array: np.ndarray,

From 6cc51509e81f73b32a43870328d3dabe723fc450 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Mon, 23 Dec 2024 19:20:45 -0500
Subject: [PATCH 85/93] Add some tests related to combined pixel transform

---
 src/highdicom/{_multiframe.py => image.py} | 686 ++++++++++++++-------
 src/highdicom/pixel_transforms.py          |   7 +-
 src/highdicom/seg/sop.py                   |   2 +-
 tests/test_image.py                        | 335 ++++++++++
 tests/test_multiframe.py                   |  64 --
 5 files changed, 800 insertions(+), 294 deletions(-)
 rename src/highdicom/{_multiframe.py => image.py} (80%)
 create mode 100644 tests/test_image.py
 delete mode 100644 tests/test_multiframe.py

diff --git a/src/highdicom/_multiframe.py b/src/highdicom/image.py
similarity index 80%
rename from src/highdicom/_multiframe.py
rename to src/highdicom/image.py
index 9743fb16..20809dd6 100644
--- a/src/highdicom/_multiframe.py
+++ b/src/highdicom/image.py
@@ -1,7 +1,8 @@
-"""Tools for working with multiframe DICOM images."""
+"""Tools for working with general DICOM images."""
 from collections import Counter
 from contextlib import contextmanager
 from copy import deepcopy
+from enum import Enum
 import logging
 import sqlite3
 from typing import (
@@ -102,25 +103,64 @@
 logger = logging.getLogger(__name__)
 
 
+class _ImageColorType(Enum):
+    """Internal enum describing color arrangement of an image."""
+    MONOCHROME = 'MONOCHROME'
+    COLOR = 'COLOR'
+    PALETTE_COLOR = 'PALETTE_COLOR'
+
+
 class _CombinedPixelTransformation:
 
-    """Class representing a combined pixel transformation."""
+    """Class representing a combined pixel transformation.
+
+    DICOM images contain multi-stage transformations to apply to the raw stored
+    pixel values. This class is intended to provide a single object that
+    configurably and efficiently applies the net effect of the selected
+    transforms to stored pixel data.
+
+    Depending on the parameters, it may perform operations related to the
+    following:
+
+    For monochrome images:
+    * Real world value maps, which map stored pixels to real-world values and
+    are independent of all other transforms
+    * Modality LUT transformation, which transforms stored pixel values to
+    modality-specific values
+    * Value-of-interest (VOI) LUT transformation, which transforms the output
+    of the Modality LUT transform to output values in order to focus on a
+    particular region of intensity values of interest (such as a
+    windowing operation).
+    * Presentation LUT transformation, which inverts the range of values for
+    display.
+
+    For pseudo-color images (stored as monochrome images but displayed as color
+    images):
+    * The Palette Color LUT transformation, which maps stored single-sample
+    pixel values to 3-samples-per-pixel RGB color images.
+
+    For color images and pseudo-color images:
+    * The ICCProfile, which performs color correction.
+ + """ def __init__( self, image: Dataset, frame_index: int = 0, + *, output_dtype: Union[type, str, np.dtype, None] = np.float64, - apply_modality_transform: bool = True, - apply_voi_transform: bool = False, - voi_transform_index: int = 0, - apply_palette_color_lut: bool = True, - ensure_monochrome_2: bool = True, + apply_real_world_transform: bool | None = None, real_world_value_map_index: int = 0, + apply_modality_transform: bool | None = None, + apply_voi_transform: bool | None = False, + voi_transform_index: int = 0, voi_output_range: Tuple[float, float] = (0.0, 1.0), - correct_color: bool = True, + apply_presentation_lut: bool = True, + apply_palette_color_lut: bool | None = None, + apply_icc_profile: bool | None = None, ): - """Apply pixel transformation to a frame. + """ Parameters ---------- @@ -129,49 +169,194 @@ def __init__( transformation should be represented. frame_index: int Zero-based index (one less than the frame number). - apply_modality_transform: bool, optional + output_dtype: Union[type, str, np.dtype, None], optional + Data type of the output array. + apply_real_world_transform: bool | None, optional + Whether to apply to apply the real-world value map to the frame. + The real world value map converts stored pixel values to output + values with a real-world meaning, either using a LUT or a linear + slope and intercept. + + If True, the transform is applied if present, and if not + present an error will be raised. If False, the transform will not + be applied, regardless of whether it is present. If ``None``, the + transform will be applied if present but no error will be raised if + it is not present. + + Note that if the dataset contains both a modality LUT and a real + world value map, the real world value map will be applied + preferentially. This also implies that specifying both + ``apply_real_world_transform`` and ``apply_modality_transform`` to + True is not permitted. + real_world_value_map_index: int, optional + Index of the real world value map to use (multiple may be stored + within the dataset). + apply_modality_transform: bool | None, optional Whether to apply to the modality transform (if present in the dataset) the frame. The modality transformation maps stored pixel values to output values, either using a LUT or rescale slope and intercept. - apply_voi_transform: bool, optional + + If True, the transform is applied if present, and if not + present an error will be raised. If False, the transform will not + be applied, regardless of whether it is present. If ``None``, the + transform will be applied if it is present and no real world value + map takes precedence, but no error will be raised if it is not + present. + apply_voi_transform: bool | None, optional Apply the value-of-interest (VOI) transformation (if present in the dataset), which limits the range of pixel values to a particular range of interest, using either a windowing operation or a LUT. + + If True, the transform is applied if present, and if not + present an error will be raised. If False, the transform will not + be applied, regardless of whether it is present. If ``None``, the + transform will be applied if it is present and no real world value + map takes precedence, but no error will be raised if it is not + present. voi_transform_index: int, optional Index (zero-based) of the VOI transform to apply if multiple are included in the datasets. Ignored if ``apply_voi_transform`` is ``False`` or no VOI transform is included in the datasets. 
            May be a negative integer, following standard Python indexing
            convention.
-        apply_palette_color_lut: bool, optional
-            Apply the palette color LUT, if present in the dataset. The palette
-            color LUT maps a single sample for each pixel stored in the dataset
-            to a 3 sample-per-pixel color image.
-        ensure_monochrome_2: bool, optional
-            If the Photometric Interpretation is MONOCHROME1, convert the range
-            of the output pixels corresponds to MONOCHROME2 (in which high
-            values are represent white and low values represent black). Ignored
-            if PhotometricInterpretation is not MONOCHROME1.
-        real_world_value_map_index: int, optional
-            Index of the real world value map to use (multiple may be stored
-            within the dataset).
         voi_output_range: Tuple[float, float], optional
             Range of output values to which the VOI range is mapped. Only
             relevant if ``apply_voi_transform`` is True and a VOI transform is
             present.
-        correct_color: bool, optional
+        apply_palette_color_lut: bool | None, optional
+            Apply the palette color LUT, if present in the dataset. The palette
+            color LUT maps a single sample for each pixel stored in the dataset
+            to a 3 sample-per-pixel color image.
+        apply_presentation_lut: bool, optional
+            Apply the presentation LUT transform to invert the pixel values.
+            If the PresentationLUTShape is present with the value
+            ``'INVERSE'``, or the PresentationLUTShape is not present but the
+            Photometric Interpretation is MONOCHROME1, convert the range of
+            the output pixels to correspond to MONOCHROME2 (in which high
+            values represent white and low values represent black). Ignored
+            if PhotometricInterpretation is not MONOCHROME1 and the
+            PresentationLUTShape is not present, or if a real world value
+            transform is applied.
+        apply_icc_profile: bool | None, optional
             Whether colors should be corrected by applying an ICC
             transformation. Will only be performed if metadata contain an
             ICC Profile.
 
+            If True, the transform is applied if present, and if not
+            present an error will be raised. If False, the transform will not
+            be applied, regardless of whether it is present. If ``None``, the
+            transform will be applied if it is present, but no error will be
+            raised if it is not present.
+
         """
-        # TODO: specify that code should error if no transform found?
         # TODO: choose VOI by explanation?
         # TODO: how to combine with multiframe?
-        if apply_voi_transform and not apply_modality_transform:
+        photometric_interpretation = image.PhotometricInterpretation
+
+        if photometric_interpretation in (
+            'MONOCHROME1',
+            'MONOCHROME2',
+        ):
+            self._color_type = _ImageColorType.MONOCHROME
+        elif photometric_interpretation == 'PALETTE_COLOR':
+            self._color_type = _ImageColorType.PALETTE_COLOR
+        else:
+            self._color_type = _ImageColorType.COLOR
+
+        if apply_real_world_transform is None:
+            use_rwvm = True
+            require_rwvm = False
+        else:
+            use_rwvm = bool(apply_real_world_transform)
+            require_rwvm = use_rwvm
+
+        if apply_modality_transform is None:
+            use_modality = True
+            require_modality = False
+        else:
+            use_modality = bool(apply_modality_transform)
+            require_modality = use_modality
+
+        if require_modality and require_rwvm:
+            raise ValueError(
+                "Setting both 'apply_real_world_transform' and "
+                "'apply_modality_transform' to True is not "
+                "permitted."
+            )
+
+        if apply_voi_transform is None:
+            use_voi = True
+            require_voi = False
+        else:
+            use_voi = bool(apply_voi_transform)
+            require_voi = use_voi
+
+        if require_rwvm:
+            # No need to search for modality or VOI since they won't be used
+            use_modality = False
+            use_voi = False
+        if require_modality:
+            # No need to search for rwvm since it won't be used
+            use_rwvm = False
+
+        if use_voi and not use_modality:
+            # The VOI transform is dependent on first applying the modality
+            # transform
             raise ValueError(
-                "Parameter 'apply_voi_transform' requires "
-                "'apply_modality_transform'."
+                "If 'apply_voi_transform' is True or None, "
+                "'apply_modality_transform' cannot be False."
+            )
+
+        if require_rwvm and self._color_type != _ImageColorType.MONOCHROME:
+            raise ValueError(
+                'Real-world value map is required but the image is not '
+                'a monochrome image.'
+            )
+        if require_modality and self._color_type != _ImageColorType.MONOCHROME:
+            raise ValueError(
+                'Modality transform is required but the image is not '
+                'a monochrome image.'
+            )
+        if require_voi and self._color_type != _ImageColorType.MONOCHROME:
+            raise ValueError(
+                'VOI transform is required but the image is not '
+                'a monochrome image.'
+            )
+
+        if apply_palette_color_lut is None:
+            use_palette_color = True
+            require_palette_color = False
+        else:
+            use_palette_color = bool(apply_palette_color_lut)
+            require_palette_color = use_palette_color
+
+        if (
+            require_palette_color and self._color_type !=
+            _ImageColorType.PALETTE_COLOR
+        ):
+            raise ValueError(
+                'Palette color transform is required but the image is not '
+                'a palette color image.'
+            )
+
+        if apply_icc_profile is None:
+            use_icc = True
+            require_icc = False
+        else:
+            use_icc = bool(apply_icc_profile)
+            require_icc = use_icc
+
+        if use_icc and not use_palette_color:
+            # The ICC transform is dependent on first applying the palette
+            # color transform
+            raise ValueError(
+                "If 'apply_icc_profile' is True or None, "
+                "'apply_palette_color_lut' cannot be False."
+            )
+
+        if require_icc and self._color_type == _ImageColorType.MONOCHROME:
+            raise ValueError(
+                'ICC profile is required but the image is not '
+                'a color or palette color image.'
) output_min, output_max = voi_output_range @@ -183,7 +368,6 @@ def __init__( self.output_dtype = np.dtype(output_dtype) self.applies_to_all_frames = True - self.is_color_input = image.SamplesPerPixel == 3 self._input_range_check: Optional[Tuple[int, int]] = None self._voi_output_range = voi_output_range self._effective_lut_data: Optional[np.ndarray] = None @@ -228,11 +412,8 @@ def __init__( self.input_dtype = np.dtype(np.uint32) input_range = (0, 2 ** image.BitsStored) - if not self.is_color_input: - if ( - image.PhotometricInterpretation == 'PALETTE COLOR' and - apply_palette_color_lut - ): + if self._color_type == _ImageColorType.PALETTE_COLOR: + if use_palette_color: if 'SegmentedRedPaletteColorLookupTableData' in image: # TODO raise RuntimeError("Segmented LUTs are not implemented.") @@ -240,40 +421,42 @@ def __init__( self._first_mapped_value, self._effective_lut = ( _get_combined_palette_color_lut(image) ) - else: - # Create a list of all datasets to check for transforms for - # this frame, and whether they are shared by all frames - datasets = [(image, True)] - if 'SharedFunctionalGroupsSequence' in image: - datasets.append( - (image.SharedFunctionalGroupsSequence[0], True) - ) + elif self._color_type == _ImageColorType.MONOCHROME: + # Create a list of all datasets to check for transforms for + # this frame, and whether they are shared by all frames + datasets = [(image, True)] - if 'PerFrameFunctionalGroupsSequence' in image: - datasets.append( - ( - image.PerFrameFunctionalGroupsSequence[frame_index], - False, - ) + if 'SharedFunctionalGroupsSequence' in image: + datasets.append( + (image.SharedFunctionalGroupsSequence[0], True) + ) + + if 'PerFrameFunctionalGroupsSequence' in image: + datasets.append( + ( + image.PerFrameFunctionalGroupsSequence[frame_index], + False, ) + ) - modality_lut: Optional[LUT] = None - modality_slope_intercept: Optional[Tuple[float, float]] = None + modality_lut: Optional[LUT] = None + modality_slope_intercept: Optional[Tuple[float, float]] = None - voi_lut: Optional[LUT] = None - voi_scaled_lut_data: Optional[np.ndarray] = None - voi_center_width: Optional[Tuple[float, float]] = None - voi_function = 'LINEAR' - invert = False - has_rwvm = False + voi_lut: Optional[LUT] = None + voi_scaled_lut_data: Optional[np.ndarray] = None + voi_center_width: Optional[Tuple[float, float]] = None + voi_function = 'LINEAR' + invert = False + has_rwvm = False - if ensure_monochrome_2: - if 'PresentationLUTShape' in image: - invert = image.PresentationLUTShape == 'INVERSE' - elif image.PhotometricInterpretation == 'MONOCHROME1': - invert = True + if apply_presentation_lut: + if 'PresentationLUTShape' in image: + invert = image.PresentationLUTShape == 'INVERSE' + elif image.PhotometricInterpretation == 'MONOCHROME1': + invert = True + if use_rwvm: for ds, is_shared in datasets: rwvm_seq = ds.get('RealWorldValueMappingSequence') if rwvm_seq is not None: @@ -313,189 +496,225 @@ def __init__( has_rwvm = True break - if not has_rwvm and apply_modality_transform: + if require_rwvm and not has_rwvm: + raise RuntimeError( + 'A real-world value map is required but not found in the ' + 'image.' 
+ ) - if 'ModalityLUTSequence' in image: - modality_lut = LUT.from_dataset( - image.ModalityLUTSequence[0] - ) - else: - for ds, is_shared in datasets: - if ( - 'RescaleSlope' in ds or - 'RescaleIntercept' in ds - ): - modality_slope_intercept = ( - float(ds.get('RescaleSlope', 1.0)), - float(ds.get('RescaleIntercept', 0.0)) - ) - self.applies_to_all_frames = ( - self.applies_to_all_frames and is_shared - ) - break + if not has_rwvm and use_modality: + + if 'ModalityLUTSequence' in image: + modality_lut = LUT.from_dataset( + image.ModalityLUTSequence[0] + ) + else: + for ds, is_shared in datasets: + if 'PixelValueTransformationSequence' in ds: + sub_ds = ds.PixelValueTransformationSequence[0] + else: + sub_ds = ds - if not has_rwvm and apply_voi_transform: + if ( + 'RescaleSlope' in sub_ds or + 'RescaleIntercept' in sub_ds + ): + modality_slope_intercept = ( + float(sub_ds.get('RescaleSlope', 1.0)), + float(sub_ds.get('RescaleIntercept', 0.0)) + ) + self.applies_to_all_frames = ( + self.applies_to_all_frames and is_shared + ) + break - if 'VOILUTSequence' in image: - voi_lut = LUT.from_dataset( - image.VOILUTSequence[0] - ) - voi_scaled_lut_data = voi_lut.get_scaled_lut_data( - output_range=voi_output_range, - dtype=output_dtype, - invert=invert, - ) - else: - for ds, is_shared in datasets: - if ( - 'WindowCenter' in ds or - 'WindowWidth' in ds - ): - voi_center = ds.WindowCenter - voi_width = ds.WindowWidth + if ( + require_modality and + modality_lut is None and + modality_slope_intercept is None + ): + raise RuntimeError( + 'A modality LUT transform is required but not found in ' + 'the image.' + ) - if 'VOILUTFunction' in ds: - voi_function = ds.VOILUTFunction + if not has_rwvm and use_voi: - if isinstance(voi_width, list): - voi_width = voi_width[ - voi_transform_index - ] - elif voi_transform_index not in (0, -1): - raise IndexError( - "Requested 'voi_transform_index' is " - "not present." - ) - - if isinstance(voi_center, list): - voi_center = voi_center[ - voi_transform_index - ] - elif voi_transform_index not in (0, -1): - raise IndexError( - "Requested 'voi_transform_index' is " - "not present." - ) - self.applies_to_all_frames = ( - self.applies_to_all_frames and is_shared + if 'VOILUTSequence' in image: + voi_lut = LUT.from_dataset( + image.VOILUTSequence[0] + ) + voi_scaled_lut_data = voi_lut.get_scaled_lut_data( + output_range=voi_output_range, + dtype=output_dtype, + invert=invert, + ) + else: + for ds, is_shared in datasets: + if 'FrameVOILUTSequence' in ds: + sub_ds = ds.FrameVOILUTSequence[0] + else: + sub_ds = ds + + if ( + 'WindowCenter' in sub_ds or + 'WindowWidth' in sub_ds + ): + voi_center = sub_ds.WindowCenter + voi_width = sub_ds.WindowWidth + + if 'VOILUTFunction' in sub_ds: + voi_function = sub_ds.VOILUTFunction + + if isinstance(voi_width, list): + voi_width = voi_width[ + voi_transform_index + ] + elif voi_transform_index not in (0, -1): + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." 
) - voi_center_width = (voi_center, voi_width) - break - - # Determine how to combine modality, voi and presentation - # transforms - if modality_slope_intercept is not None: - slope, intercept = modality_slope_intercept - - if voi_center_width is not None: - # Shift and scale the window to account for the scaling - # and intercept - center, width = voi_center_width - self._effective_window_center_width = ( - (center - intercept) / slope, - width / slope - ) - self._effective_voi_function = voi_function - self._invert = invert - - elif voi_lut is not None and voi_scaled_lut_data is not None: - # Shift and "scale" the LUT to account for the rescale - if not intercept.is_integer() and slope.is_integer(): - raise ValueError( - "Cannot apply a VOI LUT when rescale intercept " - "or slope have non-integer values." - ) - intercept = int(intercept) - slope = int(slope) - self._effective_lut_data = voi_scaled_lut_data[::slope] - adjusted_first_value = ( - (voi_lut.first_mapped_value - intercept) / slope - ) - if not adjusted_first_value.is_integer(): - raise ValueError( - "Cannot apply a VOI LUT when rescale intercept " - "or slope have non-integer values." - ) - self._effective_lut_first_mapped_value = int( - adjusted_first_value - ) - else: - # No VOI LUT transform, so the modality rescale - # operates alone - if invert: - # Adjust the parameters to invert the intensities - # within the scaled and offset range - eff_slope = -slope - if input_range is None: - # This situation will be unusual: float valued - # pixels with a rescale transform that needs to - # be inverted. For simplicity, just invert - # the pixel values - eff_intercept = -intercept - else: - imin, imax = input_range - eff_intercept = ( - slope * (imin + imax) + intercept + + if isinstance(voi_center, list): + voi_center = voi_center[ + voi_transform_index + ] + elif voi_transform_index not in (0, -1): + raise IndexError( + "Requested 'voi_transform_index' is " + "not present." ) - self._effective_slope_intercept = ( - eff_slope, eff_intercept - ) - else: - self._effective_slope_intercept = ( - modality_slope_intercept + self.applies_to_all_frames = ( + self.applies_to_all_frames and is_shared ) + voi_center_width = (voi_center, voi_width) + break - elif modality_lut is not None: - if voi_center_width is not None: - # Apply the window function to the modality LUT - self._effective_lut_data = voi_window( - array=modality_lut.lut_data, - window_center=voi_center_width[0], - window_width=voi_center_width[1], - output_range=voi_output_range, - dtype=output_dtype, - invert=invert, - ) - self._effective_lut_first_mapped_value = ( - modality_lut.first_mapped_value - ) + if ( + require_voi and + voi_center_width is None and + voi_lut is None + ): + raise RuntimeError( + 'A VOI transform is required but not found in ' + 'the image.' 
+ ) - elif voi_lut is not None and voi_scaled_lut_data is not None: - # "Compose" the two LUTs together by applying the - # second to the first - self._effective_lut_data = voi_lut.apply( - modality_lut.lut_data - ) - self._effective_lut_first_mapped_value = ( - modality_lut.first_mapped_value + # Determine how to combine modality, voi and presentation + # transforms + if modality_slope_intercept is not None: + slope, intercept = modality_slope_intercept + + if voi_center_width is not None: + # Shift and scale the window to account for the scaling + # and intercept + center, width = voi_center_width + self._effective_window_center_width = ( + (center - intercept) / slope, + width / slope + ) + self._effective_voi_function = voi_function + self._invert = invert + + elif voi_lut is not None and voi_scaled_lut_data is not None: + # Shift and "scale" the LUT to account for the rescale + if not intercept.is_integer() and slope.is_integer(): + raise ValueError( + "Cannot apply a VOI LUT when rescale intercept " + "or slope have non-integer values." ) - else: - # No VOI LUT transform so the modality lut operates alone - if invert: - self._effective_lut_data = ( - modality_lut.get_inverted_lut_data() - ) - else: - self._effective_lut_data = modality_lut.lut_data - self._effective_lut_first_mapped_value = ( - modality_lut.first_mapped_value + intercept = int(intercept) + slope = int(slope) + self._effective_lut_data = voi_scaled_lut_data[::slope] + adjusted_first_value = ( + (voi_lut.first_mapped_value - intercept) / slope + ) + if not adjusted_first_value.is_integer(): + raise ValueError( + "Cannot apply a VOI LUT when rescale intercept " + "or slope have non-integer values." ) - + self._effective_lut_first_mapped_value = int( + adjusted_first_value + ) else: - # No mdality LUT, but may still require inversion + # No VOI LUT transform, so the modality rescale + # operates alone if invert: - # Use a rescale slope and intercept to invert the - # values within their existing range + # Adjust the parameters to invert the intensities + # within the scaled and offset range + eff_slope = -slope if input_range is None: - eff_intercept = 0 + # This situation will be unusual: float valued + # pixels with a rescale transform that needs to + # be inverted. 
For simplicity, just invert + # the pixel values + eff_intercept = -intercept else: imin, imax = input_range - eff_intercept = imin + imax - + eff_intercept = ( + slope * (imin + imax) + intercept + ) self._effective_slope_intercept = ( - -1, - eff_intercept + eff_slope, eff_intercept + ) + else: + self._effective_slope_intercept = ( + modality_slope_intercept + ) + + elif modality_lut is not None: + if voi_center_width is not None: + # Apply the window function to the modality LUT + self._effective_lut_data = voi_window( + array=modality_lut.lut_data, + window_center=voi_center_width[0], + window_width=voi_center_width[1], + output_range=voi_output_range, + dtype=output_dtype, + invert=invert, + ) + self._effective_lut_first_mapped_value = ( + modality_lut.first_mapped_value + ) + + elif voi_lut is not None and voi_scaled_lut_data is not None: + # "Compose" the two LUTs together by applying the + # second to the first + self._effective_lut_data = voi_lut.apply( + modality_lut.lut_data + ) + self._effective_lut_first_mapped_value = ( + modality_lut.first_mapped_value + ) + else: + # No VOI LUT transform so the modality lut operates alone + if invert: + self._effective_lut_data = ( + modality_lut.get_inverted_lut_data() ) + else: + self._effective_lut_data = modality_lut.lut_data + self._effective_lut_first_mapped_value = ( + modality_lut.first_mapped_value + ) + + else: + # No modality LUT, but may still require inversion + if invert: + # Use a rescale slope and intercept to invert the + # values within their existing range + if input_range is None: + eff_intercept = 0 + else: + imin, imax = input_range + eff_intercept = imin + imax + + self._effective_slope_intercept = ( + -1, + eff_intercept + ) if self._effective_lut_data is not None: if self._effective_lut_data.dtype != output_dtype: @@ -522,13 +741,24 @@ def __init__( np.float64(intercept).astype(self.output_dtype), ) + if self._effective_window_center_width is not None: + if self.output_dtype.kind != 'f': + raise ValueError( + 'The VOI transformation requires a floating point data ' + 'type.' + ) + # We don't use the color_correct_frame() function here, since we cache # the ICC transform on the instance for improved performance. - if correct_color and 'ICCProfile' in image: + if use_icc and 'ICCProfile' in image: self._color_manager = ColorManager(image.ICCProfile) else: self._color_manager = None - + if require_icc: + raise RuntimeError( + 'An ICC profile is required but not found in ' + 'the image.' + ) def __call__(self, frame: np.ndarray) -> np.ndarray: """Apply the composed loss. @@ -544,7 +774,7 @@ def __call__(self, frame: np.ndarray) -> np.ndarray: Output frame after the transformation is applied. """ - if self.is_color_input: + if self._color_type == _ImageColorType.COLOR: if frame.ndim != 3 or frame.shape[2] != 3: raise ValueError( "Expected an image of shape (R, C, 3)." diff --git a/src/highdicom/pixel_transforms.py b/src/highdicom/pixel_transforms.py index f1347aed..5a2c7c88 100644 --- a/src/highdicom/pixel_transforms.py +++ b/src/highdicom/pixel_transforms.py @@ -1,4 +1,5 @@ """Functional interface for pixel transformations.""" +from enum import Enum from typing import Optional, Union, Tuple import numpy as np @@ -216,7 +217,7 @@ def _check_rescale_dtype( 'array is floating point.' ) - if input_dtype.kind == 'u' and intercept < 0.0: + if output_dtype.kind == 'u' and intercept < 0.0: raise ValueError( 'An unsigned integer data type cannot be used if the ' 'intercept is negative.' 
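
The guard above concerns the output type: with a CT-style rescale (slope 1,
intercept -1024, as in the eCT test file used below), even an all-positive
unsigned input produces negative outputs, which no unsigned output array can
represent. A minimal illustration of the failure mode being guarded against
(hypothetical values, plain NumPy; not part of the patch itself):

    import numpy as np

    stored = np.array([[0, 100]], dtype=np.uint16)
    rescaled = stored * 1.0 + (-1024.0)   # [[-1024.,  -924.]] as float64
    # Casting to an unsigned type would silently wrap rather than error:
    rescaled.astype(np.uint16)            # e.g. [[64512, 64612]]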
@@ -299,6 +300,10 @@ def voi_window(
         dtype = np.dtype(np.float64)
     else:
         dtype = np.dtype(dtype)
+        if dtype.kind != 'f':
+            raise ValueError(
+                'dtype must be a floating point data type.'
+            )
 
     window_width = dtype.type(window_width)
     window_center = dtype.type(window_center)
diff --git a/src/highdicom/seg/sop.py b/src/highdicom/seg/sop.py
index 9baacfb4..41192eb3 100644
--- a/src/highdicom/seg/sop.py
+++ b/src/highdicom/seg/sop.py
@@ -47,7 +47,7 @@
     get_module_usage,
     is_multiframe_image,
 )
-from highdicom._multiframe import MultiFrameImage
+from highdicom.image import MultiFrameImage
 from highdicom.base import _check_little_endian
 from highdicom.color import CIELabColor
 from highdicom.content import (
diff --git a/tests/test_image.py b/tests/test_image.py
new file mode 100644
index 00000000..faa3d0e9
--- /dev/null
+++ b/tests/test_image.py
@@ -0,0 +1,335 @@
+"""Tests for the highdicom.image module."""
+import pickle
+import numpy as np
+import pydicom
+from pydicom.data import get_testdata_file
+import pytest
+
+from highdicom.image import (
+    _CombinedPixelTransformation,
+    MultiFrameImage,
+)
+
+
+def test_slice_spacing():
+    ct_multiframe = pydicom.dcmread(
+        get_testdata_file('eCT_Supplemental.dcm')
+    )
+    image = MultiFrameImage.from_dataset(ct_multiframe)
+
+    expected_affine = np.array(
+        [
+            [0.0, 0.0, -0.388672, 99.5],
+            [0.0, 0.388672, 0.0, -301.5],
+            [10.0, 0.0, 0.0, -159],
+            [0.0, 0.0, 0.0, 1.0],
+        ]
+    )
+    assert image.volume_geometry is not None
+    assert image.volume_geometry.spatial_shape[0] == 2
+    assert np.array_equal(image.volume_geometry.affine, expected_affine)
+
+
+def test_slice_spacing_irregular():
+    ct_multiframe = pydicom.dcmread(
+        get_testdata_file('eCT_Supplemental.dcm')
+    )
+
+    # Mock some irregular spacings
+    ct_multiframe.PerFrameFunctionalGroupsSequence[0].\
+        PlanePositionSequence[0].ImagePositionPatient = [1.0, 0.0, 0.0]
+
+    image = MultiFrameImage.from_dataset(ct_multiframe)
+
+    assert image.volume_geometry is None
+
+
+def test_pickle():
+    # Check that the database is successfully serialized and deserialized
+    ct_multiframe = pydicom.dcmread(
+        get_testdata_file('eCT_Supplemental.dcm')
+    )
+    image = MultiFrameImage.from_dataset(ct_multiframe)
+
+    ptr = image.dimension_index_pointers[0]
+
+    pickled = pickle.dumps(image)
+
+    # Check that the pickling process has not damaged the db on the existing
+    # instance
+    # This is just an example operation that requires the db
+    assert not image.are_dimension_indices_unique([ptr])
+
+    unpickled = pickle.loads(pickled)
+    assert isinstance(unpickled, MultiFrameImage)
+
+    # Check that the database has been successfully restored in the
+    # deserialization process
+    assert not unpickled.are_dimension_indices_unique([ptr])
+
+
+def test_combined_transform_ect_rwvm():
+
+    dcm = pydicom.dcmread(
+        get_testdata_file('eCT_Supplemental.dcm')
+    )
+    rwvm_seq = (
+        dcm
+        .SharedFunctionalGroupsSequence[0]
+        .RealWorldValueMappingSequence[0]
+    )
+    slope = rwvm_seq.RealWorldValueSlope
+    intercept = rwvm_seq.RealWorldValueIntercept
+    first = rwvm_seq.RealWorldValueFirstValueMapped
+    last = rwvm_seq.RealWorldValueLastValueMapped
+
+    for output_dtype in [
+        np.int32,
+        np.int64,
+        np.float16,
+        np.float32,
+        np.float64,
+    ]:
+        tf = _CombinedPixelTransformation(
+            dcm,
+            output_dtype=output_dtype,
+        )
+
+        assert tf.applies_to_all_frames
+
+        assert tf._effective_slope_intercept == (
+            slope, intercept
+        )
+        assert tf._input_range_check == (
+            first, last
+        )
+
+        input_arr = np.array([[1, 2], [3, 4]], np.uint16)
+        expected = input_arr * slope +
intercept + + output_arr = tf(input_arr) + + assert np.array_equal(output_arr, expected) + + assert output_arr.dtype == output_dtype + + out_of_range_input = np.array( + [[last + 1, 1], [1, 1]], + np.uint16 + ) + msg = 'Array contains value outside the valid range.' + with pytest.raises(ValueError, match=msg): + tf(out_of_range_input) + + msg = ( + 'An unsigned integer data type cannot be used if the intercept is ' + 'negative.' + ) + with pytest.raises(ValueError, match=msg): + _CombinedPixelTransformation( + dcm, + output_dtype=np.uint32, + ) + + msg = ( + f'Datatype int16 does not have capacity for values ' + f'with slope 1.00 and intercept -1024.0.' + ) + with pytest.raises(ValueError, match=msg): + _CombinedPixelTransformation( + dcm, + output_dtype=np.int16, + ) + + # Delete the real world value map + del ( + dcm + .SharedFunctionalGroupsSequence[0] + .RealWorldValueMappingSequence + ) + msg = ( + 'A real-world value map is required but not found in the image.' + ) + with pytest.raises(RuntimeError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_real_world_transform=True, + ) + + +def test_combined_transform_ect_modality(): + + dcm = pydicom.dcmread( + get_testdata_file('eCT_Supplemental.dcm') + ) + pix_value_seq = ( + dcm + .SharedFunctionalGroupsSequence[0] + .PixelValueTransformationSequence[0] + ) + slope = pix_value_seq.RescaleSlope + intercept = pix_value_seq.RescaleIntercept + + for output_dtype in [ + np.int32, + np.int64, + np.float16, + np.float32, + np.float64, + ]: + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_real_world_transform=False, + ) + + assert tf.applies_to_all_frames + + assert tf._effective_slope_intercept == ( + slope, intercept + ) + assert tf._input_range_check is None + + input_arr = np.array([[1, 2], [3, 4]], np.uint16) + expected = input_arr * slope + intercept + + output_arr = tf(input_arr) + + assert np.array_equal(output_arr, expected) + + assert output_arr.dtype == output_dtype + + # Same thing should work by requiring the modality LUT + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_modality_transform=True, + ) + + assert tf.applies_to_all_frames + + assert tf._effective_slope_intercept == ( + slope, intercept + ) + assert tf._input_range_check is None + + msg = ( + 'An unsigned integer data type cannot be used if the intercept is ' + 'negative.' + ) + with pytest.raises(ValueError, match=msg): + _CombinedPixelTransformation( + dcm, + output_dtype=np.uint32, + ) + + msg = ( + f'Datatype int16 does not have capacity for values ' + f'with slope 1.00 and intercept -1024.0.' + ) + with pytest.raises(ValueError, match=msg): + _CombinedPixelTransformation( + dcm, + output_dtype=np.int16, + ) + + # Delete the modality transform + del ( + dcm + .SharedFunctionalGroupsSequence[0] + .PixelValueTransformationSequence + ) + msg = ( + 'A modality LUT transform is required but not found in ' + 'the image.' 
+ ) + with pytest.raises(RuntimeError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_modality_transform=True, + ) + + +def test_combined_transform_ect_with_voi(): + + dcm = pydicom.dcmread( + get_testdata_file('eCT_Supplemental.dcm') + ) + pix_value_seq = ( + dcm + .SharedFunctionalGroupsSequence[0] + .PixelValueTransformationSequence[0] + ) + slope = pix_value_seq.RescaleSlope + intercept = pix_value_seq.RescaleIntercept + frame_voi_seq = ( + dcm + .SharedFunctionalGroupsSequence[0] + .FrameVOILUTSequence[0] + ) + center = frame_voi_seq.WindowCenter + width = frame_voi_seq.WindowWidth + + lower = center - width // 2 + upper = center + width // 2 + + for output_dtype in [ + np.float16, + np.float32, + np.float64, + ]: + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_real_world_transform=False, + apply_voi_transform=None, + ) + + assert tf.applies_to_all_frames + + assert tf._effective_window_center_width == ( + center - intercept, width / slope + ) + assert tf._input_range_check is None + + input_arr = np.array( + [ + [lower - intercept, center - intercept], + [upper - intercept - 1, upper - intercept - 1] + ], + np.uint16 + ) + expected = np.array([[0.0, 0.5], [1.0, 1.0]]) + + output_arr = tf(input_arr) + + assert np.allclose(output_arr, expected, atol=0.05) + + assert output_arr.dtype == output_dtype + + msg = ( + 'The VOI transformation requires a floating point data type.' + ) + with pytest.raises(ValueError, match=msg): + _CombinedPixelTransformation( + dcm, + output_dtype=np.int32, + apply_real_world_transform=False, + apply_voi_transform=None, + ) + + # Delete the voi transform + del ( + dcm + .SharedFunctionalGroupsSequence[0] + .PixelValueTransformationSequence + ) + msg = ( + 'A modality LUT transform is required but not found in ' + 'the image.' 
+ ) + with pytest.raises(RuntimeError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_modality_transform=True, + ) diff --git a/tests/test_multiframe.py b/tests/test_multiframe.py deleted file mode 100644 index 696d680f..00000000 --- a/tests/test_multiframe.py +++ /dev/null @@ -1,64 +0,0 @@ -"""Tests for the highdicom._multiframe module.""" -import pickle -import numpy as np -from pydicom import dcmread -from pydicom.data import get_testdata_file - -from highdicom._multiframe import MultiFrameImage - - -def test_slice_spacing(): - ct_multiframe = dcmread( - get_testdata_file('eCT_Supplemental.dcm') - ) - image = MultiFrameImage.from_dataset(ct_multiframe) - - expected_affine = np.array( - [ - [0.0, 0.0, -0.388672, 99.5], - [0.0, 0.388672, 0.0, -301.5], - [10.0, 0.0, 0.0, -159], - [0.0, 0.0, 0.0, 1.0], - ] - ) - assert image.volume_geometry is not None - assert image.volume_geometry.spatial_shape[0] == 2 - assert np.array_equal(image.volume_geometry.affine, expected_affine) - - -def test_slice_spacing_irregular(): - ct_multiframe = dcmread( - get_testdata_file('eCT_Supplemental.dcm') - ) - - # Mock some iregular spacings - ct_multiframe.PerFrameFunctionalGroupsSequence[0].\ - PlanePositionSequence[0].ImagePositionPatient = [1.0, 0.0, 0.0] - - image = MultiFrameImage.from_dataset(ct_multiframe) - - assert image.volume_geometry is None - - -def test_pickle(): - # Check that the database is successfully serialized and deserialized - ct_multiframe = dcmread( - get_testdata_file('eCT_Supplemental.dcm') - ) - image = MultiFrameImage.from_dataset(ct_multiframe) - - ptr = image.dimension_index_pointers[0] - - pickled = pickle.dumps(image) - - # Check that the pickling process has not damaged the db on the existing - # instance - # This is just an example operation that requires the db - assert not image.are_dimension_indices_unique([ptr]) - - unpickled = pickle.loads(pickled) - assert isinstance(unpickled, MultiFrameImage) - - # Check that the database has been successfully restored in the - # deserialization process - assert not unpickled.are_dimension_indices_unique([ptr]) From 9909cd4fc10a695b8443714047f0e884932d5719 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Mon, 23 Dec 2024 21:42:32 -0500 Subject: [PATCH 86/93] add further combined pixel transform tests --- src/highdicom/content.py | 10 ++++-- src/highdicom/image.py | 1 + tests/test_image.py | 76 +++++++++++++++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 3 deletions(-) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index 0fdc9cae..bbd7b408 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -1954,8 +1954,14 @@ def lut_data(self) -> np.ndarray: if bits_per_entry == 8 and length % 2 == 1 and len(data) == length + 1: data = data[:-1] - # The LUT data attributes have VR OW (16-bit other words) - array = np.frombuffer(data, dtype=dtype) + # LUT Data may have value representation of either US (which pydicom + # will return as a list of ints) or OW, which pydicom will return as a + # bytes object + if self['LUTData'].VR == 'US': + array = np.array(data, dtype=dtype) + else: + # The LUT data attributes have VR OW (16-bit other words) + array = np.frombuffer(data, dtype=dtype) if len(array) != length: raise RuntimeError( 'Length of LUTData does not match the value expected from the ' diff --git a/src/highdicom/image.py b/src/highdicom/image.py index 20809dd6..c4d124fd 100644 --- a/src/highdicom/image.py +++ b/src/highdicom/image.py @@ -4,6 +4,7 @@ from copy import deepcopy from 
enum import Enum import logging +from os import readlink import sqlite3 from typing import ( Any, diff --git a/tests/test_image.py b/tests/test_image.py index faa3d0e9..c670ca01 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -2,7 +2,7 @@ import pickle import numpy as np import pydicom -from pydicom.data import get_testdata_file +from pydicom.data import get_testdata_file, get_testdata_files import pytest from highdicom.image import ( @@ -333,3 +333,77 @@ def test_combined_transform_ect_with_voi(): dcm, apply_modality_transform=True, ) + + +def test_combined_transform_modality_lut(): + # A test file that has a modality LUT + f = get_testdata_file('mlut_18.dcm') + dcm = pydicom.dcmread(f) + lut_data = dcm.ModalityLUTSequence[0].LUTData + + input_arr = np.array([[-2048, -2047], [-2046, -2045]], np.int16) + expected = np.array( + [ + [lut_data[0], lut_data[1]], + [lut_data[2], lut_data[3]], + ], + ) + + for output_dtype in [ + np.int32, + np.int64, + np.float16, + np.float32, + np.float64, + ]: + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype + ) + assert tf._effective_lut_data is not None + + output_arr = tf(input_arr) + assert np.array_equal(output_arr, expected) + assert output_arr.dtype == output_dtype + + msg = ( + 'A VOI transform is required but not found in the image.' + ) + with pytest.raises(RuntimeError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_voi_transform=True, + ) + + # Add a voi lut + dcm.WindowCenter = 24 + dcm.WindowWidth = 24 + + tf = _CombinedPixelTransformation(dcm, apply_voi_transform=None) + output_arr = tf(input_arr) + expected = np.array([[0.0, 0.17391304], [0.86956522, 1.0]]) + assert np.allclose(output_arr, expected) + + +def test_combined_transform_voi_lut(): + # A test file that has a modality LUT + f = get_testdata_file('vlut_04.dcm') + dcm = pydicom.dcmread(f) + lut_data = dcm.VOILUTSequence[0].LUTData + + for output_dtype in [ + np.float16, + np.float32, + np.float64, + ]: + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_voi_transform=None, + ) + # TODO this is failing because the code doesn't handle VOI lut when no modality lut is found + assert tf._effective_lut_data is not None + + output_arr = tf(input_arr) + assert np.array_equal(output_arr, expected) + assert output_arr.dtype == output_dtype From 7a796d3220d05077c4c8cfc50f7622a7d083d524 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 24 Dec 2024 10:26:46 -0500 Subject: [PATCH 87/93] More tests and fixes for combined pixel transform --- src/highdicom/image.py | 108 ++++++++++++++++--------------- tests/test_image.py | 141 +++++++++++++++++++++++++++++------------ 2 files changed, 155 insertions(+), 94 deletions(-) diff --git a/src/highdicom/image.py b/src/highdicom/image.py index c4d124fd..b1e42776 100644 --- a/src/highdicom/image.py +++ b/src/highdicom/image.py @@ -604,8 +604,50 @@ def __init__( # Determine how to combine modality, voi and presentation # transforms - if modality_slope_intercept is not None: - slope, intercept = modality_slope_intercept + if modality_lut is not None and not has_rwvm: + if voi_center_width is not None: + # Apply the window function to the modality LUT + self._effective_lut_data = voi_window( + array=modality_lut.lut_data, + window_center=voi_center_width[0], + window_width=voi_center_width[1], + output_range=voi_output_range, + dtype=output_dtype, + invert=invert, + ) + self._effective_lut_first_mapped_value = ( + modality_lut.first_mapped_value + ) + + elif voi_lut is not None and 
voi_scaled_lut_data is not None: + # "Compose" the two LUTs together by applying the + # second to the first + self._effective_lut_data = voi_lut.apply( + modality_lut.lut_data + ) + self._effective_lut_first_mapped_value = ( + modality_lut.first_mapped_value + ) + else: + # No VOI LUT transform so the modality lut operates alone + if invert: + self._effective_lut_data = ( + modality_lut.get_inverted_lut_data() + ) + else: + self._effective_lut_data = modality_lut.lut_data + self._effective_lut_first_mapped_value = ( + modality_lut.first_mapped_value + ) + + elif not has_rwvm: + # modality LUT either doesn't exist or is a rescale/slope + if modality_slope_intercept is not None: + slope, intercept = modality_slope_intercept + else: + # No rescale slope found in dataset, so treat them as the + # 'identity' values + slope, intercept = (1.0, 0.0) if voi_center_width is not None: # Shift and scale the window to account for the scaling @@ -627,7 +669,10 @@ def __init__( ) intercept = int(intercept) slope = int(slope) - self._effective_lut_data = voi_scaled_lut_data[::slope] + if slope != 1: + self._effective_lut_data = voi_scaled_lut_data[::slope] + else: + self._effective_lut_data = voi_scaled_lut_data adjusted_first_value = ( (voi_lut.first_mapped_value - intercept) / slope ) @@ -665,58 +710,6 @@ def __init__( modality_slope_intercept ) - elif modality_lut is not None: - if voi_center_width is not None: - # Apply the window function to the modality LUT - self._effective_lut_data = voi_window( - array=modality_lut.lut_data, - window_center=voi_center_width[0], - window_width=voi_center_width[1], - output_range=voi_output_range, - dtype=output_dtype, - invert=invert, - ) - self._effective_lut_first_mapped_value = ( - modality_lut.first_mapped_value - ) - - elif voi_lut is not None and voi_scaled_lut_data is not None: - # "Compose" the two LUTs together by applying the - # second to the first - self._effective_lut_data = voi_lut.apply( - modality_lut.lut_data - ) - self._effective_lut_first_mapped_value = ( - modality_lut.first_mapped_value - ) - else: - # No VOI LUT transform so the modality lut operates alone - if invert: - self._effective_lut_data = ( - modality_lut.get_inverted_lut_data() - ) - else: - self._effective_lut_data = modality_lut.lut_data - self._effective_lut_first_mapped_value = ( - modality_lut.first_mapped_value - ) - - else: - # No modality LUT, but may still require inversion - if invert: - # Use a rescale slope and intercept to invert the - # values within their existing range - if input_range is None: - eff_intercept = 0 - else: - imin, imax = input_range - eff_intercept = imin + imax - - self._effective_slope_intercept = ( - -1, - eff_intercept - ) - if self._effective_lut_data is not None: if self._effective_lut_data.dtype != output_dtype: self._effective_lut_data = ( @@ -728,6 +721,11 @@ def __init__( 'Images with floating point data may not contain LUTs.' 
) + # Slope/intercept of 1/0 is just a no-op + if self._effective_slope_intercept is not None: + if self._effective_slope_intercept == (1.0, 0.0): + self._effective_slope_intercept = None + if self._effective_slope_intercept is not None: slope, intercept = self._effective_slope_intercept _check_rescale_dtype( diff --git a/tests/test_image.py b/tests/test_image.py index c670ca01..bc9dc308 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -131,6 +131,26 @@ def test_combined_transform_ect_rwvm(): output_dtype=np.uint32, ) + msg = ( + 'Palette color transform is required but the image is not a palette ' + 'color image.' + ) + with pytest.raises(ValueError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_palette_color_lut=True, + ) + + msg = ( + 'ICC profile is required but the image is not a color or palette ' + 'color image.' + ) + with pytest.raises(ValueError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_icc_profile=True, + ) + msg = ( f'Datatype int16 does not have capacity for values ' f'with slope 1.00 and intercept -1024.0.' @@ -278,34 +298,45 @@ def test_combined_transform_ect_with_voi(): np.float32, np.float64, ]: - tf = _CombinedPixelTransformation( - dcm, - output_dtype=output_dtype, - apply_real_world_transform=False, - apply_voi_transform=None, - ) - - assert tf.applies_to_all_frames - - assert tf._effective_window_center_width == ( - center - intercept, width / slope - ) - assert tf._input_range_check is None - - input_arr = np.array( - [ - [lower - intercept, center - intercept], - [upper - intercept - 1, upper - intercept - 1] - ], - np.uint16 - ) - expected = np.array([[0.0, 0.5], [1.0, 1.0]]) - - output_arr = tf(input_arr) - - assert np.allclose(output_arr, expected, atol=0.05) - - assert output_arr.dtype == output_dtype + for output_range in [ + (0., 1.), + (-10.0, 10.0), + (50., 100.0), + ]: + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_real_world_transform=False, + apply_voi_transform=None, + voi_output_range=output_range, + ) + + assert tf.applies_to_all_frames + + assert tf._effective_window_center_width == ( + center - intercept, width / slope + ) + assert tf._input_range_check is None + assert tf._effective_slope_intercept is None + assert tf._color_manager is None + assert tf._voi_output_range == output_range + + input_arr = np.array( + [ + [lower - intercept, center - intercept], + [upper - intercept - 1, upper - intercept - 1] + ], + np.uint16 + ) + expected = np.array([[0.0, 0.5], [1.0, 1.0]]) + output_scale = output_range[1] - output_range[0] + expected = expected * output_scale + output_range[0] + + output_arr = tf(input_arr) + + assert np.allclose(output_arr, expected, atol=0.5) + + assert output_arr.dtype == output_dtype msg = ( 'The VOI transformation requires a floating point data type.' 
@@ -361,6 +392,10 @@ def test_combined_transform_modality_lut(): output_dtype=output_dtype ) assert tf._effective_lut_data is not None + assert tf._effective_window_center_width is None + assert tf._effective_slope_intercept is None + assert tf._color_manager is None + assert tf._input_range_check is None output_arr = tf(input_arr) assert np.array_equal(output_arr, expected) @@ -390,20 +425,48 @@ def test_combined_transform_voi_lut(): f = get_testdata_file('vlut_04.dcm') dcm = pydicom.dcmread(f) lut_data = dcm.VOILUTSequence[0].LUTData + first_mapped_value = dcm.VOILUTSequence[0].LUTDescriptor[1] for output_dtype in [ np.float16, np.float32, np.float64, ]: - tf = _CombinedPixelTransformation( - dcm, - output_dtype=output_dtype, - apply_voi_transform=None, - ) - # TODO this is failing because the code doesn't handle VOI lut when no modality lut is found - assert tf._effective_lut_data is not None - - output_arr = tf(input_arr) - assert np.array_equal(output_arr, expected) - assert output_arr.dtype == output_dtype + for output_range in [ + (0., 1.), + (-10.0, 10.0), + (50., 100.0), + ]: + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_voi_transform=None, + voi_output_range=output_range, + ) + assert tf._effective_lut_data is not None + assert tf._effective_window_center_width is None + assert tf._effective_slope_intercept is None + assert tf._color_manager is None + assert tf._input_range_check is None + assert tf._voi_output_range == output_range + + input_arr = np.array( + [ + [first_mapped_value, first_mapped_value + 1], + [first_mapped_value + 2, first_mapped_value + 3], + ] + ) + output_scale = ( + (max(lut_data) - min(lut_data)) / + (output_range[1] - output_range[0]) + ) + expected = np.array( + [ + [lut_data[0], lut_data[1]], + [lut_data[2], lut_data[3]], + ] + ) / output_scale + output_range[0] + + output_arr = tf(input_arr) + assert np.allclose(output_arr, expected, atol=0.1) + assert output_arr.dtype == output_dtype From 3951eac29a445f612bce7825f309e0aaed89f216 Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 24 Dec 2024 11:45:27 -0500 Subject: [PATCH 88/93] Tests for monochrome1 --- src/highdicom/image.py | 12 +++- tests/test_image.py | 142 ++++++++++++++++++++++++++++++++++++++++- 2 files changed, 150 insertions(+), 4 deletions(-) diff --git a/src/highdicom/image.py b/src/highdicom/image.py index b1e42776..a7af3faf 100644 --- a/src/highdicom/image.py +++ b/src/highdicom/image.py @@ -360,6 +360,11 @@ def __init__( 'a color or palette color image.' ) + if not isinstance(apply_presentation_lut, bool): + raise TypeError( + "Parameter 'apply_presentation_lut' must have type bool." 
+ ) + output_min, output_max = voi_output_range if output_min >= output_max: raise ValueError( @@ -411,7 +416,7 @@ def __init__( self.input_dtype = np.dtype(np.uint16) elif image.BitsAllocated == 32: self.input_dtype = np.dtype(np.uint32) - input_range = (0, 2 ** image.BitsStored) + input_range = (0, 2 ** image.BitsStored - 1) if self._color_type == _ImageColorType.PALETTE_COLOR: if use_palette_color: @@ -808,8 +813,6 @@ def __call__(self, frame: np.ndarray) -> np.ndarray: frame = frame * slope if intercept != 0.0: frame = frame + intercept - if frame.dtype != self.output_dtype: - frame = frame.astype(self.output_dtype) elif self._effective_window_center_width is not None: frame = voi_window( @@ -824,6 +827,9 @@ def __call__(self, frame: np.ndarray) -> np.ndarray: if self._color_manager is not None: return self._color_manager.transform_frame(frame) + if frame.dtype != self.output_dtype: + frame = frame.astype(self.output_dtype) + return frame diff --git a/tests/test_image.py b/tests/test_image.py index bc9dc308..43ef500a 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -9,6 +9,9 @@ _CombinedPixelTransformation, MultiFrameImage, ) +from highdicom.pixel_transforms import ( + voi_window, +) def test_slice_spacing(): @@ -113,6 +116,9 @@ def test_combined_transform_ect_rwvm(): assert output_arr.dtype == output_dtype + full_output_arr = tf(dcm.pixel_array[0]) + assert full_output_arr.dtype == output_dtype + out_of_range_input = np.array( [[last + 1, 1], [1, 1]], np.uint16 @@ -219,6 +225,9 @@ def test_combined_transform_ect_modality(): assert output_arr.dtype == output_dtype + full_output_arr = tf(dcm.pixel_array[0]) + assert full_output_arr.dtype == output_dtype + # Same thing should work by requiring the modality LUT tf = _CombinedPixelTransformation( dcm, @@ -233,6 +242,9 @@ def test_combined_transform_ect_modality(): ) assert tf._input_range_check is None + full_output_arr = tf(dcm.pixel_array[0]) + assert full_output_arr.dtype == output_dtype + msg = ( 'An unsigned integer data type cannot be used if the intercept is ' 'negative.' @@ -338,6 +350,9 @@ def test_combined_transform_ect_with_voi(): assert output_arr.dtype == output_dtype + full_output_arr = tf(dcm.pixel_array[0]) + assert full_output_arr.dtype == output_dtype + msg = ( 'The VOI transformation requires a floating point data type.' ) @@ -401,6 +416,9 @@ def test_combined_transform_modality_lut(): assert np.array_equal(output_arr, expected) assert output_arr.dtype == output_dtype + full_output_arr = tf(dcm.pixel_array) + assert full_output_arr.dtype == output_dtype + msg = ( 'A VOI transform is required but not found in the image.' 
) @@ -421,7 +439,7 @@ def test_combined_transform_modality_lut(): def test_combined_transform_voi_lut(): - # A test file that has a modality LUT + # A test file that has a voi LUT f = get_testdata_file('vlut_04.dcm') dcm = pydicom.dcmread(f) lut_data = dcm.VOILUTSequence[0].LUTData @@ -470,3 +488,125 @@ def test_combined_transform_voi_lut(): output_arr = tf(input_arr) assert np.allclose(output_arr, expected, atol=0.1) assert output_arr.dtype == output_dtype + + full_output_arr = tf(dcm.pixel_array) + assert full_output_arr.dtype == output_dtype + + +def test_combined_transform_monochrome(): + # A test file that has a modality LUT + f = get_testdata_file('RG1_UNCR.dcm') + dcm = pydicom.dcmread(f) + + center_width = (dcm.WindowCenter, dcm.WindowWidth) + + max_value = 2 ** dcm.BitsStored - 1 + + for output_dtype in [ + np.int32, + np.int64, + np.float16, + np.float32, + np.float64, + ]: + # Default behavior; inversion but no VOI + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + ) + assert tf._effective_slope_intercept == (-1, max_value) + assert tf._effective_lut_data is None + assert tf._effective_window_center_width is None + assert tf._color_manager is None + assert tf._input_range_check is None + + output_arr = tf(dcm.pixel_array) + + expected = max_value - dcm.pixel_array + expected = expected.astype(output_dtype) + if output_dtype != np.float16: + # float16 seems to give a lot of precision related errors in this + # range + assert np.array_equal(output_arr, expected) + assert output_arr.dtype == output_dtype + + # No inversion + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_presentation_lut=False, + ) + assert tf._effective_slope_intercept is None + assert tf._effective_lut_data is None + assert tf._effective_window_center_width is None + assert tf._color_manager is None + assert tf._input_range_check is None + + output_arr = tf(dcm.pixel_array) + + expected = dcm.pixel_array + expected = expected.astype(output_dtype) + assert np.array_equal(output_arr, expected) + assert output_arr.dtype == output_dtype + + for output_dtype in [ + np.float16, + np.float32, + np.float64, + ]: + # VOI and inversion + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_voi_transform=None, + ) + assert tf._effective_slope_intercept is None + assert tf._effective_lut_data is None + assert tf._effective_window_center_width == center_width + assert tf._color_manager is None + assert tf._input_range_check is None + assert tf._invert + + output_arr = tf(dcm.pixel_array) + + expected = voi_window( + dcm.pixel_array, + window_width=dcm.WindowWidth, + window_center=dcm.WindowCenter, + dtype=output_dtype, + invert=True, + ) + if output_dtype != np.float16: + # float16 seems to give a lot of precision related errors in this + # range + assert np.array_equal(output_arr, expected) + assert output_arr.dtype == output_dtype + + # VOI and no inversion + tf = _CombinedPixelTransformation( + dcm, + output_dtype=output_dtype, + apply_voi_transform=None, + apply_presentation_lut=False, + ) + assert tf._effective_slope_intercept is None + assert tf._effective_lut_data is None + assert tf._effective_window_center_width == center_width + assert tf._color_manager is None + assert tf._input_range_check is None + assert not tf._invert + + output_arr = tf(dcm.pixel_array) + + expected = voi_window( + dcm.pixel_array, + window_width=dcm.WindowWidth, + window_center=dcm.WindowCenter, + dtype=output_dtype, + invert=False, + ) + if output_dtype != 
np.float16: + # float16 seems to give a lot of precision related errors in this + # range + assert np.array_equal(output_arr, expected) + assert output_arr.dtype == output_dtype From afd3d2c7f4f7087c49e492123676e5c471b004aa Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Tue, 24 Dec 2024 13:48:25 -0500 Subject: [PATCH 89/93] More tests and fixes for pixel transforms --- src/highdicom/_module_utils.py | 20 ++++- src/highdicom/content.py | 4 +- src/highdicom/image.py | 58 ++++++++----- src/highdicom/pixel_transforms.py | 8 +- tests/test_image.py | 140 +++++++++++++++++++++++++++++- 5 files changed, 200 insertions(+), 30 deletions(-) diff --git a/src/highdicom/_module_utils.py b/src/highdicom/_module_utils.py index 8a5f64ec..4586d9ad 100644 --- a/src/highdicom/_module_utils.py +++ b/src/highdicom/_module_utils.py @@ -216,7 +216,11 @@ def get_module_usage( return None -def is_attribute_in_iod(attribute: str, sop_class_uid: str) -> bool: +def is_attribute_in_iod( + attribute: str, + sop_class_uid: str, + exclude_path_elements: Sequence[str] | None = None, +) -> bool: """Check whether an attribute is present within an IOD. Parameters @@ -225,6 +229,9 @@ def is_attribute_in_iod(attribute: str, sop_class_uid: str) -> bool: Keyword for the attribute sop_class_uid: str SOP Class UID identifying the IOD. + exclude_path_elements: Sequence[str] | None, optional + If any of these elements are found anywhere in the attribute's path, + that occurrence is excluded. Returns ------- @@ -247,6 +254,11 @@ def is_attribute_in_iod(attribute: str, sop_class_uid: str) -> bool: for module in IOD_MODULE_MAP[iod_name]: module_attributes = MODULE_ATTRIBUTE_MAP[module['key']] for attr in module_attributes: + if exclude_path_elements is not None: + if any( + p in exclude_path_elements for p in attr['path'] + ): + continue if attr['keyword'] == attribute: return True @@ -279,7 +291,11 @@ def does_iod_have_pixel_data(sop_class_uid: str) -> bool: 'DoubleFloatPixelData', ] return any( - is_attribute_in_iod(attr, sop_class_uid) for attr in pixel_attrs + is_attribute_in_iod( + attr, + sop_class_uid, + exclude_path_elements=['IconImageSequence'], + ) for attr in pixel_attrs ) diff --git a/src/highdicom/content.py b/src/highdicom/content.py index bbd7b408..16641221 100644 --- a/src/highdicom/content.py +++ b/src/highdicom/content.py @@ -2090,7 +2090,7 @@ def get_scaled_lut_data( scale_factor = dtype.type(scale_factor) output_min = dtype.type(output_min) - lut_data = lut_data.astype(dtype) + lut_data = lut_data.astype(dtype, casting='safe') if invert: lut_data = -lut_data min = -max.astype(dtype) @@ -2098,7 +2098,7 @@ def get_scaled_lut_data( if min != 0: lut_data = lut_data - min - lut_data = lut_data.astype(dtype) * scale_factor + lut_data = lut_data * scale_factor if output_min != 0.0: lut_data = lut_data + output_min diff --git a/src/highdicom/image.py b/src/highdicom/image.py index a7af3faf..3424dba6 100644 --- a/src/highdicom/image.py +++ b/src/highdicom/image.py @@ -4,7 +4,6 @@ from copy import deepcopy from enum import Enum import logging -from os import readlink import sqlite3 from typing import ( Any, @@ -32,7 +31,10 @@ from pydicom.multival import MultiValue from pydicom.uid import ParametricMapStorage -from highdicom._module_utils import is_multiframe_image +from highdicom._module_utils import ( + does_iod_have_pixel_data, + is_multiframe_image, +) from highdicom.base import SOPClass, _check_little_endian from highdicom.color import ColorManager from highdicom.content import LUT @@ -250,6 +252,11 @@ def __init__( 
            raised if it is not present.
 
         """
+        if not does_iod_have_pixel_data(image.SOPClassUID):
+            raise ValueError(
+                'Input dataset does not represent an image.'
+            )
+
         # TODO: choose VOI by explanation?
         # TODO: how to combine with multiframe?
         photometric_interpretation = image.PhotometricInterpretation
@@ -259,7 +266,7 @@ def __init__(
             'MONOCHROME2',
         ):
             self._color_type = _ImageColorType.MONOCHROME
-        elif photometric_interpretation == 'PALETTE_COLOR':
+        elif photometric_interpretation == 'PALETTE COLOR':
             self._color_type = _ImageColorType.PALETTE_COLOR
         else:
             self._color_type = _ImageColorType.COLOR
@@ -424,9 +431,10 @@ def __init__(
                     # TODO
                     raise RuntimeError("Segmented LUTs are not implemented.")
 
-                self._first_mapped_value, self._effective_lut = (
-                    _get_combined_palette_color_lut(image)
-                )
+                (
+                    self._effective_lut_first_mapped_value,
+                    self._effective_lut_data
+                ) = _get_combined_palette_color_lut(image)
 
         elif self._color_type == _ImageColorType.MONOCHROME:
             # Create a list of all datasets to check for transforms for
@@ -715,12 +723,28 @@ def __init__(
                         modality_slope_intercept
                     )
 
+        # We don't use the color_correct_frame() function here, since we cache
+        # the ICC transform on the instance for improved performance.
+        if use_icc and 'ICCProfile' in image:
+            self._color_manager = ColorManager(image.ICCProfile)
+        else:
+            self._color_manager = None
+            if require_icc:
+                raise RuntimeError(
+                    'An ICC profile is required but not found in '
+                    'the image.'
+                )
+
         if self._effective_lut_data is not None:
-            if self._effective_lut_data.dtype != output_dtype:
-                self._effective_lut_data = (
-                    self._effective_lut_data.astype(output_dtype)
+            if self._color_manager is None:
+                # If using palette color LUT, need to keep pixels as integers
+                # to pass into color manager, otherwise eagerly convert the
+                # LUT data to the requested output type
+                if self._effective_lut_data.dtype != output_dtype:
+                    self._effective_lut_data = (
+                        self._effective_lut_data.astype(output_dtype)
+                    )
 
             if self.input_dtype.kind == 'f':
                 raise ValueError(
                     'Images with floating point data may not contain LUTs.'
@@ -752,18 +776,6 @@ def __init__(
                     'type.'
                 )
 
-        # We don't use the color_correct_frame() function here, since we cache
-        # the ICC transform on the instance for improved performance.
-        if use_icc and 'ICCProfile' in image:
-            self._color_manager = ColorManager(image.ICCProfile)
-        else:
-            self._color_manager = None
-        if require_icc:
-            raise RuntimeError(
-                'An ICC profile is required but not found in '
-                'the image.'
-            )
-
     def __call__(self, frame: np.ndarray) -> np.ndarray:
         """Apply the composed loss.
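
Reading the hunks around this point together: by the end of this commit the
``__call__`` pipeline applies, in order, whichever LUT, slope/intercept, or
window was composed in ``__init__``, then the cached ICC color manager, and
finally the cast to the requested output dtype. Roughly (a paraphrase of the
surrounding hunks with hypothetical helper names, not a verbatim copy of the
library code):

    def combined(frame):
        frame = lut_or_rescale_or_window(frame)  # whichever was composed
        if color_manager is not None:
            frame = color_manager.transform_frame(frame)
        return frame.astype(output_dtype)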
@@ -825,7 +837,7 @@ def __call__(self, frame: np.ndarray) -> np.ndarray: ) if self._color_manager is not None: - return self._color_manager.transform_frame(frame) + frame = self._color_manager.transform_frame(frame) if frame.dtype != self.output_dtype: frame = frame.astype(self.output_dtype) diff --git a/src/highdicom/pixel_transforms.py b/src/highdicom/pixel_transforms.py index 5a2c7c88..b6628b71 100644 --- a/src/highdicom/pixel_transforms.py +++ b/src/highdicom/pixel_transforms.py @@ -56,11 +56,13 @@ def _parse_palette_color_lut_attributes(dataset: Dataset) -> Tuple[ if number_of_entries == 0: number_of_entries = 2 ** 16 + strip_final_byte = False if bits_per_entry == 8: expected_num_bytes = number_of_entries - if number_of_entries % 2 == 1: + if expected_num_bytes % 2 == 1: # Account for padding byte - number_of_entries += 1 + expected_num_bytes += 1 + strip_final_byte = True elif bits_per_entry == 16: expected_num_bytes = number_of_entries * 2 else: @@ -108,6 +110,8 @@ def _parse_palette_color_lut_attributes(dataset: Dataset) -> Tuple[ raise RuntimeError( "LUT data has incorrect length" ) + if strip_final_byte: + lut_bytes = lut_bytes[:-1] lut_data.append(lut_bytes) return ( diff --git a/tests/test_image.py b/tests/test_image.py index 43ef500a..0eff58ce 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -1,10 +1,15 @@ """Tests for the highdicom.image module.""" +from pathlib import Path import pickle import numpy as np import pydicom +import pkgutil from pydicom.data import get_testdata_file, get_testdata_files import pytest +from highdicom._module_utils import ( + does_iod_have_pixel_data, +) from highdicom.image import ( _CombinedPixelTransformation, MultiFrameImage, @@ -12,6 +17,9 @@ from highdicom.pixel_transforms import ( voi_window, ) +from highdicom.pr.content import ( + _add_icc_profile_attributes, +) def test_slice_spacing(): @@ -446,7 +454,6 @@ def test_combined_transform_voi_lut(): first_mapped_value = dcm.VOILUTSequence[0].LUTDescriptor[1] for output_dtype in [ - np.float16, np.float32, np.float64, ]: @@ -610,3 +617,134 @@ def test_combined_transform_monochrome(): # range assert np.array_equal(output_arr, expected) assert output_arr.dtype == output_dtype + + +def test_combined_transform_color(): + # A simple color image test file, with no ICC profile + f = get_testdata_file('color-pl.dcm') + dcm = pydicom.dcmread(f) + + # Not quite sure why this is needed... + # The original UID is not recognized + dcm.SOPClassUID = pydicom.uid.UltrasoundImageStorage + + tf = _CombinedPixelTransformation(dcm) + assert tf._effective_slope_intercept is None + assert tf._effective_lut_data is None + assert tf._effective_window_center_width is None + assert tf._color_manager is None + assert tf._input_range_check is None + assert not tf._invert + + output_arr = tf(dcm.pixel_array) + assert np.array_equal(output_arr, dcm.pixel_array) + + msg = "An ICC profile is required but not found in the image." 
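+    # color-pl.dcm carries no ICCProfile attribute (see comment above), so
+    # requiring the profile should fail at construction time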
+    with pytest.raises(RuntimeError, match=msg):
+        _CombinedPixelTransformation(
+            dcm,
+            apply_icc_profile=True,
+        )
+
+    # Add an ICC profile
+    # Use default sRGB profile
+    icc_profile = pkgutil.get_data(
+        'highdicom',
+        '_icc_profiles/sRGB_v4_ICC_preference.icc'
+    )
+    _add_icc_profile_attributes(
+        dcm,
+        icc_profile=icc_profile,
+    )
+    tf = _CombinedPixelTransformation(dcm)
+    assert tf._effective_slope_intercept is None
+    assert tf._effective_lut_data is None
+    assert tf._effective_window_center_width is None
+    assert tf._color_manager is not None
+    assert tf._input_range_check is None
+    assert not tf._invert
+
+    output_arr = tf(dcm.pixel_array)
+
+
+def test_combined_transform_labelmap_seg():
+    file_path = Path(__file__)
+    data_dir = file_path.parent.parent.joinpath('data')
+    f = data_dir / 'test_files/seg_image_sm_control_labelmap_palette_color.dcm'
+
+    dcm = pydicom.dcmread(f)
+
+    for output_dtype in [
+        np.uint8,
+        np.uint16,
+        np.uint32,
+        np.uint64,
+        np.int16,
+        np.int32,
+        np.int64,
+        np.float16,
+        np.float32,
+        np.float64,
+    ]:
+        tf = _CombinedPixelTransformation(dcm, output_dtype=output_dtype)
+        assert tf._effective_slope_intercept is None
+        assert tf._effective_lut_data is not None
+        assert tf._effective_window_center_width is None
+        assert tf._color_manager is not None
+        assert tf._input_range_check is None
+        assert not tf._invert
+
+        input_arr = dcm.pixel_array[0]
+        output_arr = tf(input_arr)
+        assert output_arr.shape == (dcm.Rows, dcm.Columns, 3)
+        assert output_arr.dtype == output_dtype
+
+        tf = _CombinedPixelTransformation(
+            dcm,
+            output_dtype=output_dtype,
+            apply_icc_profile=False,
+        )
+        assert tf._effective_slope_intercept is None
+        assert tf._effective_lut_data is not None
+        assert tf._effective_lut_data.dtype == output_dtype
+        assert tf._effective_window_center_width is None
+        assert tf._color_manager is None
+        assert tf._input_range_check is None
+        assert not tf._invert
+
+        input_arr = dcm.pixel_array[0]
+        output_arr = tf(input_arr)
+        assert output_arr.shape == (dcm.Rows, dcm.Columns, 3)
+        assert output_arr.dtype == output_dtype
+
+
+def test_combined_transform_all_test_files():
+    # A simple test that the transform at least does something with the
+    # default parameters for all images in the pydicom test suite
+    all_files = get_testdata_files()
+
+    for f in all_files:
+        try:
+            dcm = pydicom.dcmread(f)
+        except Exception:
+            continue
+
+        if 'SOPClassUID' not in dcm:
+            continue
+        if not does_iod_have_pixel_data(dcm.SOPClassUID):
+            continue
+
+        try:
+            pix = dcm.pixel_array
+        except Exception:
+            continue
+
+        tf = _CombinedPixelTransformation(dcm)
+
+        # Crudely decide whether indexing by frame is needed
+        expected_dims = 3 if dcm.SamplesPerPixel > 1 else 2
+        if pix.ndim > expected_dims:
+            pix = pix[0]
+
+        out = tf(pix)
+        assert isinstance(out, np.ndarray)

From a75954d23f8d1ee287dd05d015902726905d403c Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Tue, 24 Dec 2024 15:04:37 -0500
Subject: [PATCH 90/93] Add test for RWVM with LUT

---
 src/highdicom/image.py  |  5 ++-
 src/highdicom/pm/sop.py |  2 +-
 tests/test_image.py     | 83 +++++++++++++++++++++++++++++++++++++++--
 3 files changed, 84 insertions(+), 6 deletions(-)

diff --git a/src/highdicom/image.py b/src/highdicom/image.py
index 3424dba6..bab4b606 100644
--- a/src/highdicom/image.py
+++ b/src/highdicom/image.py
@@ -742,7 +742,10 @@ def __init__(
                 # LUT data to the requested output type
                 if self._effective_lut_data.dtype != output_dtype:
                     self._effective_lut_data = (
-                        self._effective_lut_data.astype(output_dtype)
+                        self._effective_lut_data.astype(
+ output_dtype, + casting='safe', + ) ) if self.input_dtype.kind == 'f': diff --git a/src/highdicom/pm/sop.py b/src/highdicom/pm/sop.py index 46ad0977..25f8e6b5 100644 --- a/src/highdicom/pm/sop.py +++ b/src/highdicom/pm/sop.py @@ -817,7 +817,7 @@ def _get_pixel_data_type_and_attr( self._pixel_data_type_map[_PixelDataType.USHORT], ) raise ValueError( - 'Unsupported data type for pixel data.' + 'Unsupported data type for pixel data. ' 'Supported are 8-bit or 16-bit unsigned integer types as well as ' '32-bit (single-precision) or 64-bit (double-precision) ' 'floating-point types.' diff --git a/tests/test_image.py b/tests/test_image.py index 0eff58ce..d7132f14 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -1,11 +1,13 @@ """Tests for the highdicom.image module.""" from pathlib import Path -import pickle +from pydicom.data import get_testdata_file, get_testdata_files +from pydicom.sr.codedict import codes import numpy as np -import pydicom +import pickle import pkgutil -from pydicom.data import get_testdata_file, get_testdata_files +import pydicom import pytest +import re from highdicom._module_utils import ( does_iod_have_pixel_data, @@ -20,6 +22,11 @@ from highdicom.pr.content import ( _add_icc_profile_attributes, ) +from highdicom.pm import ( + RealWorldValueMapping, + ParametricMap, +) +from highdicom.uid import UID def test_slice_spacing(): @@ -406,7 +413,6 @@ def test_combined_transform_modality_lut(): for output_dtype in [ np.int32, np.int64, - np.float16, np.float32, np.float64, ]: @@ -436,6 +442,13 @@ def test_combined_transform_modality_lut(): apply_voi_transform=True, ) + msg = re.escape( + "Cannot cast array data from dtype('uint16') to " + "dtype('float16') according to the rule 'safe'" + ) + with pytest.raises(TypeError, match=msg): + tf = _CombinedPixelTransformation(dcm, output_dtype=np.float16) + # Add a voi lut dcm.WindowCenter = 24 dcm.WindowWidth = 24 @@ -748,3 +761,65 @@ def test_combined_transform_all_test_files(): out = tf(pix) assert isinstance(out, np.ndarray) + + +def test_combined_transform_pmap_rwvm_lut(): + # Construct a temporary parametric map with a real world value map lut + file_path = Path(__file__) + data_dir = file_path.parent.parent.joinpath('data') + f = data_dir / 'test_files/ct_image.dcm' + source_image = pydicom.dcmread(f) + + m = RealWorldValueMapping( + lut_label='1', + lut_explanation='Feature 1', + unit=codes.UCUM.NoUnits, + value_range=(0, 255), + lut_data=[v**2 - 0.15 for v in range(256)] + ) + + pixel_array = np.zeros( + source_image.pixel_array.shape, + dtype=np.uint16 + ) + + pmap = ParametricMap( + pixel_array=pixel_array, + source_images=[source_image], + series_instance_uid=UID(), + series_number=1, + sop_instance_uid=UID(), + instance_number=1, + manufacturer='manufacturer', + manufacturer_model_name='manufacturer_model_name', + software_versions='software_versions', + device_serial_number='12345', + real_world_value_mappings=[m], + contains_recognizable_visual_features=False, + window_center=0, + window_width=100, + ) + + output_dtype = np.float64 + tf = _CombinedPixelTransformation(pmap, output_dtype=output_dtype) + assert tf._effective_lut_data is not None + assert tf._effective_lut_data.dtype == output_dtype + assert tf._effective_slope_intercept is None + assert tf._effective_window_center_width is None + assert tf._color_manager is None + assert tf._input_range_check is None + assert not tf._invert + + out = tf(pmap.pixel_array) + assert out.dtype == output_dtype + + test_arr = np.array([[0, 1], [254, 255]], 
np.uint16)
+    output_arr = tf(test_arr)
+    assert output_arr.dtype == output_dtype
+
+    msg = re.escape(
+        "Cannot cast array data from dtype('float64') to "
+        "dtype('float32') according to the rule 'safe'"
+    )
+    with pytest.raises(TypeError, match=msg):
+        tf = _CombinedPixelTransformation(pmap, output_dtype=np.float32)

From 760bf832be7d33d1d6467a8a66df533f21133e8a Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Thu, 26 Dec 2024 10:35:21 -0500
Subject: [PATCH 91/93] Add ability to select VOI by string

---
 src/highdicom/content.py          |  83 ++++++++-----------
 src/highdicom/image.py            |  83 ++++++++++----------
 src/highdicom/pixel_transforms.py | 125 ++++++++++++++++++++++++++++--
 tests/test_content.py             |   7 +-
 tests/test_image.py               | 109 +++++++++++++++++++++++++-
 5 files changed, 305 insertions(+), 102 deletions(-)

diff --git a/src/highdicom/content.py b/src/highdicom/content.py
index 16641221..a25eadb9 100644
--- a/src/highdicom/content.py
+++ b/src/highdicom/content.py
@@ -28,8 +28,10 @@
     _check_rescale_dtype,
     _get_combined_palette_color_lut,
     _parse_palette_color_lut_attributes,
+    _select_voi_lut,
+    _select_voi_window_center_width,
     apply_lut,
-    voi_window,
+    apply_voi_window,
 )
 from highdicom.sr.enum import ValueTypeValues
 from highdicom.sr.coding import CodedConcept
@@ -2402,7 +2404,7 @@ def apply(
         self,
         array: np.ndarray,
         output_range: Tuple[float, float] = (0.0, 1.0),
-        voi_transform_index: int = 0,
+        voi_transform_selector: int | str = 0,
         dtype: Union[type, str, np.dtype, None] = None,
         invert: bool = False,
         prefer_lut: bool = False,
@@ -2417,10 +2419,16 @@ def apply(
             transformation uses a LUT.
         output_range: Tuple[float, float], optional
             Range of output values to which the VOI range is mapped.
-        voi_transform_index: int, optional
-            Index (zero-based) of the VOI transform to apply if multiple are
-            included in the dataset. May be a negative integer, following
-            standard Python indexing convention.
+        voi_transform_selector: int | str, optional
+            Specification of the VOI transform to select (multiple may be
+            present). May either be an int or a str. If an int, it is
+            interpreted as a (zero-based) index of the list of VOI transforms
+            to apply. A negative integer may be used to index from the end of
+            the list following standard Python indexing convention. If a str,
+            the string will be used to match the Window Center Width
+            Explanation or the LUT Explanation to choose from multiple VOI
+            transforms. Note that such explanations are optional according to
+            the standard and therefore may not be present.
         dtype: Union[type, str, numpy.dtype, None], optional
             Data type of the output array. Should be a floating point data
            type. If not specified, ``numpy.float64`` is used.
@@ -2453,13 +2461,14 @@ def apply(
             )

         if not self.has_window() or (self.has_lut() and prefer_lut):
-            try:
-                voi_lut = self.VOILUTSequence[voi_transform_index]
-            except IndexError as e:
+            voi_lut = _select_voi_lut(self, voi_transform_selector)
+
+            if voi_lut is None:
                 raise IndexError(
-                    "Requested 'voi_transform_index' is "
+                    "Requested 'voi_transform_selector' is "
                     "not present."
-                ) from e
+                )
+
             scaled_lut_data = voi_lut.get_scaled_lut_data(
                 output_range=output_range,
                 dtype=dtype,
@@ -2471,50 +2480,24 @@ def apply(
                 first_mapped_value=voi_lut.first_mapped_value,
             )
         else:
-            voi_lut_function = 'LINEAR'
-
-            window_center = self.WindowCenter
-            window_width = self.WindowWidth
-
-            if 'VOILUTFunction' in self:
-                voi_lut_function = self.VOILUTFunction
-
-            if isinstance(window_width, (list, MultiValue)):
-                try:
-                    window_width = window_width[
-                        voi_transform_index
-                    ]
-                except IndexError as e:
-                    raise IndexError(
-                        "Requested 'voi_transform_index' is "
-                        "not present."
-                    ) from e
-            elif voi_transform_index not in (0, -1):
-                raise IndexError(
-                    "Requested 'voi_transform_index' is "
-                    "not present."
-                )
-
-            if isinstance(window_center, (list, MultiValue)):
-                try:
-                    window_center = window_center[
-                        voi_transform_index
-                    ]
-                except IndexError as e:
-                    raise IndexError(
-                        "Requested 'voi_transform_index' is "
-                        "not present."
-                    ) from e
-            elif voi_transform_index not in (0, -1):
+            voi_lut_function = self.get('VOILUTFunction', 'LINEAR')
+
+            center_width = _select_voi_window_center_width(
+                self,
+                voi_transform_selector
+            )
+
+            if center_width is None:
                 raise IndexError(
-                    "Requested 'voi_transform_index' is "
-                    "not present."
+                    "Requested 'voi_transform_selector' is not present."
                 )

+            window_center, window_width = center_width
+
-            array = voi_window(
+            array = apply_voi_window(
                 array,
-                window_center=cast(float, window_center),
-                window_width=cast(float, window_width),
+                window_center=window_center,
+                window_width=window_width,
                 voi_lut_function=voi_lut_function,
                 output_range=output_range,
                 dtype=dtype,
diff --git a/src/highdicom/image.py b/src/highdicom/image.py
index bab4b606..c2629d59 100644
--- a/src/highdicom/image.py
+++ b/src/highdicom/image.py
@@ -44,8 +44,10 @@
 from highdicom.pixel_transforms import (
     _check_rescale_dtype,
     _get_combined_palette_color_lut,
+    _select_voi_lut,
+    _select_voi_window_center_width,
     apply_lut,
-    voi_window,
+    apply_voi_window,
 )
 from highdicom.seg.enum import SpatialLocationsPreservedValues
 from highdicom.spatial import (
@@ -157,7 +159,7 @@ def __init__(
         real_world_value_map_index: int = 0,
         apply_modality_transform: bool | None = None,
         apply_voi_transform: bool | None = False,
-        voi_transform_index: int = 0,
+        voi_transform_selector: int | str = 0,
         voi_output_range: Tuple[float, float] = (0.0, 1.0),
         apply_presentation_lut: bool = True,
         apply_palette_color_lut: bool | None = None,
@@ -217,11 +219,18 @@ def __init__(
             transform will be applied if it is present and no real world
             value map takes precedence, but no error will be raised if it is
             not present.
-        voi_transform_index: int, optional
-            Index (zero-based) of the VOI transform to apply if multiple are
-            included in the datasets. Ignored if ``apply_voi_transform`` is
-            ``False`` or no VOI transform is included in the datasets. May be a
-            negative integer, following standard Python indexing convention.
+        voi_transform_selector: int | str, optional
+            Specification of the VOI transform to select (multiple may be
+            present). May either be an int or a str. If an int, it is
+            interpreted as a (zero-based) index of the list of VOI transforms
+            to apply. A negative integer may be used to index from the end of
+            the list following standard Python indexing convention. If a str,
+            the string will be used to match the Window Center Width
+            Explanation or the LUT Explanation to choose from multiple VOI
+            transforms.
Note that such explanations are optional according to + the standard and therefore may not be present. Ignored if + ``apply_voi_transform`` is ``False`` or no VOI transform is + included in the datasets. voi_output_range: Tuple[float, float], optional Range of output values to which the VOI range is mapped. Only relevant if ``apply_voi_transform`` is True and a VOI transform is @@ -381,12 +390,13 @@ def __init__( self.output_dtype = np.dtype(output_dtype) self.applies_to_all_frames = True - self._input_range_check: Optional[Tuple[int, int]] = None + self._input_range_check: tuple[int, int] | None = None self._voi_output_range = voi_output_range - self._effective_lut_data: Optional[np.ndarray] = None + self._effective_lut_data: np.ndarray | None = None self._effective_lut_first_mapped_value = 0 - self._effective_window_center_width: Optional[Tuple[float, float]] = None - self._effective_slope_intercept: Optional[Tuple[float, float]] = None + self._effective_window_center_width: tuple[float, float] | None = None + self._effective_voi_function = None + self._effective_slope_intercept: tuple[float, float] | None = None self._invert = False self._clip = True @@ -555,9 +565,16 @@ def __init__( if not has_rwvm and use_voi: if 'VOILUTSequence' in image: - voi_lut = LUT.from_dataset( - image.VOILUTSequence[0] - ) + + voi_lut_ds = _select_voi_lut(image, voi_transform_selector) + + if voi_lut_ds is None: + raise IndexError( + "Requested 'voi_transform_selector' is " + "not present." + ) + + voi_lut = LUT.from_dataset(voi_lut_ds) voi_scaled_lut_data = voi_lut.get_scaled_lut_data( output_range=voi_output_range, dtype=output_dtype, @@ -574,35 +591,19 @@ def __init__( 'WindowCenter' in sub_ds or 'WindowWidth' in sub_ds ): - voi_center = sub_ds.WindowCenter - voi_width = sub_ds.WindowWidth - - if 'VOILUTFunction' in sub_ds: - voi_function = sub_ds.VOILUTFunction + voi_function = sub_ds.get('VOILUTFunction', 'LINEAR') - if isinstance(voi_width, list): - voi_width = voi_width[ - voi_transform_index - ] - elif voi_transform_index not in (0, -1): - raise IndexError( - "Requested 'voi_transform_index' is " - "not present." - ) - - if isinstance(voi_center, list): - voi_center = voi_center[ - voi_transform_index - ] - elif voi_transform_index not in (0, -1): + voi_center_width = _select_voi_window_center_width( + sub_ds, + voi_transform_selector, + ) + if voi_center_width is None: raise IndexError( - "Requested 'voi_transform_index' is " - "not present." - ) + "Requested 'voi_transform_selector' is not present." 
+                            )
                         self.applies_to_all_frames = (
                             self.applies_to_all_frames and is_shared
                         )
-                        voi_center_width = (voi_center, voi_width)
                         break

         if (
@@ -620,13 +621,14 @@ def __init__(
         if modality_lut is not None and not has_rwvm:
             if voi_center_width is not None:
                 # Apply the window function to the modality LUT
-                self._effective_lut_data = voi_window(
+                self._effective_lut_data = apply_voi_window(
                     array=modality_lut.lut_data,
                     window_center=voi_center_width[0],
                     window_width=voi_center_width[1],
                     output_range=voi_output_range,
                     dtype=output_dtype,
                     invert=invert,
+                    voi_lut_function=voi_function,
                 )
                 self._effective_lut_first_mapped_value = (
                     modality_lut.first_mapped_value
@@ -830,13 +832,14 @@ def __call__(self, frame: np.ndarray) -> np.ndarray:
             frame = frame + intercept

         elif self._effective_window_center_width is not None:
-            frame = voi_window(
+            frame = apply_voi_window(
                 frame,
                 window_center=self._effective_window_center_width[0],
                 window_width=self._effective_window_center_width[1],
                 dtype=self.output_dtype,
                 invert=self._invert,
                 output_range=self._voi_output_range,
+                voi_lut_function=self._effective_voi_function,
             )

         if self._color_manager is not None:
diff --git a/src/highdicom/pixel_transforms.py b/src/highdicom/pixel_transforms.py
index b6628b71..0e5a0e4e 100644
--- a/src/highdicom/pixel_transforms.py
+++ b/src/highdicom/pixel_transforms.py
@@ -5,6 +5,7 @@
 import numpy as np
 from pydicom import Dataset
+from pydicom.multival import MultiValue

 from highdicom.enum import VOILUTFunctionValues

@@ -129,10 +130,10 @@ def _get_combined_palette_color_lut(
     Parameters
     ----------
     dataset: pydicom.Dataset
-        Dataset containing Palette Color LUT information. Note that any
-        number of other attributes may be included and will be ignored (for
-        example allowing an entire image with Palette Color LUT information
-        at the top level to be passed).
+        Dataset containing Palette Color LUT information. Note that any number
+        of other attributes may be included and will be ignored (for example
+        allowing an entire image dataset with Palette Color LUT information at
+        the top level to be passed).

     Returns
     -------
@@ -245,7 +246,121 @@ def _check_rescale_dtype(
     )


-def voi_window(
+def _select_voi_window_center_width(
+    dataset: Dataset,
+    selector: int | str,
+) -> tuple[float, float] | None:
+    """Get a specific window center and width from a VOI LUT dataset.
+
+    Parameters
+    ----------
+    dataset: pydicom.Dataset
+        Dataset to search for window center and width information. This must
+        contain at a minimum the 'WindowCenter' and 'WindowWidth' attributes.
+        Note that the dataset is not searched recursively; only window
+        information at the top level of the dataset is searched.
+    selector: int | str
+        Specification of the window to select. May either be an int or a str.
+        If an int, it is interpreted as a (zero-based) index of the list of
+        windows to apply. A negative integer may be used to index from the end
+        of the list following standard Python indexing convention. If a str,
+        the string will be used to match the Window Center Width Explanation
+        to choose from multiple VOI windows. Note that such explanations are
+        optional according to the standard and therefore may not be present.
+
+    Returns
+    -------
+    tuple[float, float] | None:
+        If the specified window is found in the dataset, it is returned as a
+        tuple of (window center, window width). If it is not found, ``None``
+        is returned.
+
+    """
+    voi_center = dataset.WindowCenter
+    voi_width = dataset.WindowWidth
+
+    if isinstance(selector, str):
+        explanations = dataset.get(
+            'WindowCenterWidthExplanation'
+        )
+        if explanations is None:
+            return None
+
+        if isinstance(explanations, str):
+            explanations = [explanations]
+
+        try:
+            selector = explanations.index(selector)
+        except ValueError:
+            return None
+
+    if isinstance(voi_width, MultiValue):
+        try:
+            voi_width = voi_width[selector]
+        except IndexError:
+            return None
+    elif selector not in (0, -1):
+        return None
+
+    if isinstance(voi_center, MultiValue):
+        try:
+            voi_center = voi_center[selector]
+        except IndexError:
+            return None
+    elif selector not in (0, -1):
+        return None
+
+    return float(voi_center), float(voi_width)
+
+
+def _select_voi_lut(
+    dataset: Dataset,
+    selector: int | str
+) -> Dataset | None:
+    """Get a specific VOI LUT dataset from a dataset.
+
+    Parameters
+    ----------
+    dataset: pydicom.Dataset
+        Dataset to search for VOI LUT information. This must contain the
+        'VOILUTSequence' attribute. Note that the dataset is not searched
+        recursively; only information at the top level of the dataset is
+        searched.
+    selector: int | str
+        Specification of the LUT to select. May either be an int or a str. If
+        an int, it is interpreted as a (zero-based) index of the sequence of
+        LUTs to apply. A negative integer may be used to index from the end of
+        the list following standard Python indexing convention. If a str, the
+        string will be used to match the LUT Explanation to choose from
+        multiple VOI LUTs. Note that such explanations are optional according
+        to the standard and therefore may not be present.
+
+    Returns
+    -------
+    pydicom.Dataset | None:
+        If the LUT is found in the dataset, it is returned as a
+        ``pydicom.Dataset``. If it is not found, ``None`` is returned.
+ + """ + if isinstance(selector, str): + explanations = [ + ds.get('LUTExplanation') for ds in dataset.VOILUTSequence + ] + + try: + selector = explanations.index(selector) + except ValueError: + return None + + try: + voi_lut_ds = dataset.VOILUTSequence[selector] + except IndexError: + return None + + return voi_lut_ds + + +def apply_voi_window( array: np.ndarray, window_center: float, window_width: float, diff --git a/tests/test_content.py b/tests/test_content.py index c8826391..7ea01251 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -241,7 +241,6 @@ def test_construction_16bit(self): (100.0, 150.0), ]: for dtype in [ - np.float16, np.float32, np.float64, ]: @@ -1420,15 +1419,15 @@ def test_construction_multiple(self): ] ) - out = lut.apply(array=input_array_lung, voi_transform_index=0) + out = lut.apply(array=input_array_lung, voi_transform_selector=0) assert np.allclose(expected, out) assert out.dtype == np.float64 - out = lut.apply(array=input_array_soft_tissue, voi_transform_index=1) + out = lut.apply(array=input_array_soft_tissue, voi_transform_selector=1) assert np.array_equal(expected, out) assert out.dtype == np.float64 - out = lut.apply(array=input_array_soft_tissue, voi_transform_index=-1) + out = lut.apply(array=input_array_soft_tissue, voi_transform_selector=-1) assert np.array_equal(expected, out) assert out.dtype == np.float64 diff --git a/tests/test_image.py b/tests/test_image.py index d7132f14..950006f5 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -17,7 +17,7 @@ MultiFrameImage, ) from highdicom.pixel_transforms import ( - voi_window, + apply_voi_window, ) from highdicom.pr.content import ( _add_icc_profile_attributes, @@ -347,6 +347,7 @@ def test_combined_transform_ect_with_voi(): assert tf._effective_slope_intercept is None assert tf._color_manager is None assert tf._voi_output_range == output_range + assert tf._effective_voi_function == 'LINEAR' input_arr = np.array( [ @@ -459,6 +460,77 @@ def test_combined_transform_modality_lut(): assert np.allclose(output_arr, expected) +def test_combined_transform_multiple_vois(): + # This test file includes multiple windows + f = get_testdata_file('examples_overlay.dcm') + dcm = pydicom.dcmread(f) + c1, c2 = dcm.WindowCenter + w1, w2 = dcm.WindowWidth + + tf = _CombinedPixelTransformation(dcm, apply_voi_transform=None) + assert tf._effective_window_center_width == (c1, w1) + + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector=1, + ) + assert tf._effective_window_center_width == (c2, w2) + assert tf._effective_voi_function == 'LINEAR' + + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector=-1, + ) + assert tf._effective_window_center_width == (c2, w2) + assert tf._effective_voi_function == 'LINEAR' + + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector=-2, + ) + assert tf._effective_window_center_width == (c1, w1) + assert tf._effective_voi_function == 'LINEAR' + + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector='WINDOW1', + ) + assert tf._effective_window_center_width == (c1, w1) + assert tf._effective_voi_function == 'LINEAR' + + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector='WINDOW2', + ) + assert tf._effective_window_center_width == (c2, w2) + assert tf._effective_voi_function == 'LINEAR' + + msg = "Requested 'voi_transform_selector' is not present." 
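+    # Note: explanation strings are matched exactly against
+    # WindowCenterWidthExplanation, so an unknown label fails in the same
+    # way as an out-of-range integer index.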
+ with pytest.raises(IndexError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector='DOES_NOT_EXIST', + ) + with pytest.raises(IndexError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector=2, + ) + with pytest.raises(IndexError, match=msg): + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector=-3, + ) + + def test_combined_transform_voi_lut(): # A test file that has a voi LUT f = get_testdata_file('vlut_04.dcm') @@ -512,6 +584,37 @@ def test_combined_transform_voi_lut(): full_output_arr = tf(dcm.pixel_array) assert full_output_arr.dtype == output_dtype + # Create an explanation to use for searching by explanation + dcm.VOILUTSequence[0].LUTExplanation = 'BONE' + + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector='BONE' + ) + assert tf._effective_lut_data is not None + + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector=-1, + ) + assert tf._effective_lut_data is not None + + msg = "Requested 'voi_transform_selector' is not present." + with pytest.raises(IndexError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector='NOT_BONE', + ) + with pytest.raises(IndexError, match=msg): + _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector=1, + ) + def test_combined_transform_monochrome(): # A test file that has a modality LUT @@ -589,7 +692,7 @@ def test_combined_transform_monochrome(): output_arr = tf(dcm.pixel_array) - expected = voi_window( + expected = apply_voi_window( dcm.pixel_array, window_width=dcm.WindowWidth, window_center=dcm.WindowCenter, @@ -618,7 +721,7 @@ def test_combined_transform_monochrome(): output_arr = tf(dcm.pixel_array) - expected = voi_window( + expected = apply_voi_window( dcm.pixel_array, window_width=dcm.WindowWidth, window_center=dcm.WindowCenter, From a26042d41077a3427fd71c74946c3bd10d52a43a Mon Sep 17 00:00:00 2001 From: Chris Bridge Date: Thu, 26 Dec 2024 11:11:37 -0500 Subject: [PATCH 92/93] Add ability to specify an external VOI transformation --- src/highdicom/image.py | 119 ++++++++++++++++++++++++++--------------- tests/test_image.py | 29 ++++++++++ 2 files changed, 106 insertions(+), 42 deletions(-) diff --git a/src/highdicom/image.py b/src/highdicom/image.py index c2629d59..30b97fc2 100644 --- a/src/highdicom/image.py +++ b/src/highdicom/image.py @@ -37,7 +37,7 @@ ) from highdicom.base import SOPClass, _check_little_endian from highdicom.color import ColorManager -from highdicom.content import LUT +from highdicom.content import LUT, VOILUTTransformation from highdicom.enum import ( CoordinateSystemNames, ) @@ -159,7 +159,7 @@ def __init__( real_world_value_map_index: int = 0, apply_modality_transform: bool | None = None, apply_voi_transform: bool | None = False, - voi_transform_selector: int | str = 0, + voi_transform_selector: int | str | VOILUTTransformation = 0, voi_output_range: Tuple[float, float] = (0.0, 1.0), apply_presentation_lut: bool = True, apply_palette_color_lut: bool | None = None, @@ -219,7 +219,7 @@ def __init__( transform will be applied if it is present and no real world value map takes precedence, but no error will be raised if it is not present. 
-        voi_transform_selector: int | str, optional
+        voi_transform_selector: int | str | highdicom.content.VOILUTTransformation, optional
             Specification of the VOI transform to select (multiple may be
             present). May either be an int or a str. If an int, it is
             interpreted as a (zero-based) index of the list of VOI transforms
             to apply. A negative integer may be used to index from the end of
             the list following standard Python indexing convention. If a str,
             the string will be used to match the Window Center Width
             Explanation or the LUT Explanation to choose from multiple VOI
             transforms. Note that such explanations are optional according to
             the standard and therefore may not be present. Ignored if
             ``apply_voi_transform`` is ``False`` or no VOI transform is
             included in the datasets.
@@ -260,12 +260,21 @@ def __init__(
             transform will be applied if it is present, but no error will be
             raised if it is not present.

-        """
+        """  # noqa: E501
         if not does_iod_have_pixel_data(image.SOPClassUID):
             raise ValueError(
                 'Input dataset does not represent an image.'
             )

+        if not isinstance(
+            voi_transform_selector,
+            (int, str, VOILUTTransformation),
+        ):
+            raise TypeError(
+                "Parameter 'voi_transform_selector' must have type 'int', "
+                "'str', or 'highdicom.content.VOILUTTransformation'."
+            )
+
         # TODO: choose VOI by explanation?
         # TODO: how to combine with multiframe?
         photometric_interpretation = image.PhotometricInterpretation
@@ -564,47 +573,68 @@ def __init__(

         if not has_rwvm and use_voi:

-            if 'VOILUTSequence' in image:
-
-                voi_lut_ds = _select_voi_lut(image, voi_transform_selector)
-
-                if voi_lut_ds is None:
-                    raise IndexError(
-                        "Requested 'voi_transform_selector' is "
-                        "not present."
-                    )
-
-                voi_lut = LUT.from_dataset(voi_lut_ds)
-                voi_scaled_lut_data = voi_lut.get_scaled_lut_data(
-                    output_range=voi_output_range,
-                    dtype=output_dtype,
-                    invert=invert,
-                )
-            else:
-                for ds, is_shared in datasets:
-                    if 'FrameVOILUTSequence' in ds:
-                        sub_ds = ds.FrameVOILUTSequence[0]
-                    else:
-                        sub_ds = ds
+            if isinstance(voi_transform_selector, VOILUTTransformation):
+                if voi_transform_selector.has_lut():
+                    if len(voi_transform_selector.VOILUTSequence) > 1:
+                        raise ValueError(
+                            "If providing a VOILUTTransformation as the "
+                            "'voi_transform_selector', it must contain "
+                            "a single transform."
+                        )
+                    voi_lut = voi_transform_selector.VOILUTSequence[0]
+                else:
+                    voi_center = voi_transform_selector.WindowCenter
+                    voi_width = voi_transform_selector.WindowWidth

                     if (
-                        'WindowCenter' in sub_ds or
-                        'WindowWidth' in sub_ds
+                        isinstance(voi_width, MultiValue) or
+                        isinstance(voi_center, MultiValue)
                     ):
-                        voi_function = sub_ds.get('VOILUTFunction', 'LINEAR')
-
-                        voi_center_width = _select_voi_window_center_width(
-                            sub_ds,
-                            voi_transform_selector,
+                        raise ValueError(
+                            "If providing a VOILUTTransformation as the "
+                            "'voi_transform_selector', it must contain "
+                            "a single transform."
                         )
-                        if voi_center_width is None:
-                            raise IndexError(
-                                "Requested 'voi_transform_selector' is not present."
-                            )
-                        self.applies_to_all_frames = (
-                            self.applies_to_all_frames and is_shared
-                        )
-                        break
+                    voi_center_width = (float(voi_center), float(voi_width))
+            else:
+                # Need to find existing VOI LUT information
+                if 'VOILUTSequence' in image:
+
+                    voi_lut_ds = _select_voi_lut(image, voi_transform_selector)
+
+                    if voi_lut_ds is None:
+                        raise IndexError(
+                            "Requested 'voi_transform_selector' is "
+                            "not present."
+ ) + self.applies_to_all_frames = ( + self.applies_to_all_frames and is_shared + ) + break if ( require_voi and @@ -634,7 +664,7 @@ def __init__( modality_lut.first_mapped_value ) - elif voi_lut is not None and voi_scaled_lut_data is not None: + elif voi_lut is not None: # "Compose" the two LUTs together by applying the # second to the first self._effective_lut_data = voi_lut.apply( @@ -675,7 +705,7 @@ def __init__( self._effective_voi_function = voi_function self._invert = invert - elif voi_lut is not None and voi_scaled_lut_data is not None: + elif voi_lut is not None: # Shift and "scale" the LUT to account for the rescale if not intercept.is_integer() and slope.is_integer(): raise ValueError( @@ -684,6 +714,11 @@ def __init__( ) intercept = int(intercept) slope = int(slope) + voi_scaled_lut_data = voi_lut.get_scaled_lut_data( + output_range=voi_output_range, + dtype=output_dtype, + invert=invert, + ) if slope != 1: self._effective_lut_data = voi_scaled_lut_data[::slope] else: diff --git a/tests/test_image.py b/tests/test_image.py index 950006f5..b3524128 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -9,9 +9,11 @@ import pytest import re +import highdicom from highdicom._module_utils import ( does_iod_have_pixel_data, ) +from highdicom.content import VOILUTTransformation from highdicom.image import ( _CombinedPixelTransformation, MultiFrameImage, @@ -530,6 +532,33 @@ def test_combined_transform_multiple_vois(): voi_transform_selector=-3, ) + c3, w3 = (40, 400) + external_voi = VOILUTTransformation( + window_center=c3, + window_width=w3, + ) + tf = _CombinedPixelTransformation( + dcm, + apply_voi_transform=None, + voi_transform_selector=external_voi, + ) + assert tf._effective_window_center_width == (c3, w3) + + # External VOIs should not contain multiple transforms + invalid_external_voi = VOILUTTransformation( + window_center=[100, 200], + window_width=[300, 400], + ) + msg = ( + "If providing a VOILUTTransformation as the " + "'voi_transform_selector', it must contain a single transform." 
+    )
+    with pytest.raises(ValueError, match=msg):
+        tf = _CombinedPixelTransformation(
+            dcm,
+            apply_voi_transform=None,
+            voi_transform_selector=invalid_external_voi,
+        )

 def test_combined_transform_voi_lut():

From 983cdba795c2ef4d8e2d33ddf42ad5a6905b6660 Mon Sep 17 00:00:00 2001
From: Chris Bridge
Date: Thu, 26 Dec 2024 12:18:06 -0500
Subject: [PATCH 93/93] Add ability to search RWVMs

---
 src/highdicom/image.py            | 45 +++++++++++++++--------
 src/highdicom/pixel_transforms.py | 59 +++++++++++++++++++++++++++++++
 tests/test_image.py               | 20 +++++++++++
 3 files changed, 109 insertions(+), 15 deletions(-)

diff --git a/src/highdicom/image.py b/src/highdicom/image.py
index 30b97fc2..15b35ad5 100644
--- a/src/highdicom/image.py
+++ b/src/highdicom/image.py
@@ -29,6 +29,7 @@
     tag_for_keyword,
 )
 from pydicom.multival import MultiValue
+from pydicom.sr.coding import Code
 from pydicom.uid import ParametricMapStorage

 from highdicom._module_utils import (
@@ -44,6 +45,7 @@
 from highdicom.pixel_transforms import (
     _check_rescale_dtype,
     _get_combined_palette_color_lut,
+    _select_real_world_value_map,
     _select_voi_lut,
     _select_voi_window_center_width,
     apply_lut,
@@ -54,6 +56,7 @@
     get_image_coordinate_system,
     get_volume_positions,
 )
+from highdicom.sr.coding import CodedConcept
 from highdicom.uid import UID as hd_UID
 from highdicom.utils import (
     iter_tiled_full_frame_data,
@@ -156,7 +159,7 @@ def __init__(
         *,
         output_dtype: Union[type, str, np.dtype, None] = np.float64,
         apply_real_world_transform: bool | None = None,
-        real_world_value_map_index: int = 0,
+        real_world_value_map_selector: int | str | Code | CodedConcept = 0,
         apply_modality_transform: bool | None = None,
         apply_voi_transform: bool | None = False,
         voi_transform_selector: int | str | VOILUTTransformation = 0,
@@ -193,9 +196,16 @@ def __init__(
             preferentially. This also implies that specifying both
             ``apply_real_world_transform`` and ``apply_modality_transform``
             to True is not permitted.
-        real_world_value_map_index: int, optional
-            Index of the real world value map to use (multiple may be stored
-            within the dataset).
+        real_world_value_map_selector: int | str | pydicom.sr.coding.Code | highdicom.sr.coding.CodedConcept, optional
+            Specification of the real world value map to use (multiple may be
+            present in the dataset). If an int, it is used to index the list of
+            available maps. A negative integer may be used to index from the
+            end of the list following standard Python indexing convention. If a
+            str, the string will be used to match the ``"LUTLabel"`` attribute
+            to select the map. If a ``pydicom.sr.coding.Code`` or
+            ``highdicom.sr.coding.CodedConcept``, this will be used to match
+            the units (contained in the ``"MeasurementUnitsCodeSequence"``
+            attribute).
         apply_modality_transform: bool | None, optional
             Whether to apply the modality transform (if present in the
             dataset) to the frame. The modality transformation maps stored pixel
@@ -225,12 +235,12 @@ def __init__(
             interpreted as a (zero-based) index of the list of VOI transforms
             to apply. A negative integer may be used to index from the end of
             the list following standard Python indexing convention. If a str,
-            the string will be used to match the Window Center Width
-            Explanation or the LUT Explanation to choose from multiple VOI
-            transforms. Note that such explanations are optional according to
-            the standard and therefore may not be present. Ignored if
-            ``apply_voi_transform`` is ``False`` or no VOI transform is
-            included in the datasets.
+            the string will be used to match the
+ the string that will be used to match the + ``"WindowCenterWidthExplanation"`` or the ``"LUTExplanation"`` + attributes to choose from multiple VOI transforms. Note that such + explanations are optional according to the standard and therefore + may not be present. Ignored if ``apply_voi_transform`` is ``False`` + or no VOI transform is included in the datasets. voi_output_range: Tuple[float, float], optional Range of output values to which the VOI range is mapped. Only relevant if ``apply_voi_transform`` is True and a VOI transform is @@ -493,13 +503,18 @@ def __init__( for ds, is_shared in datasets: rwvm_seq = ds.get('RealWorldValueMappingSequence') if rwvm_seq is not None: - try: - rwvm_item = rwvm_seq[real_world_value_map_index] - except IndexError as e: + + rwvm_item = _select_real_world_value_map( + rwvm_seq, + real_world_value_map_selector, + ) + + if rwvm_item is None: raise IndexError( - "Requested 'real_world_value_map_index' is " + "Requested 'real_world_value_map_selector' is " "not present." - ) from e + ) + if 'RealWorldValueLUTData' in rwvm_item: self._effective_lut_data = np.array( rwvm_item.RealWorldValueLUTData diff --git a/src/highdicom/pixel_transforms.py b/src/highdicom/pixel_transforms.py index 0e5a0e4e..f30819eb 100644 --- a/src/highdicom/pixel_transforms.py +++ b/src/highdicom/pixel_transforms.py @@ -5,8 +5,11 @@ import numpy as np from pydicom import Dataset +from pydicom.sr.coding import Code +from pydicom.sequence import Sequence as pydicom_sequence from pydicom.multival import MultiValue from highdicom.enum import VOILUTFunctionValues +from highdicom.sr.coding import CodedConcept def _parse_palette_color_lut_attributes(dataset: Dataset) -> Tuple[ @@ -552,3 +555,59 @@ def apply_lut( array = array - first_mapped_value return lut_data[array, ...] + + +def _select_real_world_value_map( + sequence: pydicom_sequence, + selector: int | str | CodedConcept | Code, +) -> Dataset | None: + """Select a real world value map from a sequence. + + Parameters + ---------- + sequence: pydicom.sequence.Sequence + Sequence representing a Real World Value Mapping Sequence. + selector: int | str | highdicom.sr.coding.CodedConcept | pydicom.sr.coding.Code + Selector specifying an item in the sequence. If an integer, it is used + as a index to the sequence in the usual way. If a string, the + ``"LUTLabel"`` attribute of the items will be searched for a value that + exactly matches the selector. If a code, the + ``"MeasurementUnitsCodeSequence"`` will be searched for a value that + matches the selector. + + Returns + ------- + pydicom.Dataset | None: + Either an item of the input sequence that matches the selector, or + ``None`` if no such item is found. 
+ + """ + if isinstance(selector, int): + try: + item = sequence[selector] + except IndexError: + return None + + return item + + elif isinstance(selector, str): + labels = [item.LUTLabel for item in sequence] + + try: + index = labels.index(selector) + except ValueError: + return None + + return sequence[index] + + elif isinstance(selector, (CodedConcept, Code)): + units = [ + CodedConcept.from_dataset(item.MeasurementUnitsCodeSequence[0]) + for item in sequence + ] + try: + index = units.index(selector) + except ValueError: + return None + + return sequence[index] diff --git a/tests/test_image.py b/tests/test_image.py index b3524128..4c2a7afe 100644 --- a/tests/test_image.py +++ b/tests/test_image.py @@ -28,6 +28,7 @@ RealWorldValueMapping, ParametricMap, ) +from highdicom.sr.coding import CodedConcept from highdicom.uid import UID @@ -184,6 +185,25 @@ def test_combined_transform_ect_rwvm(): output_dtype=np.int16, ) + # Various different indexing methods + unit_code = CodedConcept('ml/100ml/s', 'UCUM', 'ml/100ml/s', '1.4') + for selector in [-1, 'RCBF', unit_code]: + tf = _CombinedPixelTransformation( + dcm, + real_world_value_map_selector=selector, + ) + assert tf._effective_slope_intercept == (slope, intercept) + + # Various different incorrect indexing methods + msg = "Requested 'real_world_value_map_selector' is not present." + other_unit_code = CodedConcept('m/s', 'UCUM', 'm/s', '1.4') + for selector in [2, -2, 'ABCD', other_unit_code]: + with pytest.raises(IndexError, match=msg): + _CombinedPixelTransformation( + dcm, + real_world_value_map_selector=selector, + ) + # Delete the real world value map del ( dcm
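Taken together, patches 91 through 93 make each stage of the pixel transform pipeline individually addressable. A minimal usage sketch of the new selectors (the file paths are hypothetical, and `_CombinedPixelTransformation` is a private class, so its import location may change):

import pydicom
from pydicom.sr.codedict import codes

from highdicom.image import _CombinedPixelTransformation

pmap = pydicom.dcmread('parametric_map.dcm')  # hypothetical input file

# Select a real world value map by its LUT Label ...
tf = _CombinedPixelTransformation(pmap, real_world_value_map_selector='RCBF')

# ... or by its measurement units code, as exercised in the test above.
tf = _CombinedPixelTransformation(
    pmap,
    real_world_value_map_selector=codes.UCUM.NoUnits,
)

# Select a VOI window by its explanation string instead of its index.
ct = pydicom.dcmread('ct_image.dcm')  # hypothetical input file
tf = _CombinedPixelTransformation(
    ct,
    apply_voi_transform=None,
    voi_transform_selector='WINDOW1',
)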