Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Allow segmentations to reference multiple images #199

Open
wants to merge 14 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
118 changes: 69 additions & 49 deletions src/highdicom/seg/sop.py
Original file line number Diff line number Diff line change
Expand Up @@ -274,29 +274,47 @@ def __init__(
if len(source_images) == 0:
raise ValueError('At least one source image is required.')

unique_sop_instance_uids = set(
[image.SOPInstanceUID for image in source_images]
)
if len(source_images) != len(unique_sop_instance_uids):
raise ValueError(
'Source images must all have a unique SOP Instance UID.'
)

uniqueness_criteria = set(
(
image.StudyInstanceUID,
image.SeriesInstanceUID,
image.Rows,
image.Columns,
int(getattr(image, 'NumberOfFrames', '1')),
hasattr(image, 'FrameOfReferenceUID'),
getattr(image, 'FrameOfReferenceUID', None),
hasattr(image, 'TotalPixelMatrixRows'),
getattr(image, 'TotalPixelMatrixRows', None),
hasattr(image, 'TotalPixelMatrixColumns'),
getattr(image, 'TotalPixelMatrixColumns', None),
hasattr(image, 'TotalPixelMatrixFocalPlanes'),
getattr(image, 'TotalPixelMatrixFocalPlanes', None),
tuple(getattr(image, 'ImageOrientation', [])),
tuple(getattr(image, 'ImageOrientationSlide', [])),
hasattr(image, 'DimensionOrganizationType'),
getattr(image, 'DimensionOrganizationType', None),
len(getattr(image, 'PerFrameFunctionalGroupsSequence', [])),
len(getattr(image, 'SharedFunctionalGroupsSequence', [])),
)
for image in source_images
)
if len(uniqueness_criteria) > 1:
raise ValueError(
'Source images must all be part of the same series and must '
'have the same image dimensions (number of rows/columns).'
'Source images must all be part of the same series, must '
CPBridge marked this conversation as resolved.
Show resolved Hide resolved
'have the same image dimensions (number of rows/columns), and '
'must have the same image orientation.'
)

src_img = source_images[0]
is_multiframe = hasattr(src_img, 'NumberOfFrames')
if is_multiframe and len(source_images) > 1:
raise ValueError(
'Only one source image should be provided in case images '
'are multi-frame images.'
)
is_tiled = hasattr(src_img, 'TotalPixelMatrixRows')
supported_transfer_syntaxes = {
ImplicitVRLittleEndian,
Expand Down Expand Up @@ -742,6 +760,8 @@ def __init__(
# bitpacking at the end
full_pixel_array = np.array([], np.bool_)

derivation_code = codes.cid7203.Segmentation
purpose_code = codes.cid7202.SourceImageForImageProcessingOperation
for i, segment_number in enumerate(described_segment_numbers):
# Pixel array for just this segment
if pixel_array.dtype in (np.float_, np.float32, np.float64):
Expand Down Expand Up @@ -787,17 +807,11 @@ def __init__(
# absent. Such frames should be removed
if omit_empty_frames and np.sum(planes[j]) == 0:
logger.info(
'skip empty plane {} of segment #{}'.format(
j, segment_number
)
f'skip empty plane {j} of segment #{segment_number}'
)
continue
contained_plane_index.append(j)
logger.info(
'add plane #{} for segment #{}'.format(
j, segment_number
)
)
logger.info(f'add plane #{j} for segment #{segment_number}')

pffp_item = Dataset()
frame_content_item = Dataset()
Expand Down Expand Up @@ -837,9 +851,9 @@ def __init__(
]
except IndexError as error:
raise IndexError(
'Could not determine position of plane #{} in '
f'Could not determine position of plane #{j} in '
'three dimensional coordinate system based on '
'dimension index values: {}'.format(j, error)
f'dimension index values: {error}'
)
frame_content_item.DimensionIndexValues = (
[segment_number] + index_values
Expand All @@ -858,43 +872,49 @@ def __init__(
pffp_item.DerivationImageSequence = []

if are_spatial_locations_preserved:
derivation_image_item = Dataset()
derivation_code = codes.cid7203.Segmentation
derivation_image_item.DerivationCodeSequence = [
derivation_img_item = Dataset()
derivation_img_item.DerivationCodeSequence = [
CodedConcept.from_code(derivation_code)
]

derivation_src_img_item = Dataset()
if hasattr(source_images[0], 'NumberOfFrames'):
# A single multi-frame source image
src_img_item = self.SourceImageSequence[0]
# Frame numbers are one-based
derivation_src_img_item.ReferencedFrameNumber = (
source_image_index + 1
)
else:
# Multiple single-frame source images
src_img_item = self.SourceImageSequence[
source_image_index
]
derivation_src_img_item.ReferencedSOPClassUID = \
src_img_item.ReferencedSOPClassUID
derivation_src_img_item.ReferencedSOPInstanceUID = \
src_img_item.ReferencedSOPInstanceUID
purpose_code = \
codes.cid7202.SourceImageForImageProcessingOperation
derivation_src_img_item.PurposeOfReferenceCodeSequence = [
CodedConcept.from_code(purpose_code)
]
derivation_src_img_item.SpatialLocationsPreserved = 'YES'
derivation_image_item.SourceImageSequence = [
derivation_src_img_item,
]
derivation_img_item.SourceImageSequence = []

for _, referenced_images in referenced_series.items():
if is_multiframe:
for src_item in referenced_images:
drv_src_item = Dataset()
drv_src_item.ReferencedFrameNumber = (
source_image_index + 1
)
drv_src_item.ReferencedSOPClassUID = \
src_item.ReferencedSOPClassUID
drv_src_item.ReferencedSOPInstanceUID = \
src_item.ReferencedSOPInstanceUID
drv_src_item.PurposeOfReferenceCodeSequence = [
CodedConcept.from_code(purpose_code)
]
drv_src_item.SpatialLocationsPreserved = 'YES'
derivation_img_item.SourceImageSequence.append(
drv_src_item
)
else:
src_item = referenced_images[source_image_index]
drv_src_item = Dataset()
drv_src_item.ReferencedSOPClassUID = \
src_item.ReferencedSOPClassUID
drv_src_item.ReferencedSOPInstanceUID = \
src_item.ReferencedSOPInstanceUID
drv_src_item.PurposeOfReferenceCodeSequence = [
CodedConcept.from_code(purpose_code)
]
drv_src_item.SpatialLocationsPreserved = 'YES'
derivation_img_item.SourceImageSequence.append(
drv_src_item
)
pffp_item.DerivationImageSequence.append(
derivation_image_item
derivation_img_item
)
else:
logger.warning('spatial locations not preserved')
logger.warning('spatial locations are not preserved')

identification = Dataset()
identification.ReferencedSegmentNumber = segment_number
Expand Down
97 changes: 96 additions & 1 deletion tests/test_seg.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
from collections import defaultdict
import unittest
from pathlib import Path
from copy import deepcopy

import numpy as np
import pytest
Expand Down Expand Up @@ -644,7 +645,7 @@ def setUp(self):
axis=2
)[None, :]

# A microscopy image
# A microscopy (color) image
self._sm_image = dcmread(
str(data_dir.joinpath('test_files', 'sm_image.dcm'))
)
Expand All @@ -656,6 +657,16 @@ def setUp(self):
)
self._sm_pixel_array[2:3, 1:5, 7:9] = True

# A microscopy (grayscale) image
self._sm_image_grayscale = dcmread(
str(data_dir.joinpath('test_files', 'sm_image_grayscale.dcm'))
)
self._sm_pixel_array_grayscale = np.zeros(
self._sm_image_grayscale.pixel_array.shape,
dtype=bool
)
self._sm_pixel_array_grayscale[2:3, 1:5, 7:9] = True

# A series of single frame CT images
ct_series = [
dcmread(f)
Expand Down Expand Up @@ -1365,6 +1376,90 @@ def test_construction_7(self):
assert SegmentsOverlapValues[instance.SegmentsOverlap] == \
SegmentsOverlapValues.NO

def test_construction_8(self):
    """Two single-frame source images should both be referenced.

    Constructs a Segmentation from two grayscale slide microscopy
    images (identical apart from SOP Instance UID) and verifies that
    both appear in the top-level SourceImageSequence and in each
    frame's DerivationImageSequence.
    """
    sm_image_one = deepcopy(self._sm_image_grayscale)
    sm_image_two = deepcopy(self._sm_image_grayscale)
    # Distinct SOP Instance UID so the two datasets count as two images
    sm_image_two.SOPInstanceUID = UID()
    instance = Segmentation(
        [sm_image_one, sm_image_two],
        self._sm_pixel_array_grayscale,
        SegmentationTypeValues.FRACTIONAL.value,
        self._segment_descriptions,
        self._series_instance_uid,
        self._series_number,
        self._sop_instance_uid,
        self._instance_number,
        self._manufacturer,
        self._manufacturer_model_name,
        self._software_versions,
        self._device_serial_number
    )
    assert len(instance.SegmentSequence) == 1
    assert len(instance.SourceImageSequence) == 2
    ref_item_one = instance.SourceImageSequence[0]
    assert ref_item_one.ReferencedSOPInstanceUID == \
        sm_image_one.SOPInstanceUID
    ref_item_two = instance.SourceImageSequence[1]
    assert ref_item_two.ReferencedSOPInstanceUID == \
        sm_image_two.SOPInstanceUID

    # Fix: the expected frame count must be derived from the grayscale
    # pixel array actually passed to the constructor above, not from
    # the unrelated color-image fixture (self._sm_pixel_array). The two
    # fixtures happen to mark the same region, which masked the mix-up.
    num_frames = (
        self._sm_pixel_array_grayscale.sum(axis=(1, 2)) > 0
    ).sum()
    assert instance.NumberOfFrames == num_frames
    assert len(instance.PerFrameFunctionalGroupsSequence) == num_frames
    frame_item = instance.PerFrameFunctionalGroupsSequence[0]
    for derivation_image_item in frame_item.DerivationImageSequence:
        # Each derivation item must reference both source images, in order
        assert len(derivation_image_item.SourceImageSequence) == 2
        source_image_item_one = derivation_image_item.SourceImageSequence[0]
        assert source_image_item_one.ReferencedSOPInstanceUID == \
            sm_image_one.SOPInstanceUID
        # Multi-frame source, so a frame number must be recorded
        assert hasattr(source_image_item_one, 'ReferencedFrameNumber')
        source_image_item_two = derivation_image_item.SourceImageSequence[1]
        assert source_image_item_two.ReferencedSOPInstanceUID == \
            sm_image_two.SOPInstanceUID
    self.check_dimension_index_vals(instance)

def test_construction_9(self):
    """Construction fails when source images disagree on Rows.

    Two otherwise-identical grayscale images whose Rows attributes
    differ must be rejected with a ValueError.
    """
    first_image = deepcopy(self._sm_image_grayscale)
    second_image = deepcopy(self._sm_image_grayscale)
    second_image.SOPInstanceUID = UID()
    # Deliberately break the row-count agreement between the two images
    second_image.Rows = first_image.Rows - 1
    with pytest.raises(ValueError):
        Segmentation(
            [first_image, second_image],
            self._sm_pixel_array_grayscale,
            SegmentationTypeValues.FRACTIONAL.value,
            self._segment_descriptions,
            self._series_instance_uid,
            self._series_number,
            self._sop_instance_uid,
            self._instance_number,
            self._manufacturer,
            self._manufacturer_model_name,
            self._software_versions,
            self._device_serial_number
        )

def test_construction_10(self):
    """Construction fails when source images disagree on NumberOfFrames.

    Two otherwise-identical grayscale images whose NumberOfFrames
    attributes differ must be rejected with a ValueError.
    """
    first_image = deepcopy(self._sm_image_grayscale)
    second_image = deepcopy(self._sm_image_grayscale)
    second_image.SOPInstanceUID = UID()
    # Deliberately break the frame-count agreement between the two images
    mismatched_count = int(first_image.NumberOfFrames) + 5
    second_image.NumberOfFrames = str(mismatched_count)
    with pytest.raises(ValueError):
        Segmentation(
            [first_image, second_image],
            self._sm_pixel_array_grayscale,
            SegmentationTypeValues.FRACTIONAL.value,
            self._segment_descriptions,
            self._series_instance_uid,
            self._series_number,
            self._sop_instance_uid,
            self._instance_number,
            self._manufacturer,
            self._manufacturer_model_name,
            self._software_versions,
            self._device_serial_number
        )

def test_pixel_types(self):
# A series of tests on different types of image
tests = [
Expand Down