Completed: push to master (ba5be7...1c9054) by Fernando, created 01:22

Subject.get_applied_transforms() (B)

Complexity:  Conditions 6
Size:        Total Lines 22, Code Lines 20
Duplication: Lines 0, Ratio 0 %
Importance:  Changes 0

Metric  Value
cc      6
eloc    20
nop     3
dl      0
loc     22
rs      8.4666
c       0
b       0
f       0

import copy
import pprint
from typing import Any, Dict, List, Tuple, Optional, Sequence, TYPE_CHECKING

import numpy as np

from ..constants import TYPE, INTENSITY
from .image import Image
from ..utils import get_subclasses

if TYPE_CHECKING:
    from ..transforms import Transform, Compose


class Subject(dict):
    """Class to store information about the images corresponding to a subject.

    Args:
        *args: If provided, a dictionary of items.
        **kwargs: Items that will be added to the subject sample.

    Example:

        >>> import torchio as tio
        >>> # One way:
        >>> subject = tio.Subject(
        ...     one_image=tio.ScalarImage('path_to_image.nii.gz'),
        ...     a_segmentation=tio.LabelMap('path_to_seg.nii.gz'),
        ...     age=45,
        ...     name='John Doe',
        ...     hospital='Hospital Juan Negrín',
        ... )
        >>> # If you want to create the mapping beforehand, or the keys contain spaces:
        >>> subject_dict = {
        ...     'one image': tio.ScalarImage('path_to_image.nii.gz'),
        ...     'a segmentation': tio.LabelMap('path_to_seg.nii.gz'),
        ...     'age': 45,
        ...     'name': 'John Doe',
        ...     'hospital': 'Hospital Juan Negrín',
        ... }
        >>> subject = tio.Subject(subject_dict)

    """

    def __init__(self, *args, **kwargs: Dict[str, Any]):
        if args:
            if len(args) == 1 and isinstance(args[0], dict):
                kwargs.update(args[0])
            else:
                message = (
                    'Only one dictionary as positional argument is allowed')
                raise ValueError(message)
        super().__init__(**kwargs)
        self._parse_images(self.get_images(intensity_only=False))
        self.update_attributes()  # this allows me to do e.g. subject.t1
        self.applied_transforms = []

    def __repr__(self):
        num_images = len(self.get_images(intensity_only=False))
        string = (
            f'{self.__class__.__name__}'
            f'(Keys: {tuple(self.keys())}; images: {num_images})'
        )
        return string

    def __copy__(self):
        result_dict = {}
        for key, value in self.items():
            if isinstance(value, Image):
                value = copy.copy(value)
            else:
                value = copy.deepcopy(value)
            result_dict[key] = value
        new = Subject(result_dict)
        new.applied_transforms = self.applied_transforms[:]
        return new
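
    # Usage sketch (illustrative): because Image values are shallow-copied and
    # all other values are deep-copied, ``copy.copy(subject)`` duplicates a
    # Subject without deep-copying the images. Assuming ``subject`` was built
    # as in the class docstring:
    #
    #     >>> import copy
    #     >>> duplicate = copy.copy(subject)
    #     >>> duplicate is subject
    #     False
    #     >>> sorted(duplicate.keys()) == sorted(subject.keys())
    #     True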

    def __len__(self):
        return len(self.get_images(intensity_only=False))

    @staticmethod
    def _parse_images(images: List[Tuple[str, Image]]) -> None:
        # Check that it's not empty
        if not images:
            raise ValueError('A subject without images cannot be created')

    @property
    def shape(self):
        """Return shape of first image in subject.

        Consistency of shapes across images in the subject is checked first.
        """
        self.check_consistent_attribute('shape')
        return self.get_first_image().shape

    @property
    def spatial_shape(self):
        """Return spatial shape of first image in subject.

        Consistency of spatial shapes across images in the subject is checked
        first.
        """
        self.check_consistent_spatial_shape()
        return self.get_first_image().spatial_shape

    @property
    def spacing(self):
        """Return spacing of first image in subject.

        Consistency of spacings across images in the subject is checked first.
        """
        self.check_consistent_attribute('spacing')
        return self.get_first_image().spacing

    @property
    def history(self):
        # Kept for backwards compatibility
        return self.get_applied_transforms()

    def get_applied_transforms(
            self,
            ignore_intensity: bool = False,
            image_interpolation: Optional[str] = None,
            ) -> List['Transform']:
        from ..transforms.transform import Transform
        from ..transforms.intensity_transform import IntensityTransform
        name_to_transform = {
            cls.__name__: cls
            for cls in get_subclasses(Transform)
        }
        transforms_list = []
        for transform_name, arguments in self.applied_transforms:
            transform = name_to_transform[transform_name](**arguments)
            if ignore_intensity and isinstance(transform, IntensityTransform):
                continue
            resamples = hasattr(transform, 'image_interpolation')
            if resamples and image_interpolation is not None:
                parsed = transform.parse_interpolation(image_interpolation)
                transform.image_interpolation = parsed
            transforms_list.append(transform)
        return transforms_list
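
    # Usage sketch (illustrative): ``self.applied_transforms`` stores
    # ``(transform_name, arguments)`` pairs, so the method above rebuilds actual
    # transform instances by class name; ``ignore_intensity=True`` drops
    # intensity transforms from the rebuilt list. Assuming ``subject`` as in the
    # class docstring:
    #
    #     >>> import torchio as tio
    #     >>> transformed = tio.RandomFlip()(subject)
    #     >>> transformed.get_applied_transforms()   # e.g. [RandomFlip(...)]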

    def get_composed_history(
            self,
            ignore_intensity: bool = False,
            image_interpolation: Optional[str] = None,
            ) -> 'Compose':
        from ..transforms.augmentation.composition import Compose
        transforms = self.get_applied_transforms(
            ignore_intensity=ignore_intensity,
            image_interpolation=image_interpolation,
        )
        return Compose(transforms)

    def get_inverse_transform(
            self,
            warn: bool = True,
            ignore_intensity: bool = True,
            image_interpolation: Optional[str] = None,
            ) -> 'Compose':
        """Get a reversed list of the inverses of the applied transforms.

        Args:
            warn: Issue a warning if some transforms are not invertible.
            ignore_intensity: If ``True``, all instances of
                :class:`~torchio.transforms.intensity_transform.IntensityTransform`
                will be ignored.
            image_interpolation: Modify interpolation for scalar images inside
                transforms that perform resampling.
        """
        history_transform = self.get_composed_history(
            ignore_intensity=ignore_intensity,
            image_interpolation=image_interpolation,
        )
        inverse_transform = history_transform.inverse(warn=warn)
        return inverse_transform

    def apply_inverse_transform(self, **kwargs) -> 'Subject':
        """Try to apply the inverse of all applied transforms, in reverse order.

        Args:
            **kwargs: Keyword arguments passed on to
                :meth:`~torchio.data.subject.Subject.get_inverse_transform`.
        """
        inverse_transform = self.get_inverse_transform(**kwargs)
        transformed = inverse_transform(self)
        transformed.clear_history()
        return transformed
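
    # Usage sketch (illustrative): a typical round trip, e.g. undoing spatial
    # augmentation after inference. Intensity transforms are skipped by default
    # (``ignore_intensity=True`` in ``get_inverse_transform``) since they are
    # generally not invertible:
    #
    #     >>> import torchio as tio
    #     >>> transformed = tio.RandomAffine()(subject)
    #     >>> restored = transformed.apply_inverse_transform(warn=False)
    #     >>> restored.applied_transforms   # history is cleared after inversion
    #     []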

    def clear_history(self) -> None:
        self.applied_transforms = []

    def check_consistent_attribute(self, attribute: str) -> None:
        values_dict = {}
        iterable = self.get_images_dict(intensity_only=False).items()
        for image_name, image in iterable:
            values_dict[image_name] = getattr(image, attribute)
        num_unique_values = len(set(values_dict.values()))
        if num_unique_values > 1:
            message = (
                f'More than one {attribute} found in subject images:'
                f'\n{pprint.pformat(values_dict)}'
            )
            raise RuntimeError(message)

    def check_consistent_spatial_shape(self) -> None:
        self.check_consistent_attribute('spatial_shape')

    def check_consistent_orientation(self) -> None:
        self.check_consistent_attribute('orientation')

    def check_consistent_affine(self):
        # https://github.com/fepegar/torchio/issues/354
        affine = None
        first_image = None
        iterable = self.get_images_dict(intensity_only=False).items()
        for image_name, image in iterable:
            if affine is None:
                affine = image.affine
                first_image = image_name
            elif not np.allclose(affine, image.affine, rtol=1e-6, atol=1e-6):
                message = (
                    f'Images "{first_image}" and "{image_name}" do not occupy'
                    ' the same physical space.'
                    f'\nAffine of "{first_image}":'
                    f'\n{pprint.pformat(affine)}'
                    f'\nAffine of "{image_name}":'
                    f'\n{pprint.pformat(image.affine)}'
                )
                raise RuntimeError(message)

    def check_consistent_space(self):
        self.check_consistent_spatial_shape()
        self.check_consistent_affine()
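
    # Usage sketch (illustrative): the checks above raise ``RuntimeError`` as
    # soon as two images disagree; the ``shape``, ``spatial_shape`` and
    # ``spacing`` properties rely on them before reading the first image.
    # With hypothetical keys ``t1`` and ``seg``:
    #
    #     >>> subject.check_consistent_space()  # silent if shapes and affines match
    #     >>> # otherwise e.g.:
    #     >>> # RuntimeError: Images "t1" and "seg" do not occupy the same physical space.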

    def get_images_dict(
            self,
            intensity_only=True,
            include: Optional[Sequence[str]] = None,
            exclude: Optional[Sequence[str]] = None,
            ) -> Dict[str, Image]:
        images = {}
        for image_name, image in self.items():
            if not isinstance(image, Image):
                continue
            if intensity_only and image[TYPE] != INTENSITY:
                continue
            if include is not None and image_name not in include:
                continue
            if exclude is not None and image_name in exclude:
                continue
            images[image_name] = image
        return images

    def get_images(
            self,
            intensity_only=True,
            include: Optional[Sequence[str]] = None,
            exclude: Optional[Sequence[str]] = None,
            ) -> List[Image]:
        images_dict = self.get_images_dict(
            intensity_only=intensity_only,
            include=include,
            exclude=exclude,
        )
        return list(images_dict.values())
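
    # Usage sketch (illustrative): by default only intensity images (e.g.
    # ``ScalarImage``) are returned and label maps are skipped; ``include`` and
    # ``exclude`` filter by key. Using the keys from the class docstring example:
    #
    #     >>> subject.get_images_dict(intensity_only=False)   # every Image, keyed by name
    #     >>> subject.get_images(include=['one_image'])
    #     >>> subject.get_images(intensity_only=False, exclude=['a_segmentation'])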

    def get_first_image(self) -> Image:
        return self.get_images(intensity_only=False)[0]

    # flake8: noqa: F821
    def add_transform(
            self,
            transform: 'Transform',
            parameters_dict: dict,
            ) -> None:
        self.applied_transforms.append((transform.name, parameters_dict))

    def load(self) -> None:
        """Load all images in the subject into memory."""
        for image in self.get_images(intensity_only=False):
            image.load()

    def update_attributes(self) -> None:
        # This allows images to be accessed using attribute notation, e.g. subject.t1
        self.__dict__.update(self)

    def add_image(self, image: Image, image_name: str) -> None:
        """Add an image."""
        self[image_name] = image
        self.update_attributes()

    def remove_image(self, image_name: str) -> None:
        """Remove an image."""
        del self[image_name]
        delattr(self, image_name)

    def plot(self, **kwargs) -> None:
        """Plot images using matplotlib.

        Args:
            **kwargs: Keyword arguments that will be passed on to
                :class:`~torchio.data.image.Image`.
        """
        from ..visualization import plot_subject  # avoid circular import
        plot_subject(self, **kwargs)
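
# Usage sketch (illustrative; file paths and keys are hypothetical): the pieces
# above combine into a workflow roughly like this:
#
#     >>> import torchio as tio
#     >>> subject = tio.Subject(
#     ...     t1=tio.ScalarImage('t1.nii.gz'),
#     ...     seg=tio.LabelMap('seg.nii.gz'),
#     ... )
#     >>> subject.check_consistent_space()       # images must share shape and affine
#     >>> transform = tio.Compose([tio.RandomAffine(), tio.RandomNoise()])
#     >>> transformed = transform(subject)
#     >>> transformed.history                    # rebuilt from applied_transforms
#     >>> restored = transformed.apply_inverse_transform(warn=False)
#     >>> transformed.plot()                     # needs matplotlib installed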