Passed — Push to master (af8682...a51248) by Fernando — created 01:26

torchio.data.subject.Subject.__repr__() — rating: A

Complexity
    Conditions: 1
Size
    Total Lines: 7
    Code Lines: 5
Duplication
    Lines: 0
    Ratio: 0 %
Importance
    Changes: 0

Metric  Value
cc      1
eloc    5
nop     1
dl      0
loc     7
rs      10
c       0
b       0
f       0
import copy
import pprint
from typing import Any, Dict, List, Tuple, Optional, Sequence, TYPE_CHECKING

import numpy as np

from ..constants import TYPE, INTENSITY
from .image import Image
from ..utils import get_subclasses

if TYPE_CHECKING:
    from ..transforms import Transform, Compose


class Subject(dict):
    """Class to store information about the images corresponding to a subject.

    Args:
        *args: If provided, a dictionary of items.
        **kwargs: Items that will be added to the subject sample.

    Example:

        >>> import torchio as tio
        >>> # One way:
        >>> subject = tio.Subject(
        ...     one_image=tio.ScalarImage('path_to_image.nii.gz'),
        ...     a_segmentation=tio.LabelMap('path_to_seg.nii.gz'),
        ...     age=45,
        ...     name='John Doe',
        ...     hospital='Hospital Juan Negrín',
        ... )
        >>> # If you want to create the mapping beforehand, or the keys contain spaces:
        >>> subject_dict = {
        ...     'one image': tio.ScalarImage('path_to_image.nii.gz'),
        ...     'a segmentation': tio.LabelMap('path_to_seg.nii.gz'),
        ...     'age': 45,
        ...     'name': 'John Doe',
        ...     'hospital': 'Hospital Juan Negrín',
        ... }
        >>> subject = tio.Subject(subject_dict)

    """

    def __init__(self, *args, **kwargs: Dict[str, Any]):
        if args:
            if len(args) == 1 and isinstance(args[0], dict):
                kwargs.update(args[0])
            else:
                message = (
                    'Only one dictionary as positional argument is allowed')
                raise ValueError(message)
        super().__init__(**kwargs)
        self._parse_images(self.get_images(intensity_only=False))
        self.update_attributes()  # this allows attribute access, e.g. subject.t1
        self.applied_transforms = []

    def __repr__(self):
        num_images = len(self.get_images(intensity_only=False))
        string = (
            f'{self.__class__.__name__}'
            f'(Keys: {tuple(self.keys())}; images: {num_images})'
        )
        return string

    def __copy__(self):
        result_dict = {}
        for key, value in self.items():
            if isinstance(value, Image):
                value = copy.copy(value)
            else:
                value = copy.deepcopy(value)
            result_dict[key] = value
        new = Subject(result_dict)
        new.applied_transforms = self.applied_transforms[:]
        return new

    def __len__(self):
        return len(self.get_images(intensity_only=False))

    @staticmethod
    def _parse_images(images: List[Tuple[str, Image]]) -> None:
        # Check that it's not empty
        if not images:
            raise ValueError('A subject without images cannot be created')

    @property
    def shape(self):
        """Return shape of first image in subject.

        Consistency of shapes across images in the subject is checked first.

        Example::

            >>> import torchio as tio
            >>> colin = tio.datasets.Colin27()
            >>> colin.shape
            (1, 181, 217, 181)

        """
        self.check_consistent_attribute('shape')
        return self.get_first_image().shape

    @property
    def spatial_shape(self):
        """Return spatial shape of first image in subject.

        Consistency of spatial shapes across images in the subject is checked
        first.

        Example::

            >>> import torchio as tio
            >>> colin = tio.datasets.Colin27()
            >>> colin.spatial_shape
            (181, 217, 181)
        """
        self.check_consistent_spatial_shape()
        return self.get_first_image().spatial_shape

    @property
    def spacing(self):
        """Return spacing of first image in subject.

        Consistency of spacings across images in the subject is checked first.

        Example::

            >>> import torchio as tio
            >>> subject = tio.datasets.Slicer()
            >>> subject.spacing
            (1.0, 1.0, 1.2999954223632812)
        """
        self.check_consistent_attribute('spacing')
        return self.get_first_image().spacing

    @property
    def history(self):
        # Kept for backwards compatibility
        return self.get_applied_transforms()

    def is_2d(self):
        return all(i.is_2d() for i in self.get_images(intensity_only=False))

    def get_applied_transforms(
            self,
            ignore_intensity: bool = False,
            image_interpolation: Optional[str] = None,
            ) -> List['Transform']:
        from ..transforms.transform import Transform
        from ..transforms.intensity_transform import IntensityTransform
        name_to_transform = {
            cls.__name__: cls
            for cls in get_subclasses(Transform)
        }
        transforms_list = []
        for transform_name, arguments in self.applied_transforms:
            transform = name_to_transform[transform_name](**arguments)
            if ignore_intensity and isinstance(transform, IntensityTransform):
                continue
            resamples = hasattr(transform, 'image_interpolation')
            if resamples and image_interpolation is not None:
                parsed = transform.parse_interpolation(image_interpolation)
                transform.image_interpolation = parsed
            transforms_list.append(transform)
        return transforms_list

    def get_composed_history(
            self,
            ignore_intensity: bool = False,
            image_interpolation: Optional[str] = None,
            ) -> 'Compose':
        from ..transforms.augmentation.composition import Compose
        transforms = self.get_applied_transforms(
            ignore_intensity=ignore_intensity,
            image_interpolation=image_interpolation,
        )
        return Compose(transforms)

    def get_inverse_transform(
            self,
            warn: bool = True,
            ignore_intensity: bool = True,
            image_interpolation: Optional[str] = None,
            ) -> 'Compose':
        """Get a reversed list of the inverses of the applied transforms.

        Args:
            warn: Issue a warning if some transforms are not invertible.
            ignore_intensity: If ``True``, all instances of
                :class:`~torchio.transforms.intensity_transform.IntensityTransform`
                will be ignored.
            image_interpolation: Modify interpolation for scalar images inside
                transforms that perform resampling.
        """
        history_transform = self.get_composed_history(
            ignore_intensity=ignore_intensity,
            image_interpolation=image_interpolation,
        )
        inverse_transform = history_transform.inverse(warn=warn)
        return inverse_transform

    def apply_inverse_transform(self, **kwargs) -> 'Subject':
        """Try to apply the inverse of all applied transforms, in reverse order.

        Args:
            **kwargs: Keyword arguments passed on to
                :meth:`~torchio.data.subject.Subject.get_inverse_transform`.
        """
        inverse_transform = self.get_inverse_transform(**kwargs)
        transformed = inverse_transform(self)
        transformed.clear_history()
        return transformed

    def clear_history(self) -> None:
        self.applied_transforms = []

    def check_consistent_attribute(self, attribute: str) -> None:
        values_dict = {}
        iterable = self.get_images_dict(intensity_only=False).items()
        for image_name, image in iterable:
            values_dict[image_name] = getattr(image, attribute)
        num_unique_values = len(set(values_dict.values()))
        if num_unique_values > 1:
            message = (
                f'More than one {attribute} found in subject images:'
                f'\n{pprint.pformat(values_dict)}'
            )
            raise RuntimeError(message)

    def check_consistent_spatial_shape(self) -> None:
        self.check_consistent_attribute('spatial_shape')

    def check_consistent_orientation(self) -> None:
        self.check_consistent_attribute('orientation')

    def check_consistent_affine(self):
        # https://github.com/fepegar/torchio/issues/354
        affine = None
        first_image = None
        iterable = self.get_images_dict(intensity_only=False).items()
        for image_name, image in iterable:
            if affine is None:
                affine = image.affine
                first_image = image_name
            elif not np.allclose(affine, image.affine, rtol=1e-6, atol=1e-6):
                message = (
                    f'Images "{first_image}" and "{image_name}" do not occupy'
                    ' the same physical space.'
                    f'\nAffine of "{first_image}":'
                    f'\n{pprint.pformat(affine)}'
                    f'\nAffine of "{image_name}":'
                    f'\n{pprint.pformat(image.affine)}'
                )
                raise RuntimeError(message)

    def check_consistent_space(self):
        self.check_consistent_spatial_shape()
        self.check_consistent_affine()

    def get_images_names(self) -> List[str]:
        return list(self.get_images_dict(intensity_only=False).keys())

    def get_images_dict(
            self,
            intensity_only=True,
            include: Optional[Sequence[str]] = None,
            exclude: Optional[Sequence[str]] = None,
            ) -> Dict[str, Image]:
        images = {}
        for image_name, image in self.items():
            if not isinstance(image, Image):
                continue
            if intensity_only and not image[TYPE] == INTENSITY:
                continue
            if include is not None and image_name not in include:
                continue
            if exclude is not None and image_name in exclude:
                continue
            images[image_name] = image
        return images

    def get_images(
            self,
            intensity_only=True,
            include: Optional[Sequence[str]] = None,
            exclude: Optional[Sequence[str]] = None,
            ) -> List[Image]:
        images_dict = self.get_images_dict(
            intensity_only=intensity_only,
            include=include,
            exclude=exclude,
        )
        return list(images_dict.values())

    def get_first_image(self) -> Image:
        return self.get_images(intensity_only=False)[0]

    # flake8: noqa: F821
    def add_transform(
            self,
            transform: 'Transform',
            parameters_dict: dict,
            ) -> None:
        self.applied_transforms.append((transform.name, parameters_dict))

    def load(self) -> None:
        """Load all images in the subject into RAM."""
        for image in self.get_images(intensity_only=False):
            image.load()

    def update_attributes(self) -> None:
        # This allows getting images using attribute notation, e.g. subject.t1
        self.__dict__.update(self)

    def add_image(self, image: Image, image_name: str) -> None:
        """Add an image."""
        self[image_name] = image
        self.update_attributes()

    def remove_image(self, image_name: str) -> None:
        """Remove an image."""
        del self[image_name]
        delattr(self, image_name)

    def plot(self, **kwargs) -> None:
        """Plot images using matplotlib.

        Args:
            **kwargs: Keyword arguments that will be passed on to
                :meth:`~torchio.Image.plot`.
        """
        from ..visualization import plot_subject  # avoid circular import
        plot_subject(self, **kwargs)
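
For context, a minimal usage sketch (not part of the reviewed file) showing how the class above is typically exercised: building a Subject from in-memory tensors, reading the consistency-checked properties, and round-tripping a transform through the applied-transforms history. The key names `t1`, `seg`, and `age` are arbitrary, and the sketch assumes a torchio version in which tio.RandomFlip is available and records its parameters in the history.

import torch
import torchio as tio

# Two images sharing the same shape and default (identity) affine, plus scalar metadata.
subject = tio.Subject(
    t1=tio.ScalarImage(tensor=torch.rand(1, 16, 16, 16)),
    seg=tio.LabelMap(tensor=torch.zeros(1, 16, 16, 16, dtype=torch.int64)),
    age=45,
)
print(len(subject))           # 2 images; non-image entries are not counted
print(subject.spatial_shape)  # (16, 16, 16), after the consistency check
subject.check_consistent_space()

# Apply a transform; the returned Subject records it in applied_transforms.
transform = tio.RandomFlip(axes=(0,), flip_probability=1.0)
transformed = transform(subject)
print(transformed.history)    # list containing the recorded flip transform

# Undo the recorded history in reverse order (warns for any non-invertible transforms).
restored = transformed.apply_inverse_transform()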