Passed
Push — master (1d7e64...04b07f) by Fernando
01:12
created

torchio.data.image.Image.get_spacing_string() — rated A

Complexity
    Conditions: 1

Size
    Total Lines: 4
    Code Lines: 4

Duplication
    Lines: 0
    Ratio: 0 %

Importance
    Changes: 0

Metric  Value
cc      1
eloc    4
nop     1
dl      0
loc     4
rs      10
c       0
b       0
f       0
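The A rating and the cyclomatic complexity of 1 reflect that the method under review is a straight-line, four-line formatter with no branches. As a rough illustration of what it produces, here is a minimal standalone sketch of the same formatting logic; the spacing value is a made-up example, not taken from this report:

# Standalone sketch of the formatting performed by get_spacing_string().
# The spacing tuple below is an assumed example value.
spacing = (1.0, 1.0, 2.5)
strings = [f'{n:.2f}' for n in spacing]
print(f'({", ".join(strings)})')  # prints: (1.00, 1.00, 2.50)

The full source of torchio/data/image.py as analyzed follows.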
import warnings
from pathlib import Path
from typing import Any, Dict, Tuple, Optional

import torch
import numpy as np
import nibabel as nib
import SimpleITK as sitk

from ..utils import nib_to_sitk, get_rotation_and_spacing_from_affine
from ..torchio import (
    TypePath,
    TypeTripletInt,
    TypeTripletFloat,
    DATA,
    TYPE,
    AFFINE,
    PATH,
    STEM,
    INTENSITY,
)
from .io import read_image


class Image(dict):
    r"""Class to store information about an image.

    Args:
        path: Path to a file that can be read by
            :mod:`SimpleITK` or :mod:`nibabel` or to a directory containing
            DICOM files.
        type: Type of image, such as :attr:`torchio.INTENSITY` or
            :attr:`torchio.LABEL`. This will be used by the transforms to
            decide whether to apply an operation, or which interpolation to use
            when resampling.
        tensor: If :attr:`path` is not given, :attr:`tensor` must be a 3D
            :py:class:`torch.Tensor` with spatial dimensions :math:`(D, H, W)`.
            A channels dimension is prepended automatically.
        affine: If :attr:`path` is not given, :attr:`affine` must be a
            :math:`4 \times 4` NumPy array. If ``None``, :attr:`affine` is an
            identity matrix.
        **kwargs: Items that will be added to the image dictionary within the
            subject sample.
    """
    def __init__(
            self,
            path: Optional[TypePath] = None,
            type: str = INTENSITY,
            tensor: Optional[torch.Tensor] = None,
            affine: Optional[np.ndarray] = None,
            **kwargs: Dict[str, Any],
            ):
        if path is None and tensor is None:
            raise ValueError('A value for path or tensor must be given')
        if path is not None:
            if tensor is not None or affine is not None:
                message = 'If a path is given, tensor and affine must be None'
                raise ValueError(message)
        self._tensor = self.parse_tensor(tensor)
        self._affine = self.parse_affine(affine)
        if self._affine is None:
            self._affine = np.eye(4)
        for key in (DATA, AFFINE, TYPE, PATH, STEM):
            if key in kwargs:
                raise ValueError(f'Key {key} is reserved. Use a different one')

        super().__init__(**kwargs)
        self.path = self._parse_path(path)
        self.type = type
        self.is_sample = False  # set to True by ImagesDataset

    def __repr__(self):
        properties = [
            f'shape: {self.shape}',
            f'spacing: {self.get_spacing_string()}',
            f'orientation: {"".join(self.orientation)}+',
        ]
        properties = '; '.join(properties)
        string = f'{self.__class__.__name__}({properties})'
        return string

    @property
    def data(self):
        return self[DATA]

    @property
    def affine(self):
        return self[AFFINE]

    @property
    def shape(self) -> Tuple[int, int, int, int]:
        return tuple(self[DATA].shape)

    @property
    def spatial_shape(self) -> TypeTripletInt:
        return self.shape[1:]

    @property
    def orientation(self):
        return nib.aff2axcodes(self[AFFINE])

    @property
    def spacing(self):
        _, spacing = get_rotation_and_spacing_from_affine(self.affine)
        return tuple(spacing)

    def get_spacing_string(self):
        strings = [f'{n:.2f}' for n in self.spacing]
        string = f'({", ".join(strings)})'
        return string

    @staticmethod
    def _parse_path(path: Optional[TypePath]) -> Optional[Path]:
        if path is None:
            return None
        try:
            path = Path(path).expanduser()
        except TypeError:
            message = f'Conversion to path not possible for variable: {path}'
            raise TypeError(message)
        if not (path.is_file() or path.is_dir()):  # might be a dir with DICOM
            raise FileNotFoundError(f'File not found: {path}')
        return path

    @staticmethod
    def parse_tensor(tensor: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
        if tensor is None:
            return None
        num_dimensions = tensor.dim()
        if num_dimensions != 3:
            message = (
                'The input tensor must have 3 dimensions (D, H, W),'
                f' but has {num_dimensions}: {tensor.shape}'
            )
            raise RuntimeError(message)
        tensor = tensor.unsqueeze(0)  # add channels dimension
        return tensor

    @staticmethod
    def parse_affine(affine: Optional[np.ndarray]) -> np.ndarray:
        if affine is None:
            return np.eye(4)
        if not isinstance(affine, np.ndarray):
            raise TypeError(f'Affine must be a NumPy array, not {type(affine)}')
        if affine.shape != (4, 4):
            raise ValueError(f'Affine shape must be (4, 4), not {affine.shape}')
        return affine

    def load(self, check_nans: bool = True) -> Tuple[torch.Tensor, np.ndarray]:
        r"""Load the image from disk.

        The file is expected to be monomodal/grayscale and 2D or 3D.
        A channels dimension is added to the tensor.

        Args:
            check_nans: If ``True``, a warning is issued if NaNs are found
                in the image.

        Returns:
            Tuple containing a 4D data tensor of size
            :math:`(1, D_{in}, H_{in}, W_{in})`
            and a :math:`4 \times 4` affine matrix.
        """
        if self.path is None:
            return self._tensor, self._affine
        tensor, affine = read_image(self.path)
        # https://github.com/pytorch/pytorch/issues/9410#issuecomment-404968513
        tensor = tensor[(None,) * (3 - tensor.ndim)]  # force to be 3D
        # Remove the next line and uncomment the two commented-out lines below
        # once/if this issue gets fixed:
        # https://github.com/pytorch/pytorch/issues/29010
        # See also https://discuss.pytorch.org/t/collating-named-tensors/78650/4
        tensor = tensor.unsqueeze(0)  # add channels dimension
        # name_dimensions(tensor, affine)
        # tensor = tensor.align_to('channels', ...)
        if check_nans and torch.isnan(tensor).any():
            warnings.warn(f'NaNs found in file "{self.path}"')
        return tensor, affine

    def is_2d(self) -> bool:
        return self.shape[-3] == 1

    def numpy(self) -> np.ndarray:
        return self[DATA].numpy()

    def as_sitk(self) -> sitk.Image:
        return nib_to_sitk(self[DATA], self[AFFINE])

    def get_center(self, lps: bool = False) -> TypeTripletFloat:
        """Get image center in RAS (default) or LPS coordinates."""
        image = self.as_sitk()
        size = np.array(image.GetSize())
        center_index = (size - 1) / 2
        l, p, s = image.TransformContinuousIndexToPhysicalPoint(center_index)
        if lps:
            return (l, p, s)
        else:
            return (-l, -p, s)
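For context on the API shown above, here is a minimal usage sketch inferred only from this listing; the top-level import path, the tensor shape, and the commented file name are assumptions rather than details taken from the report:

# Hypothetical usage sketch; import path and values are assumptions.
import numpy as np
import torch
from torchio import Image, INTENSITY  # assumed top-level exports

# In-memory construction: parse_tensor() expects a 3D (D, H, W) tensor and
# prepends the channels dimension itself; a missing affine defaults to identity.
volume = torch.rand(32, 32, 32)
image = Image(tensor=volume, affine=np.eye(4), type=INTENSITY)
tensor, affine = image.load()      # returns the stored tensor and affine
print(tensor.shape, affine.shape)  # torch.Size([1, 32, 32, 32]) (4, 4)

# Construction from a file readable by SimpleITK or nibabel (placeholder path):
# image = Image('t1.nii.gz', type=INTENSITY)
# tensor, affine = image.load(check_nans=True)

Note that properties such as shape and spacing, and therefore get_spacing_string(), read the DATA and AFFINE dictionary entries, which according to the listing are populated elsewhere (the is_sample comment points to ImagesDataset), so the sketch only exercises the constructor and load().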