Passed
Pull Request — master (#246), created by Fernando (01:10)

torchio.utils.get_torchio_cache_dir()   A

Complexity:   Conditions 1
Size:         Total Lines 2, Code Lines 2
Duplication:  Lines 0, Ratio 0 %
Importance:   Changes 0

Metric  Value
cc      1
eloc    2
nop     0
dl      0
loc     2
rs      10
c       0
b       0
f       0
import ast
import shutil
import tempfile
from pathlib import Path
from typing import Union, Iterable, Tuple, Any, Optional, List

import torch
import numpy as np
import nibabel as nib
import SimpleITK as sitk
from tqdm import trange
from .torchio import (
    INTENSITY,
    LABEL,
    TypeData,
    TypeNumber,
    TypePath,
    REPO_URL,
)


FLIP_XY = np.diag((-1, -1, 1))  # used to switch between LPS and RAS


def to_tuple(
        value: Union[TypeNumber, Iterable[TypeNumber]],
        length: int = 1,
        ) -> Tuple[TypeNumber, ...]:
    """
    to_tuple(1, length=1) -> (1,)
    to_tuple(1, length=3) -> (1, 1, 1)

    If value is an iterable, length is ignored and tuple(value) is returned
    to_tuple((1,), length=1) -> (1,)
    to_tuple((1, 2), length=1) -> (1, 2)
    to_tuple([1, 2], length=3) -> (1, 2)
    """
    try:
        iter(value)
        value = tuple(value)
    except TypeError:
        value = length * (value,)
    return value


def get_stem(path: TypePath) -> str:
    """
    '/home/user/image.nii.gz' -> 'image'
    """
    path = Path(path)
    return path.name.split('.')[0]


def create_dummy_dataset(
        num_images: int,
        size_range: Tuple[int, int],
        directory: Optional[TypePath] = None,
        suffix: str = '.nii.gz',
        force: bool = False,
        verbose: bool = False,
        ):
    from .data import Image, Subject
    output_dir = tempfile.gettempdir() if directory is None else directory
    output_dir = Path(output_dir)
    images_dir = output_dir / 'dummy_images'
    labels_dir = output_dir / 'dummy_labels'

    if force:
        shutil.rmtree(images_dir)
        shutil.rmtree(labels_dir)

    subjects: List[Subject] = []
    if images_dir.is_dir():
        for i in trange(num_images):
            image_path = images_dir / f'image_{i}{suffix}'
            label_path = labels_dir / f'label_{i}{suffix}'
            subject = Subject(
                one_modality=Image(image_path, INTENSITY),
                segmentation=Image(label_path, LABEL),
            )
            subjects.append(subject)
    else:
        images_dir.mkdir(exist_ok=True, parents=True)
        labels_dir.mkdir(exist_ok=True, parents=True)
        if verbose:
            print('Creating dummy dataset...')
            iterable = trange(num_images)
        else:
            iterable = range(num_images)
        for i in iterable:
            shape = np.random.randint(*size_range, size=3)
            affine = np.eye(4)
            image = np.random.rand(*shape)
            label = np.ones_like(image)
            label[image < 0.33] = 0
            label[image > 0.66] = 2
            image *= 255

            image_path = images_dir / f'image_{i}{suffix}'
            nii = nib.Nifti1Image(image.astype(np.uint8), affine)
            nii.to_filename(str(image_path))

            label_path = labels_dir / f'label_{i}{suffix}'
            nii = nib.Nifti1Image(label.astype(np.uint8), affine)
            nii.to_filename(str(label_path))

            subject = Subject(
                one_modality=Image(image_path, INTENSITY),
                segmentation=Image(label_path, LABEL),
            )
            subjects.append(subject)
    return subjects


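# A minimal usage sketch (illustrative, not part of the module): the function
# writes random NIfTI image/label pairs under the system temporary directory
# (unless `directory` is given) and returns the corresponding Subject instances.
#
#     >>> subjects = create_dummy_dataset(num_images=4, size_range=(8, 16))
#     >>> len(subjects)
#     4
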
def apply_transform_to_file(
        input_path: TypePath,
        transform,  # : Transform seems to create a circular import (TODO)
        output_path: TypePath,
        type: str = INTENSITY,
        verbose: bool = False,
        ):
    from . import Image, Subject
    subject = Subject(image=Image(input_path, type))
    transformed = transform(subject)
    transformed.image.save(output_path)
    if verbose and transformed.history:
        print(transformed.history[0])


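# A hedged usage sketch, assuming an existing input image and any torchio
# transform (RandomFlip is used only as an example; the file names are
# hypothetical):
#
#     >>> import torchio
#     >>> apply_transform_to_file(
#     ...     'subject_t1.nii.gz',
#     ...     torchio.RandomFlip(),
#     ...     'subject_t1_flipped.nii.gz',
#     ... )
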
def guess_type(string: str) -> Any:
    # Adapted from
    # https://www.reddit.com/r/learnpython/comments/4599hl/module_to_guess_type_from_a_string/czw3f5s
    string = string.replace(' ', '')
    try:
        value = ast.literal_eval(string)
    except ValueError:
        result_type = str
    else:
        result_type = type(value)
    if result_type in (list, tuple):
        string = string[1:-1]  # remove brackets
        split = string.split(',')
        list_result = [guess_type(n) for n in split]
        value = tuple(list_result) if result_type is tuple else list_result
        return value
    try:
        value = result_type(string)
    except TypeError:
        value = None
    return value


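# A few illustrative calls (not from the module) showing how strings are parsed:
# literals go through ast.literal_eval, bracketed sequences are parsed
# recursively, and anything else falls back to str.
#
#     >>> guess_type('1.5')
#     1.5
#     >>> guess_type('(1, 2)')
#     (1, 2)
#     >>> guess_type('hello')
#     'hello'
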
def get_rotation_and_spacing_from_affine(
        affine: np.ndarray,
        ) -> Tuple[np.ndarray, np.ndarray]:
    # From https://github.com/nipy/nibabel/blob/master/nibabel/orientations.py
    rotation_zoom = affine[:3, :3]
    spacing = np.sqrt(np.sum(rotation_zoom * rotation_zoom, axis=0))
    rotation = rotation_zoom / spacing
    return rotation, spacing


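# A worked example (illustrative): a purely diagonal affine factors into an
# identity rotation and per-axis spacing (the norm of each column of the
# 3x3 block).
#
#     >>> affine = np.diag((2., 3., 4., 1.))
#     >>> rotation, spacing = get_rotation_and_spacing_from_affine(affine)
#     >>> spacing
#     array([2., 3., 4.])
#     >>> np.array_equal(rotation, np.eye(3))
#     True
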
def nib_to_sitk(data: TypeData, affine: TypeData) -> sitk.Image:
    """Create a SimpleITK image from a tensor and a 4x4 affine matrix.

    Args:
        data: PyTorch tensor or NumPy array
        affine: 4x4 affine matrix as a PyTorch tensor or NumPy array
    """
    array = data.numpy() if isinstance(data, torch.Tensor) else data
    affine = affine.numpy() if isinstance(affine, torch.Tensor) else affine
    origin = np.dot(FLIP_XY, affine[:3, 3]).astype(np.float64)
    rotation, spacing = get_rotation_and_spacing_from_affine(affine)
    direction = np.dot(FLIP_XY, rotation)
    array = array.transpose()  # (W, H, D, C) or (W, H, D)
    image = sitk.GetImageFromArray(array)
    if array.ndim == 2:  # ignore first dimension if 2D (1, 1, H, W)
        direction = direction[1:3, 1:3]
    image.SetOrigin(origin)
    image.SetSpacing(spacing)
    image.SetDirection(direction.flatten())
    if data.ndim == 4:
        assert image.GetNumberOfComponentsPerPixel() == data.shape[0]
    assert image.GetSize() == data.shape[-3:]
    return image


def sitk_to_nib(image: sitk.Image) -> Tuple[np.ndarray, np.ndarray]:
    data = sitk.GetArrayFromImage(image).transpose()
    num_components = image.GetNumberOfComponentsPerPixel()
    if num_components == 1:
        data = data[np.newaxis]  # add channels dimension
    input_spatial_dims = image.GetDimension()
    data = ensure_4d(data, False, num_spatial_dims=input_spatial_dims)
    assert data.shape[0] == num_components
    assert data.shape[-input_spatial_dims:] == image.GetSize()
    spacing = np.array(image.GetSpacing())
    direction = np.array(image.GetDirection())
    origin = image.GetOrigin()
    if len(direction) == 9:
        rotation = direction.reshape(3, 3)
    elif len(direction) == 4:  # ignore first dimension if 2D (1, 1, H, W)
        rotation_2d = direction.reshape(2, 2)
        rotation = np.eye(3)
        rotation[1:3, 1:3] = rotation_2d
        spacing = 1, *spacing
        origin = 0, *origin
    # Analysis note: `rotation` does not seem to be defined for all execution paths
    rotation = np.dot(FLIP_XY, rotation)
    rotation_zoom = rotation * spacing
    translation = np.dot(FLIP_XY, origin)
    affine = np.eye(4)
    affine[:3, :3] = rotation_zoom
    affine[:3, 3] = translation
    return data, affine


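# A round-trip sketch for nib_to_sitk/sitk_to_nib (illustrative), assuming a 3D
# volume with an identity affine: the data comes back with a channels dimension
# prepended, and the LPS/RAS flip cancels out so the affine is recovered.
#
#     >>> volume = torch.rand(4, 5, 6)
#     >>> image = nib_to_sitk(volume, np.eye(4))
#     >>> image.GetSize()
#     (4, 5, 6)
#     >>> data, affine = sitk_to_nib(image)
#     >>> data.shape
#     (1, 4, 5, 6)
#     >>> np.allclose(affine, np.eye(4))
#     True
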
def ensure_4d(
        tensor: TypeData,
        channels_last: bool,
        num_spatial_dims=None,
        ) -> TypeData:
    """Reshape a tensor or array so that its shape is ``(C, D, H, W)``.

    Args:
        tensor: Input tensor or array with two to five dimensions.
        channels_last: If ``True``, last dimension of the input represents
            channels.
        num_spatial_dims: Number of spatial dimensions of the input (2 or 3).
            If ``None``, it is guessed from the shape.

    Raises:
        ValueError: If the number of dimensions is not supported.
    """
    # I wish named tensors were properly supported in PyTorch
    num_dimensions = tensor.ndim
    if num_dimensions == 5:  # hope (X, X, X, 1, X)
        if tensor.shape[-2] == 1:
            tensor = tensor[..., 0, :]  # drop the singleton fourth dimension
            num_dimensions = tensor.ndim
    if num_dimensions == 4:  # assume 3D multichannel
        if channels_last:  # (D, H, W, C)
            tensor = tensor.permute(3, 0, 1, 2)  # (C, D, H, W)
    elif num_dimensions == 2:  # assume 2D monochannel (H, W)
        tensor = tensor[np.newaxis, np.newaxis]  # (1, 1, H, W)
    elif num_dimensions == 3:  # 2D multichannel or 3D monochannel?
        if num_spatial_dims == 2:
            if channels_last:  # (H, W, C)
                tensor = tensor.permute(2, 0, 1)  # (C, H, W)
            tensor = tensor[:, np.newaxis]  # (C, 1, H, W)
        elif num_spatial_dims == 3:  # (D, H, W)
            tensor = tensor[np.newaxis]  # (1, D, H, W)
        else:  # try to guess
            shape = tensor.shape
            maybe_rgb = 3 in shape
            if maybe_rgb:
                if shape[-1] == 3:  # (H, W, 3)
                    tensor = tensor.permute(2, 0, 1)  # (3, H, W)
                tensor = tensor[:, np.newaxis]  # (3, 1, H, W)
            else:  # (D, H, W)
                tensor = tensor[np.newaxis]  # (1, D, H, W)
    else:
        message = (
            f'{num_dimensions}D images not supported yet. Please create an'
            f' issue in {REPO_URL} if you would like support for them'
        )
        raise ValueError(message)
    assert tensor.ndim == 4
    return tensor


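# A few shape examples for ensure_4d (illustrative), assuming PyTorch tensors
# as input:
#
#     >>> ensure_4d(torch.rand(5, 5), channels_last=False).shape          # (H, W)
#     torch.Size([1, 1, 5, 5])
#     >>> ensure_4d(torch.rand(4, 5, 6), channels_last=False).shape       # guessed (D, H, W)
#     torch.Size([1, 4, 5, 6])
#     >>> ensure_4d(torch.rand(5, 6, 3), True, num_spatial_dims=2).shape  # (H, W, C)
#     torch.Size([3, 1, 5, 6])
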
def get_torchio_cache_dir():
    return Path('~/.cache/torchio').expanduser()


def round_up(value: float) -> float:
    """Round half towards positive infinity.

    Args:
        value: The value to round.

    Example:

        >>> round(2.5)
        2
        >>> round(3.5)
        4
        >>> round_up(2.5)
        3.0
        >>> round_up(3.5)
        4.0

    """
    return np.floor(value + 0.5)