from copy import deepcopy

import numpy as np
from nibabel.affines import apply_affine

from ....data.image import Image
from ....data.subject import Subject
from .bounds_transform import BoundsTransform
from .bounds_transform import TypeBounds


class Crop(BoundsTransform):
    r"""Crop an image.

    Args:
        cropping: Tuple
            :math:`(w_{ini}, w_{fin}, h_{ini}, h_{fin}, d_{ini}, d_{fin})`
            defining the number of values cropped from the edges of each axis.
            If the initial shape of the image is
            :math:`W \times H \times D`, the final shape will be
            :math:`(- w_{ini} + W - w_{fin}) \times (- h_{ini} + H - h_{fin})
            \times (- d_{ini} + D - d_{fin})`.
            If only three values :math:`(w, h, d)` are provided, then
            :math:`w_{ini} = w_{fin} = w`,
            :math:`h_{ini} = h_{fin} = h` and
            :math:`d_{ini} = d_{fin} = d`.
            If only one value :math:`n` is provided, then
            :math:`w_{ini} = w_{fin} = h_{ini} = h_{fin}
            = d_{ini} = d_{fin} = n`.
        copy: If ``True``, each image will be cropped and the patch copied to a new
            subject. If ``False``, each image will be cropped in place. This transform
            overrides the ``copy`` argument of the base transform and copies only the
            cropped patch instead of the whole image. This can provide a significant
            speedup when cropping small patches from large images.
        **kwargs: See :class:`~torchio.transforms.Transform` for additional
            keyword arguments.

    .. seealso:: If you want to pass the output shape instead, please use
        :class:`~torchio.transforms.CropOrPad`.
    """

    def __init__(self, cropping: TypeBounds, copy=True, **kwargs):
        self._copy_patch = copy
        # The Transform base class deepcopies the whole subject by default.
        # We want to copy only the cropped patch, so we override that behavior here.
        super().__init__(cropping, copy=False, **kwargs)
        self.cropping = cropping
        self.args_names = ['cropping']

    def apply_transform(self, subject: Subject) -> Subject:
        assert self.bounds_parameters is not None
        low = self.bounds_parameters[::2]
        high = self.bounds_parameters[1::2]
        # Voxel indices where the retained region starts and stops along each axis
        index_ini = low
        index_fin = np.array(subject.spatial_shape) - high

        if self._copy_patch:
            # Create a clean, empty subject to copy the images into.
            # Using __new__ avoids calling __init__, so we don't have to
            # provide the images up front.
            cropped_subject = subject.__class__.__new__(subject.__class__)
            image_keys_to_crop = subject.get_images_dict(
                intensity_only=False,
                include=self.include,
                exclude=self.exclude,
            ).keys()
            keys_to_expose = subject.keys()
            # Copy all attributes that we don't want to crop.
            # __dict__ contains all attributes, not just the images.
            for key, value in subject.__dict__.items():
                if key not in image_keys_to_crop:
                    copied_value = deepcopy(value)
                    # Setting an attribute through __dict__ does not make it
                    # accessible by key indexing, so we also set the item
                    # explicitly when it should be exposed.
                    if key in keys_to_expose:
                        cropped_subject[key] = copied_value
                    cropped_subject.__dict__[str(key)] = copied_value
                else:
                    # Images are always exposed, so we don't worry about setting __dict__
                    cropped_subject[key] = self._crop_image(
                        value,
                        index_ini,
                        index_fin,
                        copy_patch=self._copy_patch,
                    )

            # Update the __dict__ attribute to include the cropped images
            cropped_subject.update_attributes()
            return cropped_subject
        else:
            # Crop in place
            for image in self.get_images(subject):
                self._crop_image(
                    image,
                    index_ini,
                    index_fin,
                    copy_patch=self._copy_patch,
                )
            return subject

    @staticmethod
    def _crop_image(
        image: Image, index_ini: tuple, index_fin: tuple, *, copy_patch: bool
    ) -> Image:
        # The new origin is the world coordinate of the first voxel that is kept
        new_origin = apply_affine(image.affine, index_ini)
        new_affine = image.affine.copy()
        new_affine[:3, 3] = new_origin
        i0, j0, k0 = index_ini
        i1, j1, k1 = index_fin

        # Crop the image data
        if copy_patch:
            # Create a new image with the cropped data
            cropped_data = image.data[:, i0:i1, j0:j1, k0:k1].clone()
            new_image = type(image)(
                tensor=cropped_data,
                affine=new_affine,
                type=image.type,
                path=image.path,
            )
            return new_image
        else:
            image.set_data(image.data[:, i0:i1, j0:j1, k0:k1].clone())
            image.affine = new_affine
            return image

    def inverse(self):
        from .pad import Pad

        return Pad(self.cropping)