Passed: Pull Request — main (#1308), by unknown, created 01:29

Crop.apply_transform() (rating: B)

Complexity
    Conditions: 6

Size
    Total Lines: 37
    Code Lines: 24

Duplication
    Lines: 0
    Ratio: 0 %

Importance
    Changes: 0

Metric    Value
cc        6
eloc      24
nop       2
dl        0
loc       37
rs        8.3706
c         0
b         0
f         0
from copy import deepcopy

import nibabel as nib
import numpy as np

from ....data.image import Image
from ....data.subject import Subject
from .bounds_transform import BoundsTransform
from .bounds_transform import TypeBounds


class Crop(BoundsTransform):
    r"""Crop an image.

    Args:
        cropping: Tuple
            :math:`(w_{ini}, w_{fin}, h_{ini}, h_{fin}, d_{ini}, d_{fin})`
            defining the number of values cropped from the edges of each axis.
            If the initial shape of the image is
            :math:`W \times H \times D`, the final shape will be
            :math:`(- w_{ini} + W - w_{fin}) \times (- h_{ini} + H - h_{fin})
            \times (- d_{ini} + D - d_{fin})`.
            If only three values :math:`(w, h, d)` are provided, then
            :math:`w_{ini} = w_{fin} = w`,
            :math:`h_{ini} = h_{fin} = h` and
            :math:`d_{ini} = d_{fin} = d`.
            If only one value :math:`n` is provided, then
            :math:`w_{ini} = w_{fin} = h_{ini} = h_{fin}
            = d_{ini} = d_{fin} = n`.
        copy: bool, optional
            This transform overrides the ``copy`` argument of the base
            transform and copies only the cropped patch, instead of the
            whole image. This can provide a significant speedup when
            cropping small patches from large images.
            If ``True``, each image will be cropped and the patch copied
            to a new subject.
            If ``False``, each image will be cropped in place.
            Default: ``True``.
        **kwargs: See :class:`~torchio.transforms.Transform` for additional
            keyword arguments.

    .. seealso:: If you want to pass the output shape instead, use
        :class:`~torchio.transforms.CropOrPad`.
    """

    def __init__(self, cropping: TypeBounds, copy=True, **kwargs):
        self.copy_patch = copy
        # The Transform base class deep-copies the whole subject by default.
        # We want to copy only the cropped patch, so we override that behavior.
        super().__init__(cropping, copy=False, **kwargs)
        self.cropping = cropping
        self.args_names = ['cropping']

    def apply_transform(self, subject: Subject) -> Subject:
        assert self.bounds_parameters is not None
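        # bounds_parameters is (w_ini, w_fin, h_ini, h_fin, d_ini, d_fin):
        # the even entries are crops from the start of each axis and the
        # odd entries are crops from the end.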
        low = self.bounds_parameters[::2]
        high = self.bounds_parameters[1::2]
        index_ini = low
        index_fin = np.array(subject.spatial_shape) - high

        if self.copy_patch:
            # Create a clean new subject to copy the images into.
            # We use __new__ to avoid calling __init__, so we don't have to
            # specify the images immediately.
            cropped_subject = subject.__class__.__new__(subject.__class__)
            image_keys_to_crop = subject.get_images_dict(
                intensity_only=False, include=self.include, exclude=self.exclude
            ).keys()
            keys_to_expose = subject.keys()
            # Copy all attributes we don't want to crop.
            # __dict__ returns all attributes, not just the images.
            for key, value in subject.__dict__.items():
                if key not in image_keys_to_crop:
                    copied_value = deepcopy(value)
                    # Setting __dict__ does not allow key indexing the
                    # attribute, so we set it explicitly if we want to
                    # expose it.
                    if key in keys_to_expose:
                        cropped_subject[key] = copied_value
                    cropped_subject.__dict__[str(key)] = copied_value
                else:
                    # Images are always exposed, so we don't worry about
                    # setting __dict__
                    cropped_subject[key] = self._crop_image(value, index_ini, index_fin)

            # Update the __dict__ attribute to include the cropped images
            cropped_subject.update_attributes()
            return cropped_subject
        else:
            # Crop in place
            for image in self.get_images(subject):
                self._crop_image(image, index_ini, index_fin)
            return subject

    def _crop_image(self, image: Image, index_ini: tuple, index_fin: tuple) -> Image:
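        # Shift the affine's origin to the world coordinates of the first
        # kept voxel, so the cropped image stays aligned in world space.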
        new_origin = nib.affines.apply_affine(image.affine, index_ini)
        new_affine = image.affine.copy()
        new_affine[:3, 3] = new_origin
        i0, j0, k0 = index_ini
        i1, j1, k1 = index_fin

        # Crop the image data
        if self.copy_patch:
            # Create a new image with the cropped data
            cropped_data = image.data[:, i0:i1, j0:j1, k0:k1].clone()
            new_image = type(image)(
                tensor=cropped_data,
                affine=new_affine,
                type=image.type,
                path=image.path,
            )
            return new_image
        else:
            image.set_data(image.data[:, i0:i1, j0:j1, k0:k1].clone())
            image.affine = new_affine
            return image

    def inverse(self):
        from .pad import Pad

        return Pad(self.cropping)
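
Usage note: a minimal sketch of the documented cropping semantics, assuming
the transform is exposed as torchio.Crop and torchio.ScalarImage as in the
public TorchIO API; the tensor shape and subject name below are illustrative
only.

    import torch
    import torchio as tio

    # Toy subject with one 3D image of spatial shape (W, H, D) = (10, 20, 30).
    subject = tio.Subject(t1=tio.ScalarImage(tensor=torch.rand(1, 10, 20, 30)))

    # cropping = (w_ini, w_fin, h_ini, h_fin, d_ini, d_fin) = (1, 2, 3, 4, 5, 6)
    # gives final shape (10 - 1 - 2, 20 - 3 - 4, 30 - 5 - 6) = (7, 13, 19).
    crop = tio.Crop((1, 2, 3, 4, 5, 6))
    cropped = crop(subject)
    assert cropped.t1.spatial_shape == (7, 13, 19)

    # inverse() pads by the same amounts, restoring the original shape
    # (but not the discarded voxel values).
    restored = crop.inverse()(cropped)
    assert restored.t1.spatial_shape == (10, 20, 30)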