import copy

from ....data.subject import Subject
from ... import SpatialTransform

class CopyAffine(SpatialTransform):
    """Propagate the affine matrix of one image to all images in a subject.

    Images belonging to the same subject sometimes carry slightly different
    spatial metadata, typically because of floating-point rounding introduced
    when files are converted between formats.

    When the images share the same ``shape`` and ``orientation`` and their
    ``affine`` matrices differ only by tiny numerical amounts, applying this
    transform makes the metadata identical, so that consistency checks in
    other transforms and samplers no longer fail.

    Args:
        target: Name of the image within the subject whose ``affine`` matrix
            will be copied onto every other image.

    Example:
        >>> import torch
        >>> import torchio as tio
        >>> import numpy as np
        >>> np.random.seed(0)
        >>> affine = np.diag((*(np.random.rand(3) + 0.5), 1))
        >>> tensor = torch.rand(1, 100, 100, 100)
        >>> t1 = tio.ScalarImage(tensor=tensor, affine=affine)
        >>> # Simulate a loss of precision (e.g. NIfTI storing spatial
        >>> # metadata in single precision)
        >>> bad_affine = affine.astype(np.float16)
        >>> t2 = tio.ScalarImage(tensor=tensor, affine=bad_affine)
        >>> subject = tio.Subject(t1=t1, t2=t2)
        >>> resample = tio.Resample(0.5)
        >>> # resample(subject) would raise, as the images are in
        >>> # slightly different spaces
        >>> fixed = tio.CopyAffine('t1')(subject)
        >>> resample(fixed).shape
        (1, 210, 244, 221)

    .. warning:: This transform should be used with caution. Manually
        overwriting the spatial metadata of an image can lead to incorrect
        processing of the position of anatomical structures. For example, a
        machine learning algorithm might incorrectly predict that a lesion
        on the right lung is on the left lung.

    .. note:: For more information, see some related discussions on GitHub:

        * https://github.com/fepegar/torchio/issues/354
        * https://github.com/fepegar/torchio/discussions/489
        * https://github.com/fepegar/torchio/pull/584
        * https://github.com/fepegar/torchio/issues/430
        * https://github.com/fepegar/torchio/issues/382
        * https://github.com/fepegar/torchio/pull/592
    """

    def __init__(self, target: str, **kwargs):
        super().__init__(**kwargs)
        # Fail early on a non-string target so the error surfaces at
        # construction time rather than when the transform is applied.
        if not isinstance(target, str):
            raise ValueError(
                f'The target must be a string, but "{type(target)}" was found'
            )
        self.target = target

    def apply_transform(self, subject: Subject) -> Subject:
        # The reference image must exist before anything is modified.
        if self.target not in subject:
            raise RuntimeError(
                f'Target image "{self.target}" not found in subject'
            )
        reference_affine = subject[self.target].affine
        # Deep-copy so no two images end up sharing the same affine object
        # (mutating one later would otherwise silently mutate the others).
        for img in self.get_images(subject):
            img.affine = copy.deepcopy(reference_affine)
        return subject