| Metric | Value |
| --- | --- |
| Total Complexity | 4 |
| Total Lines | 39 |
| Duplicated Lines | 0 % |
| Changes | 0 |
```python
from ....data.subject import Subject
from ...spatial_transform import SpatialTransform
from .to_orientation import ToOrientation


class Transpose(SpatialTransform):
    """Swap the first and last spatial dimensions of the image.

    The spatial metadata is updated accordingly, so the world coordinates of
    all voxels in the input and output spaces match.

    Example:

    >>> import torchio as tio
    >>> image = tio.datasets.FPG().t1
    >>> image
    ScalarImage(shape: (1, 256, 256, 176); spacing: (1.00, 1.00, 1.00); orientation: PIR+; path: "/home/fernando/.cache/torchio/fpg/t1.nii.gz")
    >>> transpose = tio.Transpose()
    >>> transposed = transpose(image)
    >>> transposed
    ScalarImage(shape: (1, 176, 256, 256); spacing: (1.00, 1.00, 1.00); orientation: RIP+; dtype: torch.IntTensor; memory: 44.0 MiB)
    """

    def apply_transform(self, subject: Subject) -> Subject:
        for image in self.get_images(subject):
            old_orientation = image.orientation_str
            new_orientation = old_orientation[::-1]
            transform = ToOrientation(new_orientation)
            transposed = transform(image)
            image.set_data(transposed.data)
            image.affine = transposed.affine
        return subject

    def is_invertible(self):
        return True

    def inverse(self):
        return self
```
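Because `inverse` returns the transform itself, `Transpose` is its own inverse: reversing a three-letter orientation code (e.g. `PIR` to `RIP`) leaves the middle axis in place and swaps the outer two, so applying the transform twice restores the original orientation. A minimal usage sketch, assuming `torchio` is installed and the FPG sample image from the docstring example can be downloaded:

```python
import torchio as tio

# Sample image from the docstring example; downloaded on first use.
image = tio.datasets.FPG().t1
original_shape = image.spatial_shape  # (256, 256, 176)

transpose = tio.Transpose()

# The first and last spatial axes are swapped: (256, 256, 176) -> (176, 256, 256).
transposed = transpose(image)
print(original_shape, '->', transposed.spatial_shape)

# Applying the transform again (equivalently, transpose.inverse(), which
# returns the same transform) recovers the original spatial shape.
restored = transpose(transposed)
assert restored.spatial_shape == original_shape
```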