@@ 75-91 (lines=17) @@
 72 |         )
 73 |         return cropped_sample
 74 |
 75 |     @staticmethod
 76 |     def copy_and_crop(
 77 |             sample: Subject,
 78 |             index_ini: np.ndarray,
 79 |             index_fin: np.ndarray,
 80 |             ) -> dict:
 81 |         cropped_sample = {}
 82 |         iterable = sample.get_images_dict(intensity_only=False).items()
 83 |         for image_name, image in iterable:
 84 |             cropped_sample[image_name] = copy.deepcopy(image)
 85 |             sample_image_dict = image
 86 |             cropped_image_dict = cropped_sample[image_name]
 87 |             cropped_image_dict[DATA] = crop(
 88 |                 sample_image_dict[DATA], index_ini, index_fin)
 89 |         # torch doesn't like uint16
 90 |         cropped_sample['index_ini'] = index_ini.astype(int)
 91 |         return cropped_sample
 92 |
 93 |     @staticmethod
 94 |     def _grid_spatial_coordinates(
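The `# torch doesn't like uint16` comment explains the cast applied to the patch indices before they are stored on the sample. A minimal sketch of the failure mode it works around, assuming a torch build contemporary with this code (no uint16 tensor dtype); `index_ini` here is an illustrative value, not taken from the report:

    import numpy as np
    import torch

    index_ini = np.array([3, 5, 7], dtype=np.uint16)

    # torch.from_numpy(index_ini) would raise
    #     TypeError: can't convert np.ndarray of type numpy.uint16.
    # so the samplers cast the indices before anything converts them to a
    # tensor (e.g. a DataLoader's default collate function).
    print(torch.from_numpy(index_ini.astype(int)).dtype)  # torch.int64 on most platforms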
|
@@ 53-69 (lines=17) @@
 50 |         shape = np.array(first_image_array.shape[1:], dtype=np.uint16)
 51 |         return get_random_indices_from_shape(shape, patch_size)
 52 |
 53 |     @staticmethod
 54 |     def copy_and_crop(
 55 |             sample: Subject,
 56 |             index_ini: np.ndarray,
 57 |             index_fin: np.ndarray,
 58 |             ) -> dict:
 59 |         cropped_sample = copy.deepcopy(sample)
 60 |         iterable = sample.get_images_dict(intensity_only=False).items()
 61 |         for image_name, image in iterable:
 62 |             cropped_sample[image_name] = copy.deepcopy(image)
 63 |             sample_image_dict = image
 64 |             cropped_image_dict = cropped_sample[image_name]
 65 |             cropped_image_dict[DATA] = crop(
 66 |                 sample_image_dict[DATA], index_ini, index_fin)
 67 |         # torch doesn't like uint16
 68 |         cropped_sample['index_ini'] = index_ini.astype(int)
 69 |         return cropped_sample
 70 |
 71 |
 72 | def crop(
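This hunk cuts off at the module-level `def crop(` that the static methods call. A plausible sketch of such a helper, assuming channel-first `(channels, i, j, k)` tensors and three-element index arrays; this illustrates the slicing pattern implied by the call sites, not the repository's exact definition:

    import numpy as np
    import torch

    def crop(
            image: torch.Tensor,
            index_ini: np.ndarray,
            index_fin: np.ndarray,
            ) -> torch.Tensor:
        # Slice the three spatial dimensions, keeping every channel.
        i_ini, j_ini, k_ini = index_ini
        i_fin, j_fin, k_fin = index_fin
        return image[:, i_ini:i_fin, j_ini:j_fin, k_ini:k_fin]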
|
@@ 183-195 (lines=13) @@
180 |         cropped_sample = self.copy_and_crop(index_ini)
181 |         return cropped_sample
182 |
183 |     def copy_and_crop(self, index_ini: np.ndarray) -> dict:
184 |         index_fin = index_ini + self.patch_size
185 |         cropped_sample = copy.deepcopy(self.sample)
186 |         iterable = self.sample.get_images_dict(intensity_only=False).items()
187 |         for image_name, image in iterable:
188 |             cropped_sample[image_name] = copy.deepcopy(image)
189 |             sample_image_dict = image
190 |             cropped_image_dict = cropped_sample[image_name]
191 |             cropped_image_dict[DATA] = self.crop(
192 |                 sample_image_dict[DATA], index_ini, index_fin)
193 |         # torch doesn't like uint16
194 |         cropped_sample['index_ini'] = index_ini.astype(int)
195 |         return cropped_sample
196 |
197 |     @staticmethod
198 |     def crop(
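All three hunks implement the same copy-and-crop routine, so the duplication flagged here could collapse into one shared module-level helper. A sketch under stated assumptions: `copy_and_crop_subject` is a hypothetical name, `DATA` is the key constant the hunks use, and `crop` is the helper sketched after the second hunk. Since `copy.deepcopy(sample)` already copies every image, the per-image `copy.deepcopy(image)` in the hunks is redundant and is dropped here:

    import copy

    import numpy as np
    from torchio import DATA, Subject  # assumed importable, as in the hunks

    def copy_and_crop_subject(
            sample: Subject,
            index_ini: np.ndarray,
            index_fin: np.ndarray,
            ) -> dict:
        # Hypothetical shared helper replacing the three duplicated bodies.
        cropped_sample = copy.deepcopy(sample)
        iterable = sample.get_images_dict(intensity_only=False).items()
        for image_name, image in iterable:
            # Crop each image's tensor in the copy; `crop` as sketched above.
            cropped_sample[image_name][DATA] = crop(
                image[DATA], index_ini, index_fin)
        # torch doesn't like uint16
        cropped_sample['index_ini'] = index_ini.astype(int)
        return cropped_sample

The instance-method variant (third hunk) would compute `index_fin = index_ini + self.patch_size` before delegating to such a helper, leaving each sampler with only its index-selection logic.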
|