@@ 75-91 (lines=17) @@

```python
        )
        return cropped_sample

    @staticmethod
    def copy_and_crop(
            sample: Subject,
            index_ini: np.ndarray,
            index_fin: np.ndarray,
            ) -> dict:
        cropped_sample = {}
        iterable = sample.get_images_dict(intensity_only=False).items()
        for image_name, image in iterable:
            cropped_sample[image_name] = copy.deepcopy(image)
            sample_image_dict = image
            cropped_image_dict = cropped_sample[image_name]
            cropped_image_dict[DATA] = crop(
                sample_image_dict[DATA], index_ini, index_fin)
        # torch doesn't like uint16
        cropped_sample['index_ini'] = index_ini.astype(int)
        return cropped_sample

    @staticmethod
    def _grid_spatial_coordinates(
```
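This hunk is the only one of the three that builds the result from an empty dict (`cropped_sample = {}`); the other two below deep-copy the whole subject first. The `# torch doesn't like uint16` comment refers to the patch indices being computed as `np.uint16` (see the `shape` line in the next hunk): most PyTorch builds have no unsigned 16-bit dtype, so the array must be cast before it reaches a tensor. A minimal repro of the failure mode, assuming a PyTorch version without `uint16` support:

```python
import numpy as np
import torch

index_ini = np.array([3, 5, 7], dtype=np.uint16)
# torch.as_tensor(index_ini)  # TypeError: numpy.uint16 is not supported
print(torch.as_tensor(index_ini.astype(int)))  # OK: tensor([3, 5, 7])
```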
|
@@ 53-69 (lines=17) @@

```python
        shape = np.array(first_image_array.shape[1:], dtype=np.uint16)
        return get_random_indices_from_shape(shape, patch_size)

    @staticmethod
    def copy_and_crop(
            sample: Subject,
            index_ini: np.ndarray,
            index_fin: np.ndarray,
            ) -> dict:
        cropped_sample = copy.deepcopy(sample)
        iterable = sample.get_images_dict(intensity_only=False).items()
        for image_name, image in iterable:
            cropped_sample[image_name] = copy.deepcopy(image)
            sample_image_dict = image
            cropped_image_dict = cropped_sample[image_name]
            cropped_image_dict[DATA] = crop(
                sample_image_dict[DATA], index_ini, index_fin)
        # torch doesn't like uint16
        cropped_sample['index_ini'] = index_ini.astype(int)
        return cropped_sample


def crop(
```
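The module-level `crop` that both static methods call is truncated above. A minimal sketch of what such a helper would do, assuming a channels-first tensor whose last three axes are spatial and bounds given as three-element index arrays:

```python
import numpy as np
import torch


def crop(
        data: torch.Tensor,
        index_ini: np.ndarray,
        index_fin: np.ndarray,
        ) -> torch.Tensor:
    # Slice the three spatial axes; the leading channel axis is untouched
    i_ini, j_ini, k_ini = index_ini
    i_fin, j_fin, k_fin = index_fin
    return data[..., i_ini:i_fin, j_ini:j_fin, k_ini:k_fin]
```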
|
@@ 167-179 (lines=13) @@

```python
        cropped_sample = self.copy_and_crop(index_ini)
        return cropped_sample

    def copy_and_crop(self, index_ini: np.ndarray) -> dict:
        index_fin = index_ini + self.patch_size
        cropped_sample = copy.deepcopy(self.sample)
        iterable = self.sample.get_images_dict(intensity_only=False).items()
        for image_name, image in iterable:
            cropped_sample[image_name] = copy.deepcopy(image)
            sample_image_dict = image
            cropped_image_dict = cropped_sample[image_name]
            cropped_image_dict[DATA] = self.crop(
                sample_image_dict[DATA], index_ini, index_fin)
        # torch doesn't like uint16
        cropped_sample['index_ini'] = index_ini.astype(int)
        return cropped_sample

    @staticmethod
    def crop(
```
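All three hunks repeat the same copy-then-crop loop over the subject's images, so the natural fix is one shared helper that every sampler delegates to. A sketch using the names from the hunks, with `copy_and_crop_images` as a hypothetical name; it assumes the module-level `crop` sketched after the second hunk and binds `DATA` locally to the image-dictionary key:

```python
import copy

import numpy as np

DATA = 'data'  # the key the hunks use to index each image dictionary


def copy_and_crop_images(
        sample,
        index_ini: np.ndarray,
        index_fin: np.ndarray,
        ) -> dict:
    # Single replacement for the three duplicated copy_and_crop bodies
    cropped_sample = copy.deepcopy(sample)
    iterable = sample.get_images_dict(intensity_only=False).items()
    for image_name, image in iterable:
        cropped_sample[image_name] = copy.deepcopy(image)
        # crop() is the module-level helper sketched after the second hunk
        cropped_sample[image_name][DATA] = crop(
            image[DATA], index_ini, index_fin)
    # torch doesn't like uint16
    cropped_sample['index_ini'] = index_ini.astype(int)
    return cropped_sample
```

With this in place, the instance-method version above reduces to `return copy_and_crop_images(self.sample, index_ini, index_ini + self.patch_size)`, and the two static methods to a plain call with their existing arguments.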
|