|
1
|
|
|
# Copyright 2014 Diamond Light Source Ltd. |
|
2
|
|
|
# |
|
3
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); |
|
4
|
|
|
# you may not use this file except in compliance with the License. |
|
5
|
|
|
# You may obtain a copy of the License at |
|
6
|
|
|
# |
|
7
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0 |
|
8
|
|
|
# |
|
9
|
|
|
# Unless required by applicable law or agreed to in writing, software |
|
10
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS, |
|
11
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
12
|
|
|
# See the License for the specific language governing permissions and |
|
13
|
|
|
# limitations under the License. |
|
14
|
|
|
|
|
15
|
|
|
""" |
|
16
|
|
|
.. module:: image_stitching |
|
17
|
|
|
:platform: Unix |
|
18
|
|
|
:synopsis: A plugin for stitching two tomo-datasets. |
|
19
|
|
|
.. moduleauthor:: Nghia Vo <[email protected]> |
|
20
|
|
|
|
|
21
|
|
|
""" |
|
22
|
|
|
import copy |
|
23
|
|
|
import numpy as np |
|
24
|
|
|
import scipy.ndimage as ndi |
|
25
|
|
|
from savu.plugins.plugin import Plugin |
|
26
|
|
|
from savu.plugins.driver.cpu_plugin import CpuPlugin |
|
27
|
|
|
from savu.plugins.utils import register_plugin |
|
28
|
|
|
|
|
29
|
|
|
|
|
30
|
|
|
@register_plugin
class ImageStitching(Plugin, CpuPlugin):
    """Plugin for stitching two tomo-datasets (projection or sinogram
    space) into a single dataset, blending the overlap region with a
    linear ramp.
    """

    def __init__(self):
        # Register the plugin under its class name with the framework.
        super(ImageStitching, self).__init__('ImageStitching')
|
34
|
|
|
|
|
35
|
|
|
    def setup(self):
        """Configure plugin input/output datasets and the output shape.

        Reads the plugin parameters ('pattern', 'crop', 'overlap',
        'row_offset', 'norm', 'flat_use'), requests stacks of 2 frames
        from the input, and creates an output dataset whose width is
        twice the input width minus the overlap and the left/right
        crops. The stacking (slice) dimension is removed from the
        output's axis labels and shape.
        """
        in_dataset, out_dataset = self.get_datasets()
        in_pData, out_pData = self.get_plugin_datasets()
        # Working space: 'SINOGRAM' or projection space (anything else).
        self.space = self.parameters['pattern']

        # Two frames per processing unit: the pair of images to stitch.
        if self.space == 'SINOGRAM':
            in_pData[0].plugin_data_setup('SINOGRAM_STACK', 2,
                                          fixed_length=False)
        else:
            in_pData[0].plugin_data_setup('PROJECTION_STACK', 2,
                                          fixed_length=False)
        # Dimension being stacked over; it disappears from the output.
        rm_dim = in_dataset[0].get_slice_dimensions()[0]
        patterns = ['SINOGRAM.' + str(rm_dim), 'PROJECTION.' + str(rm_dim)]

        # Crop order: top, bottom, left, right (pixels).
        self.c_top, self.c_bot, self.c_left, self.c_right = self.parameters[
            'crop']
        self.overlap = int(self.parameters['overlap'])
        self.row_offset = int(self.parameters['row_offset'])
        self.norm = self.parameters['norm']
        self.flat_use = self.parameters['flat_use']

        axis_labels = copy.copy(in_dataset[0].get_axis_labels())
        del axis_labels[rm_dim]
        width_dim = in_dataset[0].get_data_dimension_by_axis_label(
            'detector_x')
        height_dim = in_dataset[0].get_data_dimension_by_axis_label(
            'detector_y')
        shape = list(in_dataset[0].get_shape())

        # Stitched width: two images side by side, minus the shared
        # overlap and the cropped left/right borders.
        shape[width_dim] = 2 * shape[width_dim] - \
            self.overlap - self.c_left - self.c_right
        if self.space == 'PROJECTION':
            # Height is only cropped when working in projection space.
            shape[height_dim] = shape[height_dim] - self.c_top - self.c_bot
        del shape[rm_dim]
        out_dataset[0].create_dataset(
            patterns={in_dataset[0]: patterns},
            axis_labels=axis_labels,
            shape=tuple(shape))
        # Output is a single (stitched) frame per processed pair.
        if self.space == 'SINOGRAM':
            out_pData[0].plugin_data_setup('SINOGRAM', 'single',
                                           fixed_length=False)
        else:
            out_pData[0].plugin_data_setup('PROJECTION', 'single',
                                           fixed_length=False)
|
79
|
|
|
|
|
80
|
|
|
def make_weight_matrix(self, height1, width1, height2, width2, overlap, |
|
81
|
|
|
side): |
|
82
|
|
|
""" |
|
83
|
|
|
Generate a linear-ramp weighting matrix for image stitching. |
|
84
|
|
|
|
|
85
|
|
|
Parameters |
|
86
|
|
|
---------- |
|
87
|
|
|
height1, width1 : int |
|
88
|
|
|
Size of the 1st image. |
|
89
|
|
|
height2, width2 : int |
|
90
|
|
|
Size of the 2nd image. |
|
91
|
|
|
overlap : int |
|
92
|
|
|
Width of the overlap area between two images. |
|
93
|
|
|
side : {0, 1} |
|
94
|
|
|
Only two options: 0 or 1. It is used to indicate the overlap side |
|
95
|
|
|
respects to image 1. "0" corresponds to the left side. "1" |
|
96
|
|
|
corresponds to the right side. |
|
97
|
|
|
""" |
|
98
|
|
|
overlap = int(np.floor(overlap)) |
|
99
|
|
|
wei_mat1 = np.ones((height1, width1), dtype=np.float32) |
|
100
|
|
|
wei_mat2 = np.ones((height2, width2), dtype=np.float32) |
|
101
|
|
|
if side == 1: |
|
102
|
|
|
list_down = np.linspace(1.0, 0.0, overlap) |
|
103
|
|
|
list_up = 1.0 - list_down |
|
104
|
|
|
wei_mat1[:, -overlap:] = np.float32(list_down) |
|
105
|
|
|
wei_mat2[:, :overlap] = np.float32(list_up) |
|
106
|
|
|
else: |
|
107
|
|
|
list_down = np.linspace(1.0, 0.0, overlap) |
|
108
|
|
|
list_up = 1.0 - list_down |
|
109
|
|
|
wei_mat2[:, -overlap:] = np.float32(list_down) |
|
110
|
|
|
wei_mat1[:, :overlap] = np.float32(list_up) |
|
111
|
|
|
return wei_mat1, wei_mat2 |
|
112
|
|
|
|
|
113
|
|
|
def stitch_image(self, mat1, mat2, overlap, side, wei_mat1, wei_mat2, norm): |
|
114
|
|
|
""" |
|
115
|
|
|
Stitch projection images or sinogram images using a linear ramp. |
|
116
|
|
|
|
|
117
|
|
|
Parameters |
|
118
|
|
|
---------- |
|
119
|
|
|
mat1 : array_like |
|
120
|
|
|
2D array. Projection image or sinogram image. |
|
121
|
|
|
mat2 : array_like |
|
122
|
|
|
2D array. Projection image or sinogram image. |
|
123
|
|
|
overlap : float |
|
124
|
|
|
Width of the overlap area between two images. |
|
125
|
|
|
side : {0, 1} |
|
126
|
|
|
Only two options: 0 or 1. It is used to indicate the overlap side |
|
127
|
|
|
respects to image 1. "0" corresponds to the left side. "1" |
|
128
|
|
|
corresponds to the right side. |
|
129
|
|
|
wei_mat1 : array_like |
|
130
|
|
|
Weighting matrix used for image 1. |
|
131
|
|
|
wei_mat2 : array_like |
|
132
|
|
|
Weighting matrix used for image 2. |
|
133
|
|
|
norm : bool, optional |
|
134
|
|
|
Enable/disable normalization before stitching. |
|
135
|
|
|
|
|
136
|
|
|
Returns |
|
137
|
|
|
------- |
|
138
|
|
|
array_like |
|
139
|
|
|
Stitched image. |
|
140
|
|
|
""" |
|
141
|
|
|
(nrow1, ncol1) = mat1.shape |
|
142
|
|
|
(nrow2, ncol2) = mat2.shape |
|
143
|
|
|
overlap_int = int(np.floor(overlap)) |
|
144
|
|
|
sub_pixel = overlap - overlap_int |
|
145
|
|
|
if sub_pixel > 0.0: |
|
146
|
|
|
if side == 1: |
|
147
|
|
|
mat1 = ndi.shift(mat1, (0, sub_pixel), mode='nearest') |
|
148
|
|
|
mat2 = ndi.shift(mat2, (0, -sub_pixel), mode='nearest') |
|
149
|
|
|
else: |
|
150
|
|
|
mat1 = ndi.shift(mat1, (0, -sub_pixel), mode='nearest') |
|
151
|
|
|
mat2 = ndi.shift(mat2, (0, sub_pixel), mode='nearest') |
|
152
|
|
|
if nrow1 != nrow2: |
|
153
|
|
|
raise ValueError("Two images are not at the same height!!!") |
|
154
|
|
|
total_width0 = ncol1 + ncol2 - overlap_int |
|
155
|
|
|
mat_comb = np.zeros((nrow1, total_width0), dtype=np.float32) |
|
156
|
|
|
if side == 1: |
|
157
|
|
|
if norm is True: |
|
158
|
|
|
factor1 = np.mean(mat1[:, -overlap_int:]) |
|
159
|
|
|
factor2 = np.mean(mat2[:, :overlap_int]) |
|
160
|
|
|
mat2 = mat2 * factor1 / factor2 |
|
161
|
|
|
mat_comb[:, 0:ncol1] = mat1 * wei_mat1 |
|
162
|
|
|
mat_comb[:, (ncol1 - overlap_int):total_width0] += mat2 * wei_mat2 |
|
163
|
|
|
else: |
|
164
|
|
|
if norm is True: |
|
165
|
|
|
factor2 = np.mean(mat2[:, -overlap_int:]) |
|
166
|
|
|
factor1 = np.mean(mat1[:, :overlap_int]) |
|
167
|
|
|
mat2 = mat2 * factor1 / factor2 |
|
168
|
|
|
mat_comb[:, 0:ncol2] = mat2 * wei_mat2 |
|
169
|
|
|
mat_comb[:, (ncol2 - overlap_int):total_width0] += mat1 * wei_mat1 |
|
170
|
|
|
return mat_comb |
|
171
|
|
|
|
|
172
|
|
|
    def pre_process(self):
        """Prepare per-run state before frame processing.

        Builds the slice objects that split each incoming stack into
        its two frames, optionally prepares cropped dark/flat-field
        correction arrays and the stitching weight matrices, and copies
        the rotation-angle metadata to the output dataset.
        """
        inData = self.get_in_datasets()[0]
        self.data_size = inData.get_shape()
        shape = len(self.get_plugin_in_datasets()[0].get_shape())
        # Assumes a 4D input shape; last dimension is the 2-frame
        # stack — TODO confirm against the framework's data layout.
        (self.depth0, self.height0, self.width0, _) = inData.get_shape()
        # sl1/sl2 pick frame 0 and frame 1 out of the stacked pair.
        self.sl1 = [slice(None)] * (shape - 1) + [0]
        self.sl2 = [slice(None)] * (shape - 1) + [1]
        if self.flat_use is True:
            dark = inData.data.dark_mean()
            flat = inData.data.flat_mean()
            (h_df, w_df) = dark.shape
            # Top half of the dark/flat belongs to image 1, bottom half
            # to image 2. NOTE(review): for odd h_df the two halves are
            # of unequal size (floor vs. ceil) — verify intended.
            dark1 = dark[:h_df // 2]
            flat1 = flat[:h_df // 2]
            dark2 = dark[-h_df // 2:]
            flat2 = flat[-h_df // 2:]
            if self.space == 'SINOGRAM':
                # Sinogram space: only left/right crops apply
                # (multiplying by 1.0 also forces a float copy).
                self.dark1 = 1.0 * dark1[:, self.c_left:]
                self.flat1 = 1.0 * flat1[:, self.c_left:]
                self.dark2 = 1.0 * dark2[:, :w_df - self.c_right]
                self.flat2 = 1.0 * flat2[:, :w_df - self.c_right]
            else:
                # Projection space: crop vertically too, and shift the
                # second image's correction rows by the row offset to
                # mirror the shift applied to the data frames.
                self.dark1 = 1.0 * \
                    dark1[self.c_top: h_df // 2 - self.c_bot,
                          self.c_left:]
                self.flat1 = 1.0 * \
                    flat1[self.c_top: h_df // 2 - self.c_bot,
                          self.c_left:]
                dark2 = np.roll(dark2, self.row_offset, axis=0)
                flat2 = np.roll(flat2, self.row_offset, axis=0)
                self.dark2 = 1.0 * dark2[self.c_top: h_df // 2 -
                                         self.c_bot,
                                         :w_df - self.c_right]
                self.flat2 = 1.0 * flat2[self.c_top: h_df // 2 -
                                         self.c_bot,
                                         :w_df - self.c_right]
            self.flatdark1 = self.flat1 - self.dark1
            self.flatdark2 = self.flat2 - self.dark2
            # Replace zero denominators with the mean magnitude to
            # avoid division by zero during normalization.
            nmean = np.mean(np.abs(self.flatdark1))
            self.flatdark1[self.flatdark1 == 0.0] = nmean
            nmean = np.mean(np.abs(self.flatdark2))
            self.flatdark2[self.flatdark2 == 0.0] = nmean
            # Weight matrices for the right-side (side=1) overlap.
            # NOTE(review): these are also needed by process_frames
            # when flat_use is False — confirm this branch placement.
            if self.space == 'SINOGRAM':
                (_, w_df1) = self.dark1.shape
                (_, w_df2) = self.dark2.shape
                (self.wei1, self.wei2) = self.make_weight_matrix(
                    self.depth0, w_df1, self.depth0, w_df2, self.overlap, 1)
            else:
                (h_df1, w_df1) = self.dark1.shape
                (h_df2, w_df2) = self.dark2.shape
                (self.wei1, self.wei2) = self.make_weight_matrix(
                    h_df1, w_df1, h_df2, w_df2, self.overlap, 1)

        # Propagate the rotation angles to the stitched dataset.
        outData = self.get_out_datasets()[0]
        angles = inData.meta_data.get("rotation_angle")[:, 0]
        outData.meta_data.set("rotation_angle", angles)
|
227
|
|
|
|
|
228
|
|
|
    def process_frames(self, data):
        """Stitch one pair of frames.

        Splits the 2-frame stack into the two images, applies the
        crops (and, in projection space, the row offset), optionally
        flat/dark-field corrects them, and returns the stitched frame.
        """
        # Frame 0 and frame 1 of the stacked pair (see pre_process).
        mat1 = data[0][tuple(self.sl1)]
        mat2 = data[0][tuple(self.sl2)]
        if self.space == 'SINOGRAM':
            # Sinogram space: crop left edge of image 1 and right edge
            # of image 2 only.
            mat1 = np.float32(mat1[:, self.c_left:])
            mat2 = np.float32(mat2[:, :self.width0 - self.c_right])
            if self.flat_use is True:
                # Each sinogram row pairs with one row of the 2D
                # dark/flat arrays — index by the global frame number.
                count = self.get_process_frames_counter()
                current_idx = self.get_global_frame_index()[count]
                mat1 = (mat1 - self.dark1[current_idx]) \
                    / self.flatdark1[current_idx]
                mat2 = (mat2 - self.dark2[current_idx]) \
                    / self.flatdark2[current_idx]
        else:
            # Projection space: crop vertically and horizontally; roll
            # image 2 to compensate for the detector row offset.
            mat1 = np.float32(
                mat1[self.c_top:self.height0 - self.c_bot, self.c_left:])
            mat2 = np.roll(mat2, self.row_offset, axis=0)
            mat2 = np.float32(mat2[self.c_top:self.height0 -
                                   self.c_bot,
                                   :self.width0 - self.c_right])
            if self.flat_use is True:
                mat1 = (mat1 - self.dark1) / self.flatdark1
                mat2 = (mat2 - self.dark2) / self.flatdark2
        # Always stitch with the overlap on the right of image 1.
        mat = self.stitch_image(mat1, mat2, self.overlap,
                                1, self.wei1, self.wei2, self.norm)
        return mat
|
254
|
|
|
|