|
1
|
|
|
# coding=utf-8 |
|
2
|
|
|
|
|
3
|
|
|
""" |
|
4
|
|
|
Tests for deepreg/model/layer |
|
5
|
|
|
""" |
|
6
|
|
|
import numpy as np |
|
7
|
|
|
import pytest |
|
8
|
|
|
import tensorflow as tf |
|
9
|
|
|
|
|
10
|
|
|
import deepreg.model.layer as layer |
|
11
|
|
|
|
|
12
|
|
|
|
|
13
|
|
|
def test_deconv3d():
    """Check layer.Deconv3d builds with the expected default attributes."""
    # Shape the layer is built against: (batch, D, H, W, channels).
    input_shape = (5,) + (32, 32, 16) + (4,)

    deconv = layer.Deconv3d(8, (64, 64, 32), strides=2)
    deconv.build(input_shape)

    # build() derives the output padding and creates the inner Keras layer.
    assert tuple(deconv._output_padding) == (1, 1, 1)
    assert isinstance(deconv._deconv3d, tf.keras.layers.Conv3DTranspose)
    assert tuple(deconv._kernel_size) == (3, 3, 3)
    assert tuple(deconv._strides) == (2, 2, 2)
    assert deconv._padding == "same"
    assert deconv._deconv3d.use_bias is True
|
35
|
|
|
def test_conv3d_block():
    """Check layer.Conv3dBlock wires its inner Conv3D with default settings."""
    block = layer.Conv3dBlock(8)

    inner = block._conv3d
    assert inner.kernel_size == (3, 3, 3)
    assert inner.strides == (1, 1, 1)
    assert inner.padding == "same"
    assert inner.use_bias is False
48
|
|
|
def test_deconv3d_block():
    """Check layer.Deconv3dBlock and its lazily-built inner Deconv3d."""
    block = layer.Deconv3dBlock(8)

    # The wrapped Deconv3d exists up front, but its Keras layer is only
    # created once build() is called.
    wrapped = block._deconv3d
    assert isinstance(wrapped, layer.Deconv3d)
    assert wrapped._deconv3d is None

    wrapped.build((8, 8))

    assert isinstance(wrapped._deconv3d, tf.keras.layers.Conv3DTranspose)
    assert tuple(wrapped._kernel_size) == (3, 3, 3)
    assert tuple(wrapped._strides) == (1, 1, 1)
    assert wrapped._padding == "same"
    assert wrapped._deconv3d.use_bias is False
69
|
|
|
def test_residual3d_block():
    """Check layer.Residual3dBlock default attributes."""
    block = layer.Residual3dBlock(8)

    # First stage: a full conv block.
    assert isinstance(block._conv3d_block, layer.Conv3dBlock)
    assert block._conv3d_block._conv3d.kernel_size == (3, 3, 3)
    assert block._conv3d_block._conv3d.strides == (1, 1, 1)

    # Second stage: a plain conv layer.
    assert block._conv3d.use_bias is False
    assert block._conv3d.kernel_size == (3, 3, 3)
    assert block._conv3d.strides == (1, 1, 1)
84
|
|
|
def test_downsample_resnet_block():
    """Check layer.DownSampleResnetBlock in pooling and non-pooling modes."""
    # Default: pooling enabled, so no third conv block is created.
    model = layer.DownSampleResnetBlock(8)
    assert model._pooling is True
    assert isinstance(model._conv3d_block, layer.Conv3dBlock)
    assert isinstance(model._residual_block, layer.Residual3dBlock)
    assert model._conv3d_block3 is None

    # pooling=False: the max-pool is absent and a conv block takes its place.
    model = layer.DownSampleResnetBlock(8, pooling=False)
    assert model._max_pool3d is None
    assert isinstance(model._conv3d_block3, layer.Conv3dBlock)
101
|
|
|
def test_upsample_resnet_block():
    """Check layer.UpSampleResnetBlock default attributes after build()."""
    batch = 5
    channels = 4

    # Non-skip input is half the spatial size of the skip connection.
    input_shape = (batch,) + (32, 32, 16) + (channels,)
    skip_shape = (batch,) + (64, 64, 32) + (channels // 2,)

    model = layer.UpSampleResnetBlock(8)
    model.build([input_shape, skip_shape])

    assert model._filters == 8
    assert model._concat is False
    assert isinstance(model._conv3d_block, layer.Conv3dBlock)
    assert isinstance(model._residual_block, layer.Residual3dBlock)
    assert isinstance(model._deconv3d_block, layer.Deconv3dBlock)
123
|
|
|
def test_init_conv3d_with_resize():
    """Check layer.Conv3dWithResize attributes and the shape of call()."""
    output_size = (62, 62, 24)
    filters = 8
    input_shape = (5,) + (32, 32, 16) + (4,)
    expected_shape = (5,) + output_size + (filters,)

    model = layer.Conv3dWithResize(output_size, filters)
    assert model._output_shape == output_size

    # Run an all-zero input through the layer; only the shape is verified.
    output = model.call(np.zeros(input_shape))
    assert all(a == b for a, b in zip(expected_shape, output.shape))
148
|
|
|
def test_warping():
    """Check layer.Warping grid shape and the shape returned by call()."""
    batch = 5
    fixed_size = (32, 32, 16)
    moving_size = (24, 24, 16)
    ndims = len(moving_size)

    model = layer.Warping(fixed_size)

    # The reference grid is built over the fixed image: (1, *fixed, 3).
    assert all(
        a == b for a, b in zip((1,) + fixed_size + (3,), model.grid_ref.shape)
    )

    # call() takes [ddf, moving_image]; the output lives on the fixed grid.
    inputs = [
        np.ones((batch, *fixed_size, ndims), dtype="float32"),
        np.ones((batch, *moving_size), dtype="float32"),
    ]
    output = model.call(inputs)
    assert all(a == b for a, b in zip((batch,) + fixed_size, output.shape))
173
|
|
|
def test_init_dvf():
    """Check layer.IntDVF default attributes and the shape of call()."""
    batch = 5
    fixed_size = (32, 32, 16)
    ndims = len(fixed_size)

    model = layer.IntDVF(fixed_size)

    assert isinstance(model._warping, layer.Warping)
    assert model._num_steps == 7

    output = model.call(np.ones((batch, *fixed_size, ndims)))
    expected_shape = (batch,) + fixed_size + (ndims,)
    assert all(a == b for a, b in zip(expected_shape, output.shape))
195
|
|
|
def test_additive_upsampling():
    """Check layer.AdditiveUpSampling attributes, call() shape and errors."""
    batch = 5
    channels = 8
    output_size = (32, 32, 16)
    input_size = (24, 24, 16)

    # __init__() defaults.
    model = layer.AdditiveUpSampling(output_size)
    assert model._stride == 2
    assert model._output_shape == output_size

    # call(): spatial dims are resized to output_size, channels are halved.
    inputs = np.ones((batch, *input_size, channels))
    output = model(inputs)
    expected_shape = (batch,) + output_size + (channels / 2,)
    assert all(a == b for a, b in zip(expected_shape, output.shape))

    # stride=3 is expected to be rejected when the layer is called.
    model = layer.AdditiveUpSampling(output_size, stride=3)
    with pytest.raises(ValueError):
        model(inputs)
225
|
|
|
def test_local_net_residual3d_block():
    """Check layer.LocalNetResidual3dBlock default conv settings."""
    block = layer.LocalNetResidual3dBlock(8)

    conv = block._conv3d
    assert conv.kernel_size == (3, 3, 3)
    assert conv.strides == (1, 1, 1)
    assert conv.padding == "same"
    assert conv.use_bias is False
240
|
|
|
def test_local_net_upsample_resnet_block():
    """Check layer.LocalNetUpSampleResnetBlock attributes after build()."""
    batch = 5
    channels = 4
    nonskip_shape = (batch,) + (32, 32, 16) + (channels,)
    skip_shape = (batch,) + (64, 64, 32) + (channels,)

    model = layer.LocalNetUpSampleResnetBlock(8)
    model.build([nonskip_shape, skip_shape])

    assert model._filters == 8
    assert model._use_additive_upsampling is True

    assert isinstance(model._deconv3d_block, layer.Deconv3dBlock)
    assert isinstance(model._additive_upsampling, layer.AdditiveUpSampling)
    assert isinstance(model._conv3d_block, layer.Conv3dBlock)
    assert isinstance(model._residual_block, layer.LocalNetResidual3dBlock)
266
|
|
|
class TestResizeCPTransform:
    """Tests for layer.ResizeCPTransform: attributes, build() and call()."""

    @pytest.mark.parametrize(
        "parameter,cp_spacing", [((8, 8, 8), 8), ((8, 24, 16), (8, 24, 16))]
    )
    def test_attributes(self, parameter, cp_spacing):
        """cp_spacing and kernel_sigma are derived from the constructor arg."""
        model = layer.ResizeCPTransform(cp_spacing)

        # An int spacing is broadcast to all three spatial dimensions.
        if isinstance(cp_spacing, int):
            cp_spacing = [cp_spacing] * 3
        assert list(model.cp_spacing) == list(parameter)
        assert model.kernel_sigma == [0.44 * cp for cp in cp_spacing]

    @pytest.mark.parametrize(
        "input_size,output_size,cp_spacing",
        [
            ((1, 68, 68, 68, 3), (12, 8, 12), (8, 16, 8)),
            ((1, 68, 68, 68, 3), (12, 12, 12), 8),
        ],
    )
    def test_build(self, input_size, output_size, cp_spacing):
        """build() computes the control-point grid shape for the input.

        Fixed: the original asserted on a list comprehension, which is
        always truthy for a non-empty zip — the test could never fail.
        It also used an (1, 8, 8, 8, 3) input that does not yield the
        expected grid; the input sizes now match test_call, whose shapes
        show (1, 68, 68, 68, 3) produces the (12, 8, 12)/(12, 12, 12) grids.
        """
        model = layer.ResizeCPTransform(cp_spacing)
        model.build(input_size)

        assert all(a == b for a, b in zip(model._output_shape, output_size))

    @pytest.mark.parametrize(
        "input_size,output_size,cp_spacing",
        [
            ((1, 68, 68, 68, 3), (1, 12, 8, 12, 3), (8, 16, 8)),
            ((1, 68, 68, 68, 3), (1, 12, 12, 12, 3), 8),
        ],
    )
    def test_call(self, input_size, output_size, cp_spacing):
        """call() resizes a dense field down to the control-point grid."""
        model = layer.ResizeCPTransform(cp_spacing)
        model.build(input_size)

        # Renamed from `input`, which shadowed the builtin.
        field = tf.random.normal(shape=input_size, dtype=tf.float32)
        output = model(field)

        assert output.shape == output_size
308
|
|
|
class TestBSplines3DTransform:
    """
    Test the layer.BSplines3DTransform class,
    its default attributes and its call() function.
    """

    @pytest.mark.parametrize(
        "input_size,cp",
        [((1, 8, 8, 8, 3), 8), ((1, 8, 8, 8, 3), (8, 16, 12))],
    )
    def test_init(self, input_size, cp):
        """An int control-point spacing is expanded to a 3-tuple."""
        model = layer.BSplines3DTransform(cp, input_size[1:-1])

        if isinstance(cp, int):
            cp = (cp, cp, cp)

        assert model.cp_spacing == cp

    def generate_filter_coefficients(self, cp_spacing):
        """Reference implementation of the cubic B-spline filter bank.

        Returns an array of shape (4*cp0, 4*cp1, 4*cp2, 3, 3) holding the
        separable cubic B-spline weights, diagonal across the channel dims.

        Fixed: this helper previously carried a @pytest.mark.parametrize
        decorator, which has no effect on a non-test method; it was removed.
        The normalized coordinates and the weight product are also hoisted
        out of the inner loops (pure arithmetic, identical values).
        """
        # The four cubic B-spline basis functions.
        b = {
            0: lambda u: np.float64((1 - u) ** 3 / 6),
            1: lambda u: np.float64((3 * (u ** 3) - 6 * (u ** 2) + 4) / 6),
            2: lambda u: np.float64((-3 * (u ** 3) + 3 * (u ** 2) + 3 * u + 1) / 6),
            3: lambda u: np.float64(u ** 3 / 6),
        }

        filters = np.zeros(
            (
                4 * cp_spacing[0],
                4 * cp_spacing[1],
                4 * cp_spacing[2],
                3,
                3,
            ),
            dtype=np.float32,
        )

        for u in range(cp_spacing[0]):
            u_norm = 1 - (u + 0.5) / cp_spacing[0]
            for v in range(cp_spacing[1]):
                v_norm = 1 - (v + 0.5) / cp_spacing[1]
                for w in range(cp_spacing[2]):
                    w_norm = 1 - (w + 0.5) / cp_spacing[2]
                    for x in range(4):
                        for y in range(4):
                            for z in range(4):
                                weight = b[x](u_norm) * b[y](v_norm) * b[z](w_norm)
                                # Same weight on each channel's diagonal entry.
                                for it_dim in range(3):
                                    filters[
                                        x * cp_spacing[0] + u,
                                        y * cp_spacing[1] + v,
                                        z * cp_spacing[2] + w,
                                        it_dim,
                                        it_dim,
                                    ] = weight
        return filters

    @pytest.mark.parametrize(
        "input_size,cp",
        [((1, 8, 8, 8, 3), (8, 8, 8)), ((1, 8, 8, 8, 3), (8, 16, 12))],
    )
    def test_build(self, input_size, cp):
        """build() creates a filter of shape (4*cp0, 4*cp1, 4*cp2, 3, 3)."""
        model = layer.BSplines3DTransform(cp, input_size[1:-1])

        model.build(input_size)
        assert model.filter.shape == (
            4 * cp[0],
            4 * cp[1],
            4 * cp[2],
            3,
            3,
        )

    @pytest.mark.parametrize(
        "input_size,cp",
        [((1, 8, 8, 8, 3), (8, 8, 8)), ((1, 8, 8, 8, 3), (8, 16, 12))],
    )
    def test_coefficients(self, input_size, cp):
        """The layer's built filter matches the reference implementation."""
        filters = self.generate_filter_coefficients(cp_spacing=cp)

        model = layer.BSplines3DTransform(cp, input_size[1:-1])
        model.build(input_size)

        assert np.allclose(filters, model.filter.numpy(), atol=1e-8)

    @pytest.mark.parametrize(
        "input_size,cp",
        [((1, 8, 8, 8, 3), (8, 8, 8)), ((1, 8, 8, 8, 3), (8, 16, 12))],
    )
    def test_interpolation(self, input_size, cp):
        """call() upsamples a control-point field to the full volume shape."""
        model = layer.BSplines3DTransform(cp, input_size[1:-1])
        model.build(input_size)

        vol_shape = input_size[1:-1]
        # Control points per axis: ceil(size / spacing) + 3 (cubic support).
        num_cp = (
            [input_size[0]]
            + [int(np.ceil(isize / cpsize) + 3) for isize, cpsize in zip(vol_shape, cp)]
            + [input_size[-1]]
        )

        field = tf.random.normal(shape=num_cp, dtype=tf.float32)

        ddf = model.call(field)
        assert ddf.shape == input_size
Prefixing a member variable with an underscore (`_`) is usually regarded as the equivalent of declaring it with the protected visibility that exists in other languages. Consequently, such a member should only be accessed from the same class or a child class.