# coding=utf-8

"""
Tests for deepreg/loss/util.py in
pytest style
"""

from test.unit.util import is_equal_tf

import numpy as np
import pytest
import tensorflow as tf

from deepreg.loss.util import (
    NegativeLossMixin,
    cauchy_kernel1d,
    gaussian_kernel1d_sigma,
    gaussian_kernel1d_size,
    rectangular_kernel1d,
    separable_filter,
    triangular_kernel1d,
)


@pytest.mark.parametrize("sigma", [1, 3, 2.2])
def test_cauchy_kernel1d(sigma):
    """
    Testing the 1-D Cauchy kernel.

    :param sigma: float
    :return:
    """
    tail = int(sigma * 5)
    expected = [1 / ((x / sigma) ** 2 + 1) for x in range(-tail, tail + 1)]
    expected = expected / np.sum(expected)
    got = cauchy_kernel1d(sigma)
    assert is_equal_tf(got, expected)


@pytest.mark.parametrize("sigma", [1, 3, 2.2])
def test_gaussian_kernel1d_sigma(sigma):
    """
    Testing the 1-D Gaussian kernel given sigma as input.

    :param sigma: float
    :return:
    """
    tail = int(sigma * 3)
    expected = [np.exp(-0.5 * x ** 2 / sigma ** 2) for x in range(-tail, tail + 1)]
    expected = expected / np.sum(expected)
    got = gaussian_kernel1d_sigma(sigma)
    assert is_equal_tf(got, expected)


@pytest.mark.parametrize("kernel_size", [3, 7, 11])
def test_gaussian_kernel1d_size(kernel_size):
    """
    Testing the 1-D Gaussian kernel given kernel size as input.

    :param kernel_size: int
    :return:
    """
    mean = (kernel_size - 1) / 2.0
    sigma = kernel_size / 3

    grid = tf.range(0, kernel_size, dtype=tf.float32)
    expected = tf.exp(-tf.square(grid - mean) / (2 * sigma ** 2))

    got = gaussian_kernel1d_size(kernel_size)
    assert is_equal_tf(got, expected)


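# Note for test_gaussian_kernel1d_size above: unlike the sigma-based variant,
# the expected kernel here is not normalised to sum to one; the test compares
# the raw exponential weights exp(-(x - mean)^2 / (2 * sigma^2)) directly.

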
@pytest.mark.parametrize("kernel_size", [3, 7, 11])
def test_rectangular_kernel1d(kernel_size):
    """
    Testing the 1-D rectangular kernel.

    :param kernel_size: int
    :return:
    """
    expected = tf.ones(shape=(kernel_size,), dtype=tf.float32)
    got = rectangular_kernel1d(kernel_size)
    assert is_equal_tf(got, expected)


@pytest.mark.parametrize("kernel_size", [3, 7, 11])
def test_triangular_kernel1d(kernel_size):
    """
    Testing the 1-D triangular kernel.

    :param kernel_size: int (odd number)
    :return:
    """
    expected = np.zeros(shape=(kernel_size,), dtype=np.float32)
    expected[kernel_size // 2] = kernel_size // 2 + 1
    for it_k in range(kernel_size // 2):
        expected[it_k] = it_k + 1
        expected[-it_k - 1] = it_k + 1

    got = triangular_kernel1d(kernel_size)
    assert is_equal_tf(got, expected)


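# Worked example for test_triangular_kernel1d above: for kernel_size=7 the loop
# builds the (unnormalised) triangle [1, 2, 3, 4, 3, 2, 1], peaking at
# kernel_size // 2 + 1 in the centre and decreasing by one towards both ends.

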
def test_separable_filter():
    """
    Testing the separable filter in the case where a
    non-zero-length tensor is passed to the function.
    """
    k = np.ones((3, 3, 3, 3, 1), dtype=np.float32)
    array_eye = np.identity(3, dtype=np.float32)
    tensor_pred = np.zeros((3, 3, 3, 3, 1), dtype=np.float32)
    tensor_pred[:, :, 0, 0, 0] = array_eye
    tensor_pred = tf.convert_to_tensor(tensor_pred, dtype=tf.float32)
    k = tf.convert_to_tensor(k, dtype=tf.float32)

    expect = np.ones((3, 3, 3, 3, 1), dtype=np.float32)
    expect = tf.convert_to_tensor(expect, dtype=tf.float32)

    got = separable_filter(tensor_pred, k)
    assert is_equal_tf(got, expect)


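# For context on separable_filter above: a separable filter approximates an N-D
# smoothing by applying a 1-D kernel along each spatial axis in turn. A minimal
# sketch of the idea for a single-channel 3-D volume and a 1-D kernel (an
# illustrative assumption only, not necessarily DeepReg's implementation):
#
#     def separable_filter_sketch(tensor, kernel_1d):
#         # tensor: (batch, dim1, dim2, dim3, 1), kernel_1d: (kernel_size,)
#         # Reshape the 1-D kernel into a conv3d filter oriented along one
#         # spatial axis at a time: (k,1,1,1,1), (1,k,1,1,1), (1,1,k,1,1).
#         for shape in ([-1, 1, 1, 1, 1], [1, -1, 1, 1, 1], [1, 1, -1, 1, 1]):
#             tensor = tf.nn.conv3d(
#                 tensor,
#                 filters=tf.reshape(kernel_1d, shape),
#                 strides=[1, 1, 1, 1, 1],
#                 padding="SAME",
#             )
#         return tensor

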
class MinusClass(tf.keras.losses.Loss):
    """Dummy loss returning y_true - y_pred, used to exercise NegativeLossMixin."""

    def __init__(self):
        super().__init__()
        self.name = "MinusClass"

    def call(self, y_true, y_pred):
        return y_true - y_pred


class MinusClassLoss(NegativeLossMixin, MinusClass):
    """MinusClass with its sign flipped via NegativeLossMixin."""


@pytest.mark.parametrize("y_true,y_pred,expected", [(1, 2, 1), (2, 1, -1), (0, 0, 0)])
def test_negative_loss_mixin(y_true, y_pred, expected):
    """
    Testing the NegativeLossMixin class, which
    inverts the sign of any value
    returned by the wrapped loss.

    :param y_true: int
    :param y_pred: int
    :param expected: int
    :return:
    """
    y_true = tf.constant(y_true, dtype=tf.float32)
    y_pred = tf.constant(y_pred, dtype=tf.float32)

    got = MinusClassLoss().call(
        y_true,
        y_pred,
    )
    assert is_equal_tf(got, expected)
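
# For reference, NegativeLossMixin is expected to negate the wrapped loss, so
# MinusClassLoss().call(y_true, y_pred) == -(y_true - y_pred), which matches the
# parametrised expectations above. A minimal sketch of such a mixin (an
# illustrative assumption; the actual class lives in deepreg/loss/util.py):
#
#     class NegativeLossMixinSketch(tf.keras.losses.Loss):
#         """Meant to be listed before a concrete loss, as in MinusClassLoss."""
#
#         def call(self, y_true, y_pred):
#             # super().call resolves to the sibling loss class in the MRO,
#             # so this simply flips the sign of that loss value.
#             return -super().call(y_true, y_pred)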