1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst |
||
2 | """Unit tests for the Fit class""" |
||
3 | import pytest |
||
4 | from numpy.testing import assert_allclose |
||
5 | from astropy.table import Table |
||
6 | from gammapy.datasets import Dataset |
||
7 | from gammapy.modeling import Fit, Parameter |
||
8 | from gammapy.modeling.models import ModelBase, Models |
||
9 | from gammapy.utils.testing import requires_dependency |
||
10 | |||
# Skip the whole test module when iminuit (the default fitting backend) is absent.
pytest.importorskip("iminuit")
||
12 | |||
13 | |||
class MyModel(ModelBase):
    """Toy model with three free parameters used as the fit target in these tests."""

    # Parameter scales differ by several orders of magnitude on purpose,
    # to exercise the optimizer's handling of badly scaled problems.
    x = Parameter("x", 2)
    y = Parameter("y", 3e2)
    z = Parameter("z", 4e-2)
    name = "test"
    datasets_names = ["test"]
    type = "model"
||
21 | |||
22 | |||
class MyDataset(Dataset):
    """Minimal dataset whose fit statistic is an analytic parabola.

    ``stat_sum`` has its minimum at ``(x, y, z) = (2, 3e2, 4e-2)`` with unit
    curvature in every parameter, so the expected error on each parameter is
    exactly 1 and all parameter correlations vanish.
    """

    tag = "MyDataset"

    def __init__(self, name="test"):
        self._name = name
        # Start close to, but not exactly at, the optimum so the fit has work to do.
        self._models = Models([MyModel(x=1.99, y=2.99e3, z=3.99e-2)])
        self.data_shape = (1,)
        self.meta_table = Table()

    @property
    def models(self):
        """Models attached to this dataset."""
        return self._models

    def stat_sum(self):
        """Total statistic: unit-curvature parabola centred on the optimum."""
        x, y, z = [p.value for p in self.models.parameters]
        x_opt, y_opt, z_opt = 2, 3e2, 4e-2
        return (x - x_opt) ** 2 + (y - y_opt) ** 2 + (z - z_opt) ** 2

    def fcn(self):
        """Alternative statistic with per-parameter errors (not used by the tests below)."""
        x, y, z = [p.value for p in self.models.parameters]
        x_opt, y_opt, z_opt = 2, 3e5, 4e-5
        x_err, y_err, z_err = 0.2, 3e4, 4e-6
        return (
            ((x - x_opt) / x_err) ** 2
            + ((y - y_opt) / y_err) ** 2
            + ((z - z_opt) / z_err) ** 2
        )

    def stat_array(self):
        """Statistic array, one value per data point."""
||
54 | |||
55 | |||
@requires_dependency("iminuit")
@requires_dependency("sherpa")
@pytest.mark.parametrize("backend", ["sherpa", "scipy"])
def test_optimize_backend_and_covariance(backend):
    """Fit.run recovers the optimum and unit errors with non-default backends."""
    dataset = MyDataset()

    optimize_opts = {"backend": backend}
    if backend == "scipy":
        optimize_opts["method"] = "L-BFGS-B"

    fit = Fit(optimize_opts=optimize_opts)
    result = fit.run([dataset])
    pars = result.parameters

    # Best-fit values sit at the parabola minimum.
    assert_allclose(pars["x"].value, 2, rtol=1e-3)
    assert_allclose(pars["y"].value, 3e2, rtol=1e-3)
    assert_allclose(pars["z"].value, 4e-2, rtol=1e-2)

    # Unit curvature of the statistic implies unit errors on every parameter.
    for par_name in ("x", "y", "z"):
        assert_allclose(pars[par_name].error, 1, rtol=1e-7)

    # The statistic is separable, so all off-diagonal correlations are zero.
    correlation = dataset.models.covariance.correlation
    for i, j in ((0, 1), (0, 2), (1, 2)):
        assert_allclose(correlation[i, j], 0, atol=1e-7)
||
85 | |||
86 | |||
@pytest.mark.parametrize("backend", ["minuit"])
def test_run(backend):
    """End-to-end Fit.run with minuit: values, errors and correlations."""
    dataset = MyDataset()
    result = Fit(backend=backend).run([dataset])
    pars = result.parameters

    assert result.success is True

    # Each parameter converges to the optimum with an error of exactly 1.
    for par_name, optimum in (("x", 2), ("y", 3e2), ("z", 4e-2)):
        assert_allclose(pars[par_name].value, optimum, rtol=1e-3)
        assert_allclose(pars[par_name].error, 1, rtol=1e-7)

    # Separable statistic: all correlations vanish.
    correlation = dataset.models.covariance.correlation
    for i, j in ((0, 1), (0, 2), (1, 2)):
        assert_allclose(correlation[i, j], 0, atol=1e-7)
||
108 | |||
109 | |||
@requires_dependency("sherpa")
@pytest.mark.parametrize("backend", ["minuit", "sherpa", "scipy"])
def test_optimize(backend):
    """Fit.optimize alone reaches the minimum and records one trace row per evaluation."""
    dataset = MyDataset()

    opts = {"method": "L-BFGS-B"} if backend == "scipy" else {}
    fit = Fit(store_trace=True, backend=backend, optimize_opts=opts)
    result = fit.optimize([dataset])
    pars = dataset.models.parameters

    assert result.success is True
    # The statistic is zero at the exact optimum.
    assert_allclose(result.total_stat, 0, atol=1)

    assert_allclose(pars["x"].value, 2, rtol=1e-3)
    assert_allclose(pars["y"].value, 3e2, rtol=1e-3)
    assert_allclose(pars["z"].value, 4e-2, rtol=1e-2)

    # store_trace=True: one trace entry per function evaluation.
    assert len(result.trace) == result.nfev
||
132 | |||
133 | |||
134 | # TODO: add some extra covariance tests, in addition to run |
||
135 | # Probably mainly if error message is OK if optimize didn't run first. |
||
136 | # def test_covariance(): |
||
137 | |||
138 | |||
@pytest.mark.parametrize("backend", ["minuit"])
def test_confidence(backend):
    """Profile-likelihood confidence interval on a free parameter."""
    dataset = MyDataset()
    fit = Fit(backend=backend)
    fit.optimize([dataset])
    result = fit.confidence(datasets=[dataset], parameter="x")

    assert result["success"] is True
    # Unit curvature gives symmetric errors of exactly 1.
    assert_allclose(result["errp"], 1)
    assert_allclose(result["errn"], 1)

    # The scan must restore the best-fit value when it is done.
    assert_allclose(dataset.models.parameters["x"].value, 2)
||
152 | |||
153 | |||
@pytest.mark.parametrize("backend", ["minuit"])
def test_confidence_frozen(backend):
    """Confidence estimation still works when another parameter is frozen."""
    dataset = MyDataset()
    dataset.models.parameters["x"].frozen = True
    fit = Fit(backend=backend)
    fit.optimize([dataset])
    result = fit.confidence(datasets=[dataset], parameter="y")

    assert result["success"] is True
    # Freezing x does not change the curvature along y.
    assert_allclose(result["errp"], 1)
    assert_allclose(result["errn"], 1)
||
165 | |||
166 | |||
def test_stat_profile():
    """1-D statistic profile without reoptimization of the other parameters."""
    ds = MyDataset()
    fitter = Fit()
    fitter.run([ds])

    ds.models.parameters["x"].scan_n_values = 3
    result = fitter.stat_profile(datasets=[ds], parameter="x")

    assert_allclose(result["x_scan"], [0, 2, 4], atol=1e-7)
    # Parabola: stat = (x - 2) ** 2 at the scan points.
    assert_allclose(result["stat_scan"], [4, 0, 4], atol=1e-7)
    # No reoptimization requested, so no per-point fit results are stored.
    assert len(result["fit_results"]) == 0

    # Scanning must leave the stored best-fit value untouched.
    assert_allclose(ds.models.parameters["x"].value, 2)
||
180 | |||
181 | |||
def test_stat_profile_reoptimize():
    """1-D profile with reoptimization of the remaining free parameters."""
    ds = MyDataset()
    fitter = Fit()
    fitter.run([ds])

    # Displace y so the per-point reoptimization has real work to do.
    ds.models.parameters["y"].value = 0
    ds.models.parameters["x"].scan_n_values = 3
    result = fitter.stat_profile(datasets=[ds], parameter="x", reoptimize=True)

    assert_allclose(result["x_scan"], [0, 2, 4], atol=1e-7)
    assert_allclose(result["stat_scan"], [4, 0, 4], atol=1e-7)
    # The stored per-point fit result must agree with the scanned statistic.
    assert_allclose(
        result["fit_results"][0].total_stat, result["stat_scan"][0], atol=1e-7
    )
||
196 | |||
197 | |||
def test_stat_surface():
    """2-D statistic surface on a 3x3 grid, without reoptimization."""
    ds = MyDataset()
    fitter = Fit()
    fitter.run([ds])

    x_values = [1, 2, 3]
    y_values = [2e2, 3e2, 4e2]
    ds.models.parameters["x"].scan_values = x_values
    ds.models.parameters["y"].scan_values = y_values

    result = fitter.stat_surface(datasets=[ds], x="x", y="y")

    assert_allclose(result["x_scan"], x_values, atol=1e-7)
    assert_allclose(result["y_scan"], y_values, atol=1e-7)
    # stat = (x - 2)**2 + (y - 3e2)**2 evaluated on the grid.
    expected_stat = [
        [1.0001e04, 1.0000e00, 1.0001e04],
        [1.0000e04, 0.0000e00, 1.0000e04],
        [1.0001e04, 1.0000e00, 1.0001e04],
    ]
    assert_allclose(list(result["stat_scan"]), expected_stat, atol=1e-7)
    # No reoptimization requested, so no per-point fit results are stored.
    assert len(result["fit_results"]) == 0

    # Scanning must leave the stored best-fit values untouched.
    assert_allclose(ds.models.parameters["x"].value, 2)
    assert_allclose(ds.models.parameters["y"].value, 3e2)
||
223 | |||
224 | |||
def test_stat_surface_reoptimize():
    """2-D statistic surface with reoptimization of z at every grid point."""
    ds = MyDataset()
    fitter = Fit()
    fitter.run([ds])

    x_values = [1, 2, 3]
    y_values = [2e2, 3e2, 4e2]

    # Displace z so each grid point requires a genuine reoptimization.
    ds.models.parameters["z"].value = 0
    ds.models.parameters["x"].scan_values = x_values
    ds.models.parameters["y"].scan_values = y_values

    result = fitter.stat_surface(datasets=[ds], x="x", y="y", reoptimize=True)

    assert_allclose(result["x_scan"], x_values, atol=1e-7)
    assert_allclose(result["y_scan"], y_values, atol=1e-7)
    # With z refitted back to its optimum, the surface is the same parabola.
    expected_stat = [
        [1.0001e04, 1.0000e00, 1.0001e04],
        [1.0000e04, 0.0000e00, 1.0000e04],
        [1.0001e04, 1.0000e00, 1.0001e04],
    ]
    assert_allclose(list(result["stat_scan"]), expected_stat, atol=1e-7)
    # The stored per-point fit result must agree with the scanned statistic.
    assert_allclose(
        result["fit_results"][0][0].total_stat, result["stat_scan"][0][0], atol=1e-7
    )
||
253 | |||
254 | |||
def test_stat_contour():
    """Minuit contour in the (y, z) plane with x frozen."""
    ds = MyDataset()
    ds.models.parameters["x"].frozen = True
    fitter = Fit(backend="minuit")
    fitter.optimize([ds])
    result = fitter.stat_contour(datasets=[ds], x="y", y="z")

    assert result["success"]

    contour_y = result["y"]
    assert_allclose(len(contour_y), 10)
    assert_allclose(contour_y[0], 299, rtol=1e-5)
    assert_allclose(contour_y[-1], 299.133975, rtol=1e-5)

    contour_z = result["z"]
    assert_allclose(len(contour_z), 10)
    assert_allclose(contour_z[0], 0.04, rtol=1e-5)
    assert_allclose(contour_z[-1], 0.54, rtol=1e-5)

    # The contour scan must restore the best-fit value afterwards.
    assert_allclose(ds.models.parameters["y"].value, 300)
||
272 | |||
273 | # Check that original value state wasn't changed |
||
274 | assert_allclose(dataset.models.parameters["y"].value, 300) |
||
275 |