1
|
|
|
import collections
import warnings
from contextlib import contextmanager

import matplotlib.pyplot as plt
import numpy as np
from astromodels import Parameter, Uniform_prior
from threeML import PluginPrototype
from threeML.io.plotting.step_plot import step_plot
from threeML.utils.binner import Rebinner
from threeML.utils.polarization.binned_polarization import BinnedModulationCurve
from threeML.utils.statistics.likelihood_functions import poisson_observed_poisson_background, \
    poisson_observed_gaussian_background

from polarpy.modulation_curve_file import ModulationCurveFile
from polarpy.polar_response import PolarResponse
16
|
|
|
|
17
|
|
|
|
18
|
|
|
class PolarLike(PluginPrototype): |
|
|
|
|
19
|
|
|
""" |
20
|
|
|
Preliminary POLAR polarization plugin |
21
|
|
|
""" |
22
|
|
|
|
23
|
|
|
def __init__(self, name, observation, background, response, interval_number=None, verbose=False):
    """
    A 3ML plugin for POLAR polarization (modulation curve) data.

    :param name: name of the plugin instance
    :param observation: a BinnedModulationCurve, or the path to a modulation curve file
    :param background: a BinnedModulationCurve, or the path to a modulation curve file
    :param response: a PolarResponse, or the path to a response file
    :param interval_number: the time interval to read; required when observation/background are file names
    :param verbose: if True, print extra information
    """

    # if we pass a string, there may be multiple time intervals
    # saved so we must specify a time interval

    if isinstance(observation, str):
        assert interval_number is not None, 'must specify an interval number'

        # this is a file
        read_file = ModulationCurveFile.read(observation)

        # create the bmc
        observation = read_file.to_binned_modulation_curve(interval=interval_number)

    # the same applies for the background
    if isinstance(background, str):
        assert interval_number is not None, 'must specify an interval number'

        # this is a file
        read_file = ModulationCurveFile.read(background)

        background = read_file.to_binned_modulation_curve(interval=interval_number)

    assert isinstance(observation, BinnedModulationCurve), 'The observation must be a BinnedModulationCurve'
    # BUG FIX: this message previously said "observation" although it checks the background
    assert isinstance(background, BinnedModulationCurve), 'The background must be a BinnedModulationCurve'

    # attach the required variables

    self._observation = observation
    self._background = background

    self._observed_counts = observation.counts
    self._background_counts = background.counts
    self._background_count_errors = background.count_errors
    self._scale = observation.exposure / background.exposure
    self._exposure = observation.exposure
    self._background_exposure = background.exposure

    self._likelihood_model = None
    self._rebinner = None

    # now do some double checks

    assert len(self._observed_counts) == len(self._background_counts)

    self._n_synthetic_datasets = 0

    # set up the effective area correction

    self._nuisance_parameter = Parameter(
        "cons_%s" % name,
        1.0,
        min_value=0.8,
        max_value=1.2,
        delta=0.05,
        free=False,
        desc="Effective area correction for %s" % name)

    nuisance_parameters = collections.OrderedDict()
    nuisance_parameters[self._nuisance_parameter.name] = self._nuisance_parameter

    # pass to the plugin proto

    super(PolarLike, self).__init__(name, nuisance_parameters)

    # The following vectors are the ones that will be really used for the computation. At the beginning they just
    # point to the original ones, but if a rebinner is used and/or a mask is created through set_active_measurements,
    # they will contain the rebinned and/or masked versions

    self._current_observed_counts = self._observed_counts
    self._current_background_counts = self._background_counts
    self._current_background_count_errors = self._background_count_errors

    self._verbose = verbose

    # we can either attach or build a response

    assert isinstance(response, str) or isinstance(
        response, PolarResponse), 'The response must be a file name or a PolarResponse'

    if isinstance(response, PolarResponse):

        self._response = response

    else:

        self._response = PolarResponse(response)

    # attach the interpolators from the response

    self._all_interp = self._response.interpolators

    # we also make sure the lengths match up here
    assert self._response.n_scattering_bins == len(
        self._observation.counts), 'observation counts shape does not agree with response shape'
131
|
|
|
|
132
|
|
|
def use_effective_area_correction(self, lower=0.5, upper=1.5):
    """
    Use an area constant to correct for response issues.

    Frees the nuisance effective-area parameter, restricts it to
    [lower, upper] and assigns it a uniform prior over that range.

    :param lower: lower bound of the effective area correction
    :param upper: upper bound of the effective area correction
    :return:
    """

    self._nuisance_parameter.free = True
    self._nuisance_parameter.bounds = (lower, upper)
    # a flat prior over the allowed interval for Bayesian fits
    self._nuisance_parameter.prior = Uniform_prior(lower_bound=lower, upper_bound=upper)
    if self._verbose:
        print('Using effective area correction')
146
|
|
|
|
147
|
|
|
def fix_effective_area_correction(self, value=1):
    """
    Fix the effective area correction to a particular value.

    If the requested value lies outside the parameter's current bounds,
    the bounds are widened slightly so the assignment is legal.

    :param value: the value at which to fix the effective area correction
    :return:
    """

    # allow the value to be outside the bounds
    if self._nuisance_parameter.max_value < value:

        self._nuisance_parameter.max_value = value + 0.1

    elif self._nuisance_parameter.min_value > value:

        # BUG FIX: this used to read `value = 0.1` (chained assignment),
        # which clobbered `value` instead of lowering the bound below it
        self._nuisance_parameter.min_value = value - 0.1

    self._nuisance_parameter.fix = True
    self._nuisance_parameter.value = value

    if self._verbose:
        print('Fixing effective area correction')
170
|
|
|
|
171
|
|
|
@property
def effective_area_correction(self):
    """
    The effective area correction nuisance parameter (an astromodels Parameter).

    :return: the nuisance Parameter object itself, not its value
    """

    return self._nuisance_parameter
175
|
|
|
|
176
|
|
|
def get_simulated_dataset(self, new_name=None, **kwargs):
    """
    Returns another PolarLike instance where data have been obtained by randomizing the current expectation from the
    model, as well as from the background (depending on the respective noise models).

    :param new_name: optional name for the simulated plugin; defaults to "<name>_sim_<i>"
    :return: a new PolarLike instance with Poisson-randomized data
    """

    assert self._likelihood_model is not None, "You need to set up a model before randomizing"

    # Keep track of how many synthetic datasets we have generated

    self._n_synthetic_datasets += 1

    # Generate a name for the new dataset if needed
    if new_name is None:
        new_name = "%s_sim_%i" % (self.name, self._n_synthetic_datasets)

    # Generate randomized data depending on the different noise models

    # We remove the rebinner temporarily because we need the various elements
    # for all channels. We will restore it at the end

    original_rebinner = self._rebinner

    with self._without_rebinner():

        # Get the source model for all channels (that's why we don't use the .folded_model property)

        source_model_counts = self._get_model_counts()

        if self._background.is_poisson:
            # profile out the background rates, then Poisson-randomize them
            _, background_model_counts = poisson_observed_poisson_background(
                self._current_observed_counts, self._current_background_counts, self._scale, source_model_counts)

            randomized_background_counts = np.random.poisson(background_model_counts)

            background_count_errors = None
        else:

            _, background_model_counts = poisson_observed_gaussian_background(
                self._current_observed_counts, self._current_background_counts,
                self._current_background_count_errors, source_model_counts)

            randomized_background_counts = np.zeros_like(background_model_counts)

            # only channels with a positive error can be Gaussian-randomized
            idx = (self._background_count_errors > 0)

            randomized_background_counts[idx] = np.random.normal(
                loc=background_model_counts[idx], scale=self._background_count_errors[idx])

            # Issue a warning if the generated background is less than zero, and fix it by placing it at zero

            idx = (randomized_background_counts < 0)  # type: np.ndarray

            negative_background_n = np.sum(idx)

            if negative_background_n > 0:
                # BUG FIX: `custom_warnings` was never imported and raised a
                # NameError here; use the stdlib warnings module instead
                warnings.warn("Generated background has negative counts "
                              "in %i channels. Fixing them to zero" % (negative_background_n))

                randomized_background_counts[idx] = 0

            background_count_errors = self._background_count_errors

        # Now randomize the expectations

        # Randomize expectations for the source

        randomized_source_counts = np.random.poisson(source_model_counts + background_model_counts)

        new_observation = self._observation.clone(new_counts=randomized_source_counts)

        new_background = self._background.clone(
            new_counts=randomized_background_counts, new_count_errors=background_count_errors)

        new_plugin = PolarLike(
            name=new_name,
            observation=new_observation,
            background=new_background,
            response=self._response,
            verbose=False,
        )

        # Apply the same selections as the current data set
        if original_rebinner is not None:

            # Apply rebinning, which also applies the mask
            new_plugin._apply_rebinner(original_rebinner)

        return new_plugin
273
|
|
|
|
274
|
|
|
def set_model(self, likelihood_model_instance):
    """
    Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.

    Caches the polarization degree/angle parameters and the integral flux
    function derived from the model.

    :param likelihood_model_instance: instance of Model
    :type likelihood_model_instance: astromodels.Model
    """

    if likelihood_model_instance is None:
        return

    # pick out the polarization parameters by their path names
    for parameter_path, parameter in likelihood_model_instance.free_parameters.items():

        if 'polarization.degree' in parameter_path:
            self._pol_degree = parameter

        if 'polarization.angle' in parameter_path:
            self._pol_angle = parameter

    # now we need to get the integral flux over each energy bin
    _, integral_function = self._get_diff_flux_and_integral(likelihood_model_instance)

    self._integral_flux = integral_function

    self._likelihood_model = likelihood_model_instance
306
|
|
|
|
307
|
|
|
def _get_diff_flux_and_integral(self, likelihood_model):
    """
    Build the differential flux and the bin-integrated flux functions for the model.

    :param likelihood_model: the astromodels likelihood model to evaluate
    :return: (differential_flux, integral) where differential_flux(e) sums
             all point-source fluxes at energy e, and integral(e1, e2)
             approximates the integral of the flux over [e1, e2]
    """

    n_point_sources = likelihood_model.get_number_of_point_sources()

    # Make a function which will stack all point sources (OGIP do not support spatial dimension)

    def differential_flux(scattering_edges):
        # start from source 0, then add the rest (if any)
        fluxes = likelihood_model.get_point_source_fluxes(0, scattering_edges, tag=self._tag)

        # If we have only one point source, this will never be executed
        for i in range(1, n_point_sources):
            fluxes += likelihood_model.get_point_source_fluxes(i, scattering_edges, tag=self._tag)

        return fluxes

    # The following integrates the diffFlux function using Simpson's rule
    # This assume that the intervals e1,e2 are all small, which is guaranteed
    # for any reasonable response matrix, given that e1 and e2 are Monte-Carlo
    # scattering_edges. It also assumes that the function is smooth in the interval
    # e1 - e2 and twice-differentiable, again reasonable on small intervals for
    # decent models. It might fail for models with too sharp features, smaller
    # than the size of the monte carlo interval.

    def integral(e1, e2):
        # Simpson's rule
        return (e2 - e1) / 6.0 * (differential_flux(e1) + 4 * differential_flux(
            (e1 + e2) / 2.0) + differential_flux(e2))

    return differential_flux, integral
337
|
|
|
|
338
|
|
|
def _get_model_rate(self):
    """
    Compute the model rate in each scattering-angle bin.

    Integrates the spectral model over the response energy bins, then folds
    it through the per-bin response interpolators at the current
    polarization angle and degree.

    :return: np.ndarray of model rates, one entry per scattering bin
    """

    # first we need to get the integrated expectation from the spectrum
    # (fixed local typo: was "intergal_spectrum")
    integral_spectrum = np.array(
        [self._integral_flux(emin, emax) for emin, emax in zip(self._response.ene_lo, self._response.ene_hi)])

    # we evaluate at the center of the bin. the bin widths are already included
    eval_points = np.array(
        [[ene, self._pol_angle.value, self._pol_degree.value] for ene in self._response.energy_mid])

    # create the model counts by summing over energy
    # (the enumerate index in the original loop was unused)
    expectation = [np.dot(interpolator(eval_points), integral_spectrum)
                   for interpolator in self._all_interp]

    return np.array(expectation)
359
|
|
|
|
360
|
|
|
def _get_model_counts(self):
    """
    Model counts per (possibly rebinned) scattering bin, scaled by the
    effective area correction and the observation exposure.

    :return: np.ndarray of expected counts
    """

    rate = self._get_model_rate()

    if self._rebinner is not None:
        # rebin to match the current data binning
        rate, = self._rebinner.rebin(rate)

    return self._nuisance_parameter.value * self._exposure * rate
370
|
|
|
|
371
|
|
|
def get_log_like(self):
    """
    Total log-likelihood of the current model given the data.

    Uses a Poisson-observed/Poisson-background profile likelihood, or
    Poisson-observed/Gaussian-background when the background has errors.

    :return: the summed log-likelihood over all bins
    """

    expected_counts = self._get_model_counts()

    if self._background.is_poisson:
        per_bin_loglike, _ = poisson_observed_poisson_background(
            self._current_observed_counts, self._current_background_counts, self._scale, expected_counts)
    else:
        per_bin_loglike, _ = poisson_observed_gaussian_background(
            self._current_observed_counts, self._current_background_counts, self._current_background_count_errors,
            expected_counts)

    return np.sum(per_bin_loglike)
387
|
|
|
|
388
|
|
|
def inner_fit(self):
    """
    Inner fit hook required by the 3ML PluginPrototype interface.

    There are no internal parameters to optimize here, so simply return
    the current log-likelihood.

    :return: the log-likelihood value
    """

    return self.get_log_like()
391
|
|
|
|
392
|
|
|
def writeto(self, file_name):
    """
    Write the data to HDF5 modulation curve files. Both background and observation
    files are created.

    :param file_name: the file name header. The .h5 extension is added automatically
    """

    # wrap each curve in a file container, then write it out
    observation_file = ModulationCurveFile.from_binned_modulation_curve(self._observation)
    observation_file.writeto("%s.h5" % file_name)

    background_file = ModulationCurveFile.from_binned_modulation_curve(self._background)
    background_file.writeto("%s_bak.h5" % file_name)
406
|
|
|
|
407
|
|
|
@property
def scattering_boundaries(self):
    """
    Scattering-angle boundaries of channels currently in use (rebinned, if a rebinner is active).

    :return: (sa_min, sa_max) arrays of lower and upper bin edges
    """

    edge_array = np.array(self._observation.edges)

    lower_edges = edge_array[:-1]
    upper_edges = edge_array[1:]

    if self._rebinner is not None:
        # Get the rebinned channels. NOTE: these are already masked
        lower_edges, upper_edges = self._rebinner.get_new_start_and_stop(lower_edges, upper_edges)

    return lower_edges, upper_edges
425
|
|
|
|
426
|
|
|
@property
def bin_widths(self):
    """
    Width of each scattering-angle bin currently in use.

    :return: array of bin widths (sa_max - sa_min)
    """

    lower, upper = self.scattering_boundaries
    return upper - lower
432
|
|
|
|
433
|
|
|
def display(self,
            ax=None,
            show_data=True,
            show_model=True,
            show_total=False,
            model_kwargs={},
            data_kwargs={},
            edges=True,
            min_rate=None):
    """
    Plot the net modulation curve, and optionally the model and/or the
    raw total+background rates.

    :param ax: optional matplotlib Axes to draw on; a new figure is created if None
    :param show_data: plot the background-subtracted data points
    :param show_model: plot the folded model
    :param show_total: plot total and background rates instead (disables data/model)
    :param model_kwargs: extra kwargs forwarded to the model plotting calls
    :param data_kwargs: extra kwargs forwarded to the data plotting calls
    :param edges: if True draw the model as a step plot, otherwise as horizontal lines
    :param min_rate: if given, temporarily rebin so each bin has at least this net rate
    :return: the matplotlib Figure
    """

    # net counts per second (not yet per bin width)
    tmp = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure)

    scattering_edges = np.array(self._observation.edges)

    sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]

    # net rate density (per unit scattering angle) used to drive the rebinner
    tmp_db = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure) / (
        sa_max - sa_min)

    # remember the active rebinner so it can be restored at the end
    old_rebinner = self._rebinner

    if min_rate is not None:

        rebinner = Rebinner(tmp_db, min_rate, mask=None)

        self._apply_rebinner(rebinner)

        net_rate = rebinner.rebin(tmp)
    else:

        net_rate = tmp

    # re-read the boundaries: they reflect the rebinning just applied
    sa_min, sa_max = self.scattering_boundaries

    if show_total:
        show_model = False
        show_data = False

    if ax is None:

        fig, ax = plt.subplots()

    else:

        fig = ax.get_figure()

    xs = self.scattering_boundaries

    if show_total:

        total_rate = self._current_observed_counts / self._exposure / self.bin_widths
        bkg_rate = self._current_background_counts / self._background_exposure / self.bin_widths

        total_errors = np.sqrt(total_rate)

        if self._background.is_poisson:

            bkg_errors = np.sqrt(bkg_rate)

        else:

            bkg_errors = self._current_background_count_errors / self.bin_widths

        # total rate in dark red, background in blue
        ax.hlines(total_rate, sa_min, sa_max, color='#7D0505', **data_kwargs)
        ax.vlines(
            np.mean([xs], axis=1),
            total_rate - total_errors,
            total_rate + total_errors,
            color='#7D0505',
            **data_kwargs)

        ax.hlines(bkg_rate, sa_min, sa_max, color='#0D5BAE', **data_kwargs)
        ax.vlines(
            np.mean([xs], axis=1), bkg_rate - bkg_errors, bkg_rate + bkg_errors, color='#0D5BAE', **data_kwargs)

    if show_data:

        if self._background.is_poisson:

            errors = np.sqrt((self._current_observed_counts / self._exposure) +
                             (self._current_background_counts / self._background_exposure))

        else:

            errors = np.sqrt((self._current_observed_counts / self._exposure) +
                             (self._current_background_count_errors / self._background_exposure)**2)

        ax.hlines(net_rate / self.bin_widths, sa_min, sa_max, **data_kwargs)
        ax.vlines(
            np.mean([xs], axis=1), (net_rate - errors) / self.bin_widths, (net_rate + errors) / self.bin_widths,
            **data_kwargs)

    if show_model:

        if edges:

            step_plot(
                ax=ax,
                xbins=np.vstack([sa_min, sa_max]).T,
                y=self._get_model_counts() / self._exposure / self.bin_widths,
                **model_kwargs)

        else:

            y = self._get_model_counts() / self._exposure / self.bin_widths
            ax.hlines(y, sa_min, sa_max, **model_kwargs)

    ax.set_xlabel('Scattering Angle')
    ax.set_ylabel('Net Rate (cnt/s/bin)')

    # restore the rebinning state that was active before this call
    if old_rebinner is not None:

        # There was a rebinner, use it. Note that the rebinner applies the mask by itself

        self._apply_rebinner(old_rebinner)

    else:

        self.remove_rebinning()

    return fig
563
|
|
|
|
564
|
|
|
# def display_circle(self, |
565
|
|
|
# ax=None, |
566
|
|
|
# show_data=True, |
567
|
|
|
# show_model=True, |
568
|
|
|
# show_total=False, |
569
|
|
|
# model_kwargs={}, |
570
|
|
|
# data_kwargs={}, |
571
|
|
|
# edges=True, |
572
|
|
|
# min_rate=None, |
573
|
|
|
# projection=None): |
574
|
|
|
# """ |
575
|
|
|
|
576
|
|
|
# :param ax: |
577
|
|
|
# :param show_data: |
578
|
|
|
# :param show_model: |
579
|
|
|
# :param show_total: |
580
|
|
|
# :param model_kwargs: |
581
|
|
|
# :param data_kwargs: |
582
|
|
|
# :return: |
583
|
|
|
# """ |
584
|
|
|
|
585
|
|
|
# tmp = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure) |
|
|
|
|
586
|
|
|
|
587
|
|
|
# scattering_edges = np.deg2rad(np.array(self._observation.edges)) |
588
|
|
|
|
589
|
|
|
# sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:] |
590
|
|
|
|
591
|
|
|
# tmp_db = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure) / ( |
|
|
|
|
592
|
|
|
# sa_max - sa_min) |
593
|
|
|
|
594
|
|
|
# old_rebinner = self._rebinner |
595
|
|
|
|
596
|
|
|
# if min_rate is not None: |
597
|
|
|
|
598
|
|
|
# rebinner = Rebinner(tmp_db, min_rate, mask=None) |
599
|
|
|
|
600
|
|
|
# self._apply_rebinner(rebinner) |
601
|
|
|
|
602
|
|
|
# net_rate = rebinner.rebin(tmp) |
603
|
|
|
# else: |
604
|
|
|
|
605
|
|
|
# net_rate = tmp |
606
|
|
|
|
607
|
|
|
# sa_min, sa_max = np.deg2rad(self.scattering_boundaries) |
608
|
|
|
# xs = np.deg2rad(self.scattering_boundaries) |
609
|
|
|
|
610
|
|
|
# if show_total: |
611
|
|
|
# show_model = False |
612
|
|
|
# show_data = False |
613
|
|
|
|
614
|
|
|
# if ax is None: |
615
|
|
|
|
616
|
|
|
# fig, ax = plt.subplots(subplot_kw=dict(projection=projection)) |
617
|
|
|
|
618
|
|
|
# else: |
619
|
|
|
|
620
|
|
|
# fig = ax.get_figure() |
621
|
|
|
|
622
|
|
|
# if show_total: |
623
|
|
|
# pass |
624
|
|
|
|
625
|
|
|
# # total_rate = self._current_observed_counts / self._exposure / self.bin_widths |
626
|
|
|
# # bkg_rate = self._current_background_counts / self._background_exposure /self.bin_widths |
|
|
|
|
627
|
|
|
|
628
|
|
|
# # total_errors = np.sqrt(total_rate) |
629
|
|
|
|
630
|
|
|
# # if self._background.is_poisson: |
631
|
|
|
|
632
|
|
|
# # bkg_errors = np.sqrt(bkg_rate) |
633
|
|
|
|
634
|
|
|
# # else: |
635
|
|
|
|
636
|
|
|
# # bkg_errors = self._current_background_count_errors / self.bin_widths |
637
|
|
|
|
638
|
|
|
# # xs = self.scattering_boundaries |
639
|
|
|
|
640
|
|
|
# # xs = np.deg2rad(xs) |
641
|
|
|
# # sa_min = np.deg2rad(sa_min) |
642
|
|
|
# # sa_max = np.deg2rad(sa_max) |
643
|
|
|
|
644
|
|
|
# # ax.hlines( |
645
|
|
|
# # total_rate, |
646
|
|
|
# # sa_min, |
647
|
|
|
# # sa_max, |
648
|
|
|
# # color='#7D0505', |
649
|
|
|
# # **data_kwargs) |
650
|
|
|
# # ax.vlines( |
651
|
|
|
# # np.mean([xs],axis=1), |
652
|
|
|
# # total_rate - total_errors, |
653
|
|
|
# # total_rate + total_errors, |
654
|
|
|
# # color='#7D0505', |
655
|
|
|
# # **data_kwargs) |
656
|
|
|
|
657
|
|
|
# # ax.hlines( |
658
|
|
|
# # bkg_rate, |
659
|
|
|
# # sa_min, |
660
|
|
|
# # sa_max, |
661
|
|
|
# # color='#0D5BAE', |
662
|
|
|
# # **data_kwargs) |
663
|
|
|
# # ax.vlines( |
664
|
|
|
# # np.mean([xs],axis=1), |
665
|
|
|
# # bkg_rate - bkg_errors, |
666
|
|
|
# # bkg_rate + bkg_errors, |
667
|
|
|
# # color='#0D5BAE', |
668
|
|
|
# # **data_kwargs) |
669
|
|
|
|
670
|
|
|
# if show_data: |
671
|
|
|
|
672
|
|
|
# if self._background.is_poisson: |
673
|
|
|
|
674
|
|
|
# errors = np.sqrt((self._current_observed_counts / self._exposure) + |
675
|
|
|
# (self._current_background_counts / self._background_exposure)) |
676
|
|
|
|
677
|
|
|
# else: |
678
|
|
|
|
679
|
|
|
# errors = np.sqrt((self._current_observed_counts / self._exposure) + |
680
|
|
|
# (self._current_background_count_errors / self._background_exposure)**2) |
|
|
|
|
681
|
|
|
|
682
|
|
|
# ax.hlines(net_rate / self.bin_widths, sa_min, sa_max, **data_kwargs) |
683
|
|
|
# ax.vlines( |
684
|
|
|
# np.mean(xs, axis=1), (net_rate - errors) / self.bin_widths, (net_rate + errors) / self.bin_widths, |
|
|
|
|
685
|
|
|
# **data_kwargs) |
686
|
|
|
|
687
|
|
|
# if show_model: |
688
|
|
|
|
689
|
|
|
# y = self._get_model_counts() / self._exposure / self.bin_widths |
690
|
|
|
# width = sa_max - sa_min |
691
|
|
|
|
692
|
|
|
# ax.bar(np.mean(xs, axis=0), y, width=sa_max - sa_min, bottom=y, **model_kwargs) |
693
|
|
|
|
694
|
|
|
# #ax.set_xlabel('Scattering Angle') |
695
|
|
|
# #ax.set_ylabel('Net Rate (cnt/s/bin)') |
696
|
|
|
|
697
|
|
|
# if old_rebinner is not None: |
698
|
|
|
|
699
|
|
|
# # There was a rebinner, use it. Note that the rebinner applies the mask by itself |
700
|
|
|
|
701
|
|
|
# self._apply_rebinner(old_rebinner) |
702
|
|
|
|
703
|
|
|
# else: |
704
|
|
|
|
705
|
|
|
# self.remove_rebinning() |
706
|
|
|
|
707
|
|
|
# return fig |
708
|
|
|
|
709
|
|
|
@property
def observation(self):
    """
    The observed BinnedModulationCurve attached to this plugin.

    :return: the observation BinnedModulationCurve
    """

    return self._observation
712
|
|
|
|
713
|
|
|
@property
def background(self):
    """
    The background BinnedModulationCurve attached to this plugin.

    :return: the background BinnedModulationCurve
    """

    return self._background
716
|
|
|
|
717
|
|
|
@contextmanager
def _without_rebinner(self):
    """
    Context manager that temporarily removes any active rebinning.

    The previous rebinner (if any) is restored on exit, even if the body
    raises — the original version skipped restoration on exceptions,
    leaving the plugin in an unbinned state.
    """

    # Store rebinner for later use

    rebinner = self._rebinner

    # Clean mask and rebinning

    self.remove_rebinning()

    try:
        # Execute whatever the caller does inside the `with` block
        yield
    finally:
        # Restore mask and rebinner (if any)
        if rebinner is not None:

            # There was a rebinner, use it. Note that the rebinner applies the mask by itself

            self._apply_rebinner(rebinner)
739
|
|
|
|
740
|
|
|
def rebin_on_background(self, min_number_of_counts):
    """
    Rebin the spectrum guaranteeing the provided minimum number of counts in each background bin. This is usually
    required for spectra with very few background counts to make the Poisson profile likelihood meaningful.
    Of course this is not relevant if you treat the background as ideal, nor if the background spectrum has
    Gaussian errors.

    The observed spectrum will be rebinned in the same fashion as the background spectrum.

    To neutralize this completely, use "remove_rebinning"

    :param min_number_of_counts: the minimum number of counts in each bin
    :return: none
    """

    assert self._background is not None, "This data has no background, cannot rebin on background!"

    # NOTE: the rebinner takes care of the mask already
    new_rebinner = Rebinner(self._background_counts, min_number_of_counts, mask=None)
    self._apply_rebinner(new_rebinner)
762
|
|
|
|
763
|
|
|
def rebin_on_source(self, min_number_of_counts):
    """
    Rebin the spectrum guaranteeing the provided minimum number of counts in each source bin.

    To neutralize this completely, use "remove_rebinning"

    :param min_number_of_counts: the minimum number of counts in each bin
    :return: none
    """

    # NOTE: the rebinner takes care of the mask already
    new_rebinner = Rebinner(self._observed_counts, min_number_of_counts, mask=None)
    self._apply_rebinner(new_rebinner)
778
|
|
|
|
779
|
|
|
def _apply_rebinner(self, rebinner):
    """
    Activate *rebinner* and recompute the current (rebinned) count vectors.

    :param rebinner: a Rebinner instance to apply to observation and background
    """

    self._rebinner = rebinner

    # Apply the rebinning to everything.
    # NOTE: the output of the .rebin method are the vectors with the mask *already applied*

    self._current_observed_counts, = rebinner.rebin(self._observed_counts)

    if self._background is not None:

        self._current_background_counts, = rebinner.rebin(self._background_counts)

        if self._background_count_errors is not None:
            # errors are combined in quadrature by rebin_errors, with the mask already applied
            self._current_background_count_errors, = rebinner.rebin_errors(self._background_count_errors)

    if self._verbose:
        print("Now using %s bins" % self._rebinner.n_bins)
799
|
|
|
|
800
|
|
|
def remove_rebinning(self):
    """
    Remove the rebinning scheme set with rebin_on_background.

    :return:
    """

    # point the "current" vectors back at the original, unbinned data
    self._current_observed_counts = self._observed_counts
    self._current_background_counts = self._background_counts
    self._current_background_count_errors = self._background_count_errors

    self._rebinner = None
812
|
|
|
|
The coding style of this project requires that you add a docstring to this code element. Below, you find an example for methods:
If you would like to know more about docstrings, we recommend to read PEP-257: Docstring Conventions.