1
|
|
|
import collections |
|
|
|
|
2
|
|
|
from contextlib import contextmanager |
3
|
|
|
import matplotlib.pyplot as plt |
|
|
|
|
4
|
|
|
import numpy as np |
|
|
|
|
5
|
|
|
|
6
|
|
|
from astromodels import Parameter, Uniform_prior |
|
|
|
|
7
|
|
|
from threeML import PluginPrototype |
|
|
|
|
8
|
|
|
from threeML.io.plotting.step_plot import step_plot |
|
|
|
|
9
|
|
|
from threeML.utils.binner import Rebinner |
|
|
|
|
10
|
|
|
from threeML.utils.polarization.binned_polarization import BinnedModulationCurve |
|
|
|
|
11
|
|
|
from threeML.utils.statistics.likelihood_functions import poisson_observed_poisson_background, \ |
|
|
|
|
12
|
|
|
poisson_observed_gaussian_background |
13
|
|
|
|
14
|
|
|
from polarpy.modulation_curve_file import ModulationCurveFile |
15
|
|
|
from polarpy.polar_response import PolarResponse |
16
|
|
|
|
17
|
|
|
|
18
|
|
|
class PolarLike(PluginPrototype):
    """
    Preliminary POLAR polarization plugin
    """

    def __init__(self, name, observation, background, response, interval_number=None, verbose=False):
        """
        Create a POLAR polarization plugin from an observed and a background
        modulation curve plus a polarization response.

        :param name: name of the plugin instance
        :param observation: a BinnedModulationCurve, or the file name of a
            modulation curve file (then interval_number is required)
        :param background: a BinnedModulationCurve, or the file name of a
            modulation curve file (then interval_number is required)
        :param response: a PolarResponse instance or a response file name
        :param interval_number: interval to select when observation/background
            are given as file names
        :param verbose: if True, print informational messages
        """

        # if we pass a string, there may be multiple time intervals
        # saved so we must specify a time interval

        if isinstance(observation, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(observation)

            # create the bmc
            observation = read_file.to_binned_modulation_curve(interval=interval_number)

        # the same applies for the background
        if isinstance(background, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(background)

            background = read_file.to_binned_modulation_curve(interval=interval_number)

        assert isinstance(observation, BinnedModulationCurve), 'The observation must be a BinnedModulationCurve'
        # BUG FIX: the original message wrongly said "observation" here
        assert isinstance(background, BinnedModulationCurve), 'The background must be a BinnedModulationCurve'

        # attach the required variables

        self._observation = observation
        self._background = background

        self._observed_counts = observation.counts
        self._background_counts = background.counts
        self._background_count_errors = background.count_errors
        # ratio of exposures, used to scale the background to the observation
        self._scale = observation.exposure / background.exposure
        self._exposure = observation.exposure
        self._background_exposure = background.exposure

        self._likelihood_model = None
        self._rebinner = None

        # now do some double checks

        assert len(self._observed_counts) == len(self._background_counts)

        self._n_synthetic_datasets = 0

        # set up the effective area correction

        self._nuisance_parameter = Parameter(
            "cons_%s" % name,
            1.0,
            min_value=0.8,
            max_value=1.2,
            delta=0.05,
            free=False,
            desc="Effective area correction for %s" % name)

        nuisance_parameters = collections.OrderedDict()
        nuisance_parameters[self._nuisance_parameter.name] = self._nuisance_parameter

        # pass to the plugin proto

        super(PolarLike, self).__init__(name, nuisance_parameters)

        # The following vectors are the ones that will be really used for the computation. At the beginning they just
        # point to the original ones, but if a rebinner is used and/or a mask is created through set_active_measurements,
        # they will contain the rebinned and/or masked versions

        self._current_observed_counts = self._observed_counts
        self._current_background_counts = self._background_counts
        self._current_background_count_errors = self._background_count_errors

        self._verbose = verbose

        # we can either attach or build a response

        assert isinstance(response, str) or isinstance(
            response, PolarResponse), 'The response must be a file name or a PolarResponse'

        if isinstance(response, PolarResponse):

            self._response = response

        else:

            self._response = PolarResponse(response)

        # attach the interpolators to the plugin

        self._all_interp = self._response.interpolators

        # we also make sure the lengths match up here
        assert self._response.n_scattering_bins == len(
            self._observation.counts), 'observation counts shape does not agree with response shape'
133
|
|
|
|
134
|
|
|
def use_effective_area_correction(self, lower=0.5, upper=1.5): |
135
|
|
|
""" |
136
|
|
|
Use an area constant to correct for response issues |
137
|
|
|
|
138
|
|
|
:param lower: |
139
|
|
|
:param upper: |
140
|
|
|
:return: |
141
|
|
|
""" |
142
|
|
|
|
143
|
|
|
self._nuisance_parameter.free = True |
144
|
|
|
self._nuisance_parameter.bounds = (lower, upper) |
145
|
|
|
self._nuisance_parameter.prior = Uniform_prior(lower_bound=lower, upper_bound=upper) |
146
|
|
|
if self._verbose: |
147
|
|
|
print('Using effective area correction') |
148
|
|
|
|
149
|
|
|
def fix_effective_area_correction(self, value=1): |
150
|
|
|
""" |
151
|
|
|
|
152
|
|
|
fix the effective area correction to a particular values |
153
|
|
|
|
154
|
|
|
:param value: |
155
|
|
|
:return: |
156
|
|
|
""" |
157
|
|
|
|
158
|
|
|
# allow the value to be outside the bounds |
159
|
|
|
if self._nuisance_parameter.max_value < value: |
160
|
|
|
|
161
|
|
|
self._nuisance_parameter.max_value = value + 0.1 |
162
|
|
|
|
163
|
|
|
elif self._nuisance_parameter.min_value > value: |
164
|
|
|
|
165
|
|
|
self._nuisance_parameter.min_value = value = 0.1 |
166
|
|
|
|
167
|
|
|
self._nuisance_parameter.fix = True |
168
|
|
|
self._nuisance_parameter.value = value |
169
|
|
|
|
170
|
|
|
if self._verbose: |
171
|
|
|
print('Fixing effective area correction') |
172
|
|
|
|
173
|
|
|
@property |
174
|
|
|
def effective_area_correction(self): |
|
|
|
|
175
|
|
|
|
176
|
|
|
return self._nuisance_parameter |
177
|
|
|
|
178
|
|
|
    def get_simulated_dataset(self, new_name=None, **kwargs):
        """
        Returns another Binned instance where data have been obtained by randomizing the current expectation from the
        model, as well as from the background (depending on the respective noise models)

        :param new_name: name for the simulated plugin; autogenerated from the
            current name and simulation counter when None
        :return: an BinnedSpectrum or child instance
        """

        assert self._likelihood_model is not None, "You need to set up a model before randomizing"

        # Keep track of how many syntethic datasets we have generated

        self._n_synthetic_datasets += 1

        # Generate a name for the new dataset if needed
        if new_name is None:
            new_name = "%s_sim_%i" % (self.name, self._n_synthetic_datasets)

        # Generate randomized data depending on the different noise models

        # We remove the mask temporarily because we need the various elements for all channels. We will restore it
        # at the end

        # Get the source model for all channels (that's why we don't use the .folded_model property)

        # remember the active rebinner so it can be re-applied to the clone
        original_rebinner = self._rebinner

        with self._without_rebinner():

            # Get the source model for all channels (that's why we don't use the .folded_model property)

            source_model_counts = self._get_model_counts()

            # the likelihood functions also return the profiled-out background
            # model counts, which we use as the expectation to randomize
            if self._background.is_poisson:
                _, background_model_counts = poisson_observed_poisson_background(
                    self._current_observed_counts, self._current_background_counts, self._scale, source_model_counts)
            else:

                _, background_model_counts = poisson_observed_gaussian_background(
                    self._current_observed_counts, self._current_background_counts, self._current_background_count_errors, source_model_counts)

            # Now randomize the expectations

            # Randomize expectations for the source

            randomized_source_counts = np.random.poisson(source_model_counts + background_model_counts)

            randomized_background_counts = np.random.poisson(background_model_counts)

            # clone the curves with the randomized counts
            new_observation = self._observation.clone(new_counts=randomized_source_counts)

            new_background = self._background.clone(new_counts=randomized_background_counts)

            new_plugin = PolarLike(
                name=new_name,
                observation=new_observation,
                background=new_background,
                response=self._response,
                verbose=False,
            )

            # Apply the same selections as the current data set
            if original_rebinner is not None:

                # Apply rebinning, which also applies the mask
                new_plugin._apply_rebinner(original_rebinner)

            return new_plugin
252
|
|
|
|
253
|
|
|
def set_model(self, likelihood_model_instance): |
254
|
|
|
""" |
255
|
|
|
Set the model to be used in the joint minimization. Must be a LikelihoodModel instance. |
256
|
|
|
:param likelihood_model_instance: instance of Model |
257
|
|
|
:type likelihood_model_instance: astromodels.Model |
258
|
|
|
""" |
259
|
|
|
|
260
|
|
|
if likelihood_model_instance is None: |
261
|
|
|
return |
262
|
|
|
|
263
|
|
|
# if self._source_name is not None: |
264
|
|
|
|
265
|
|
|
# # Make sure that the source is in the model |
266
|
|
|
# assert self._source_name in likelihood_model_instance.sources, \ |
267
|
|
|
# "This XYLike plugin refers to the source %s, " \ |
268
|
|
|
# "but that source is not in the likelihood model" % (self._source_name) |
|
|
|
|
269
|
|
|
|
270
|
|
|
for k, v in likelihood_model_instance.free_parameters.items(): |
|
|
|
|
271
|
|
|
|
272
|
|
|
if 'polarization.degree' in k: |
273
|
|
|
self._pol_degree = v |
|
|
|
|
274
|
|
|
|
275
|
|
|
if 'polarization.angle' in k: |
276
|
|
|
self._pol_angle = v |
|
|
|
|
277
|
|
|
|
278
|
|
|
# now we need to get the intergal flux |
279
|
|
|
|
280
|
|
|
_, integral = self._get_diff_flux_and_integral(likelihood_model_instance) |
281
|
|
|
|
282
|
|
|
self._integral_flux = integral |
|
|
|
|
283
|
|
|
|
284
|
|
|
self._likelihood_model = likelihood_model_instance |
285
|
|
|
|
286
|
|
|
def _get_diff_flux_and_integral(self, likelihood_model): |
287
|
|
|
|
288
|
|
|
n_point_sources = likelihood_model.get_number_of_point_sources() |
289
|
|
|
|
290
|
|
|
# Make a function which will stack all point sources (OGIP do not support spatial dimension) |
291
|
|
|
|
292
|
|
|
def differential_flux(scattering_edges): |
|
|
|
|
293
|
|
|
fluxes = likelihood_model.get_point_source_fluxes(0, scattering_edges, tag=self._tag) |
294
|
|
|
|
295
|
|
|
# If we have only one point source, this will never be executed |
296
|
|
|
for i in range(1, n_point_sources): |
297
|
|
|
fluxes += likelihood_model.get_point_source_fluxes(i, scattering_edges, tag=self._tag) |
|
|
|
|
298
|
|
|
|
299
|
|
|
return fluxes |
300
|
|
|
|
301
|
|
|
# The following integrates the diffFlux function using Simpson's rule |
302
|
|
|
# This assume that the intervals e1,e2 are all small, which is guaranteed |
303
|
|
|
# for any reasonable response matrix, given that e1 and e2 are Monte-Carlo |
304
|
|
|
# scattering_edges. It also assumes that the function is smooth in the interval |
305
|
|
|
# e1 - e2 and twice-differentiable, again reasonable on small intervals for |
306
|
|
|
# decent models. It might fail for models with too sharp features, smaller |
307
|
|
|
# than the size of the monte carlo interval. |
308
|
|
|
|
309
|
|
|
def integral(e1, e2): |
|
|
|
|
310
|
|
|
# Simpson's rule |
311
|
|
|
|
312
|
|
|
return (e2 - e1) / 6.0 * (differential_flux(e1) + 4 * differential_flux( |
313
|
|
|
(e1 + e2) / 2.0) + differential_flux(e2)) |
314
|
|
|
|
315
|
|
|
return differential_flux, integral |
316
|
|
|
|
317
|
|
|
def _get_model_rate(self): |
318
|
|
|
|
319
|
|
|
# first we need to get the integrated expectation from the spectrum |
320
|
|
|
|
321
|
|
|
intergal_spectrum = np.array( |
322
|
|
|
[self._integral_flux(emin, emax) for emin, emax in zip(self._response.ene_lo, self._response.ene_hi)]) |
|
|
|
|
323
|
|
|
|
324
|
|
|
# we evaluate at the center of the bin. the bin widths are already included |
325
|
|
|
eval_points = np.array( |
326
|
|
|
[[ene, self._pol_angle.value, self._pol_degree.value] for ene in self._response.energy_mid]) |
|
|
|
|
327
|
|
|
|
328
|
|
|
expectation = [] |
329
|
|
|
|
330
|
|
|
# create the model counts by summing over energy |
331
|
|
|
|
332
|
|
|
for i, interpolator in enumerate(self._all_interp): |
|
|
|
|
333
|
|
|
rate = np.dot(interpolator(eval_points), intergal_spectrum) |
334
|
|
|
|
335
|
|
|
expectation.append(rate) |
336
|
|
|
|
337
|
|
|
return np.array(expectation) |
338
|
|
|
|
339
|
|
|
def _get_model_counts(self): |
340
|
|
|
|
341
|
|
|
|
342
|
|
|
if self._rebinner is None: |
343
|
|
|
model_rate = self._get_model_rate() |
344
|
|
|
|
345
|
|
|
else: |
346
|
|
|
|
347
|
|
|
model_rate, = self._rebinner.rebin(self._get_model_rate) |
348
|
|
|
|
349
|
|
|
|
|
|
|
|
350
|
|
|
return self._nuisance_parameter.value * self._exposure * model_rate |
351
|
|
|
|
352
|
|
|
def get_log_like(self): |
|
|
|
|
353
|
|
|
|
354
|
|
|
model_counts = self._get_model_counts() |
355
|
|
|
|
356
|
|
|
if self._background.is_poisson: |
357
|
|
|
|
358
|
|
|
loglike, bkg_model = poisson_observed_poisson_background( self._current_observed_counts, self._current_background_counts, |
|
|
|
|
359
|
|
|
self._scale, model_counts) |
|
|
|
|
360
|
|
|
|
361
|
|
|
else: |
362
|
|
|
|
363
|
|
|
loglike, bkg_model = poisson_observed_gaussian_background( self._current_observed_counts, self._current_background_counts, |
|
|
|
|
364
|
|
|
self._current_background_count_errors, model_counts) |
|
|
|
|
365
|
|
|
|
366
|
|
|
return np.sum(loglike) |
367
|
|
|
|
368
|
|
|
def inner_fit(self): |
|
|
|
|
369
|
|
|
|
370
|
|
|
return self.get_log_like() |
371
|
|
|
|
372
|
|
|
def writeto(self, file_name): |
373
|
|
|
""" |
374
|
|
|
Write the data to HDF5 modulation curve files. Both background and observation |
375
|
|
|
files are created |
376
|
|
|
:param file_name: the file name header. The .h5 extension is added automatically |
377
|
|
|
""" |
378
|
|
|
# first create a file container |
379
|
|
|
observation_file = ModulationCurveFile.from_binned_modulation_curve(self._observation) |
380
|
|
|
|
381
|
|
|
background_file = ModulationCurveFile.from_binned_modulation_curve(self._background) |
382
|
|
|
|
383
|
|
|
observation_file.writeto("%s.h5" % file_name) |
384
|
|
|
|
385
|
|
|
background_file.writeto("%s_bak.h5" % file_name) |
386
|
|
|
|
387
|
|
|
|
388
|
|
|
|
389
|
|
|
@property |
390
|
|
|
def scattering_boundaries(self): |
391
|
|
|
""" |
392
|
|
|
Energy boundaries of channels currently in use (rebinned, if a rebinner is active) |
393
|
|
|
|
394
|
|
|
:return: (sa_min, sa_max) |
395
|
|
|
""" |
396
|
|
|
|
397
|
|
|
scattering_edges = np.array(self._observation.edges) |
398
|
|
|
|
399
|
|
|
sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:] |
400
|
|
|
|
401
|
|
|
if self._rebinner is not None: |
402
|
|
|
# Get the rebinned chans. NOTE: these are already masked |
403
|
|
|
|
404
|
|
|
sa_min, sa_max = self._rebinner.get_new_start_and_stop(sa_min, sa_max) |
405
|
|
|
|
406
|
|
|
|
|
|
|
|
407
|
|
|
return sa_min, sa_max |
408
|
|
|
|
409
|
|
|
|
410
|
|
|
|
|
|
|
|
411
|
|
|
def display(self, ax=None, show_data=True, show_model=True, show_total=False, model_kwargs={}, data_kwargs={}): |
|
|
|
|
412
|
|
|
""" |
413
|
|
|
|
414
|
|
|
:param ax: |
415
|
|
|
:param show_data: |
416
|
|
|
:param show_model: |
417
|
|
|
:param show_total: |
418
|
|
|
:param model_kwargs: |
419
|
|
|
:param data_kwargs: |
420
|
|
|
:return: |
421
|
|
|
""" |
422
|
|
|
|
423
|
|
|
sa_min, sa_max = self.scattering_boundaries |
424
|
|
|
|
|
|
|
|
425
|
|
|
if show_total: |
426
|
|
|
show_model = False |
427
|
|
|
show_data = False |
428
|
|
|
|
429
|
|
|
if ax is None: |
430
|
|
|
|
431
|
|
|
fig, ax = plt.subplots() |
432
|
|
|
|
433
|
|
|
else: |
434
|
|
|
|
435
|
|
|
fig = ax.get_figure() |
436
|
|
|
|
437
|
|
|
if show_total: |
438
|
|
|
|
439
|
|
|
total_rate = self._current_observed_counts / self._exposure |
|
|
|
|
440
|
|
|
bkg_rate = self._current_background_counts / self._background_exposure |
441
|
|
|
|
442
|
|
|
total_errors = np.sqrt(total_rate) |
443
|
|
|
|
444
|
|
|
if self._background.is_poisson: |
445
|
|
|
|
446
|
|
|
bkg_errors = np.sqrt(bkg_rate) |
447
|
|
|
|
448
|
|
|
else: |
449
|
|
|
|
450
|
|
|
bkg_errors = self._current_background_count_errors |
451
|
|
|
|
452
|
|
|
ax.hlines( |
453
|
|
|
total_rate, |
454
|
|
|
sa_min, |
455
|
|
|
sa_max, |
456
|
|
|
color='#7D0505', |
457
|
|
|
**data_kwargs) |
458
|
|
|
ax.vlines( |
459
|
|
|
np.mean([self.scattering_boundaries],axis=1), |
|
|
|
|
460
|
|
|
total_rate - total_errors, |
461
|
|
|
total_rate + total_errors, |
462
|
|
|
color='#7D0505', |
463
|
|
|
**data_kwargs) |
464
|
|
|
|
465
|
|
|
ax.hlines( |
466
|
|
|
bkg_rate, |
467
|
|
|
sa_min, |
468
|
|
|
sa_max, |
469
|
|
|
color='#0D5BAE', |
470
|
|
|
**data_kwargs) |
471
|
|
|
ax.vlines( |
472
|
|
|
np.mean([self.scattering_boundaries],axis=1), |
|
|
|
|
473
|
|
|
bkg_rate - bkg_errors, |
474
|
|
|
bkg_rate + bkg_errors, |
475
|
|
|
color='#0D5BAE', |
476
|
|
|
**data_kwargs) |
477
|
|
|
|
478
|
|
|
if show_data: |
479
|
|
|
|
480
|
|
|
net_rate = ( self._observed_counts / self._exposure) - self._background_counts / self._background_exposure |
|
|
|
|
481
|
|
|
|
482
|
|
|
if self._background.is_poisson: |
483
|
|
|
|
484
|
|
|
errors = np.sqrt(( self._observed_counts / self._exposure) + |
|
|
|
|
485
|
|
|
(self._background_counts / self._background_exposure)) |
486
|
|
|
|
487
|
|
|
else: |
488
|
|
|
|
489
|
|
|
errors = np.sqrt(( self._observed_counts / self._exposure) + |
|
|
|
|
490
|
|
|
(self._background.count_errors / self._background_exposure)**2) |
491
|
|
|
|
492
|
|
|
ax.hlines(net_rate, self._response.scattering_bins_lo, self._response.scattering_bins_hi, **data_kwargs) |
|
|
|
|
493
|
|
|
ax.vlines(self._response.scattering_bins, net_rate - errors, net_rate + errors, **data_kwargs) |
|
|
|
|
494
|
|
|
|
495
|
|
|
if show_model: |
496
|
|
|
step_plot( |
497
|
|
|
ax=ax, |
498
|
|
|
xbins=np.vstack([self._response.scattering_bins_lo, self._response.scattering_bins_hi]).T, |
|
|
|
|
499
|
|
|
y=self._get_model_counts() / self._exposure, |
500
|
|
|
**model_kwargs) |
501
|
|
|
|
502
|
|
|
ax.set_xlabel('Scattering Angle') |
503
|
|
|
ax.set_ylabel('Net Rate (cnt/s/bin)') |
504
|
|
|
|
505
|
|
|
return fig |
506
|
|
|
|
507
|
|
|
@property |
508
|
|
|
def observation(self): |
|
|
|
|
509
|
|
|
return self._observation |
510
|
|
|
|
511
|
|
|
@property |
512
|
|
|
def background(self): |
|
|
|
|
513
|
|
|
return self._background |
514
|
|
|
|
515
|
|
|
@contextmanager |
516
|
|
|
def _without_rebinner(self): |
517
|
|
|
|
518
|
|
|
# Store rebinner for later use |
519
|
|
|
|
520
|
|
|
rebinner = self._rebinner |
521
|
|
|
|
522
|
|
|
# Clean mask and rebinning |
523
|
|
|
|
524
|
|
|
self.remove_rebinning() |
525
|
|
|
|
|
|
|
|
526
|
|
|
|
527
|
|
|
# Execute whathever |
528
|
|
|
|
529
|
|
|
yield |
530
|
|
|
|
531
|
|
|
# Restore mask and rebinner (if any) |
532
|
|
|
|
533
|
|
|
|
534
|
|
|
|
535
|
|
|
if rebinner is not None: |
536
|
|
|
|
537
|
|
|
# There was a rebinner, use it. Note that the rebinner applies the mask by itself |
538
|
|
|
|
539
|
|
|
self._apply_rebinner(rebinner) |
540
|
|
|
|
541
|
|
|
|
542
|
|
|
|
543
|
|
|
|
544
|
|
|
|
|
|
|
|
545
|
|
|
def rebin_on_background(self, min_number_of_counts): |
546
|
|
|
""" |
547
|
|
|
Rebin the spectrum guaranteeing the provided minimum number of counts in each background bin. This is usually |
|
|
|
|
548
|
|
|
required for spectra with very few background counts to make the Poisson profile likelihood meaningful. |
|
|
|
|
549
|
|
|
Of course this is not relevant if you treat the background as ideal, nor if the background spectrum has |
|
|
|
|
550
|
|
|
Gaussian errors. |
551
|
|
|
|
552
|
|
|
The observed spectrum will be rebinned in the same fashion as the background spectrum. |
553
|
|
|
|
554
|
|
|
To neutralize this completely, use "remove_rebinning" |
555
|
|
|
|
556
|
|
|
:param min_number_of_counts: the minimum number of counts in each bin |
557
|
|
|
:return: none |
558
|
|
|
""" |
559
|
|
|
|
560
|
|
|
# NOTE: the rebinner takes care of the mask already |
561
|
|
|
|
562
|
|
|
assert self._background is not None, "This data has no background, cannot rebin on background!" |
|
|
|
|
563
|
|
|
|
564
|
|
|
rebinner = Rebinner(self._background_counts, min_number_of_counts, mask = None) |
|
|
|
|
565
|
|
|
|
566
|
|
|
self._apply_rebinner(rebinner) |
567
|
|
|
|
568
|
|
|
def rebin_on_source(self, min_number_of_counts): |
569
|
|
|
""" |
570
|
|
|
Rebin the spectrum guaranteeing the provided minimum number of counts in each source bin. |
571
|
|
|
|
572
|
|
|
To neutralize this completely, use "remove_rebinning" |
573
|
|
|
|
574
|
|
|
:param min_number_of_counts: the minimum number of counts in each bin |
575
|
|
|
:return: none |
576
|
|
|
""" |
577
|
|
|
|
578
|
|
|
# NOTE: the rebinner takes care of the mask already |
579
|
|
|
|
580
|
|
|
|
581
|
|
|
|
582
|
|
|
rebinner = Rebinner(self._observed_counts, min_number_of_counts, self._mask) |
583
|
|
|
|
584
|
|
|
self._apply_rebinner(rebinner) |
585
|
|
|
|
586
|
|
|
def _apply_rebinner(self, rebinner): |
587
|
|
|
|
588
|
|
|
self._rebinner = rebinner |
589
|
|
|
|
590
|
|
|
# Apply the rebinning to everything. |
591
|
|
|
# NOTE: the output of the .rebin method are the vectors with the mask *already applied* |
592
|
|
|
|
593
|
|
|
self._current_observed_counts, = self._rebinner.rebin(self._observed_counts) |
594
|
|
|
|
595
|
|
|
if self._background is not None: |
596
|
|
|
|
597
|
|
|
self._current_background_counts, = self._rebinner.rebin(self._background_counts) |
598
|
|
|
|
599
|
|
|
if self._background_count_errors is not None: |
600
|
|
|
# NOTE: the output of the .rebin method are the vectors with the mask *already applied* |
|
|
|
|
601
|
|
|
|
602
|
|
|
self._current_background_count_errors, = self._rebinner.rebin_errors(self._background_count_errors) |
|
|
|
|
603
|
|
|
|
604
|
|
|
if self._verbose: |
605
|
|
|
print("Now using %s bins" % self._rebinner.n_bins) |
606
|
|
|
|
607
|
|
|
def remove_rebinning(self): |
608
|
|
|
""" |
609
|
|
|
Remove the rebinning scheme set with rebin_on_background. |
610
|
|
|
|
611
|
|
|
:return: |
612
|
|
|
""" |
613
|
|
|
|
614
|
|
|
self._rebinner = None |
615
|
|
|
|
616
|
|
|
self._current_observed_counts = self._observed_counts |
617
|
|
|
self._current_background_counts = self._background_counts |
618
|
|
|
self._current_background_count_errors = self._background_count_errors |
619
|
|
|
|
The coding style of this project requires that you add a docstring to this code element. Below, you find an example for methods:
If you would like to know more about docstrings, we recommend to read PEP-257: Docstring Conventions.