polarpy.polarlike.PolarLike.background()   A
last analyzed

Complexity

Conditions 1

Size

Total Lines 3
Code Lines 3

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
cc 1
eloc 3
nop 1
dl 0
loc 3
rs 10
c 0
b 0
f 0
1
import collections
2
from contextlib import contextmanager
3
4
import matplotlib.pyplot as plt
5
import numpy as np
6
7
from astromodels import Parameter, Uniform_prior
8
from polarpy.modulation_curve_file import ModulationCurveFile
9
from polarpy.polar_response import PolarResponse
10
from threeML import PluginPrototype
11
from threeML.io.plotting.step_plot import step_plot
12
from threeML.utils.binner import Rebinner
13
from threeML.utils.polarization.binned_polarization import \
14
    BinnedModulationCurve
15
from threeML.utils.statistics.likelihood_functions import (
16
    poisson_observed_gaussian_background, poisson_observed_poisson_background)
17
18
19
class PolarLike(PluginPrototype):
20
    """
21
    Preliminary POLAR polarization plugin
22
    """
23
24
    def __init__(self, name, observation, background, response, interval_number=None, verbose=False):
        """
        The Polarization likelihood for POLAR. This plugin is heavily modeled off
        the 3ML dispersion based plugins. It interpolates the spectral photon model
        over the scattering angle bins to allow for spectral + polarization analysis.

        :param name: The name of the plugin
        :param observation: The POLAR observation file name or a BinnedModulationCurve
        :param background: The POLAR background file name or a BinnedModulationCurve
        :param response: The POLAR polarization response file name or a PolarResponse
        :param interval_number: The time interval starting from 1 (required when
            observation/background are given as file names)
        :param verbose: print informational messages when True
        """

        # if we pass a string, there may be multiple time intervals
        # saved so we must specify a time interval

        if isinstance(observation, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(observation)

            # create the binned modulation curve for the selected interval
            observation = read_file.to_binned_modulation_curve(
                interval=interval_number)

        # the same applies for the background
        if isinstance(background, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(background)

            background = read_file.to_binned_modulation_curve(
                interval=interval_number)

        assert isinstance(
            observation, BinnedModulationCurve), 'The observation must be a BinnedModulationCurve'
        # BUG FIX: this message previously said "The observation must be..."
        # even though it checks the background argument
        assert isinstance(
            background, BinnedModulationCurve), 'The background must be a BinnedModulationCurve'

        # attach the required variables

        self._observation = observation
        self._background = background

        self._observed_counts = observation.counts.astype(np.int64)
        self._background_counts = background.counts
        self._background_count_errors = background.count_errors
        # ratio of exposures used by the profile likelihood
        self._scale = observation.exposure / background.exposure
        self._exposure = observation.exposure
        self._background_exposure = background.exposure

        self._likelihood_model = None
        self._rebinner = None

        # now do some double checks

        assert len(self._observed_counts) == len(self._background_counts)

        self._n_synthetic_datasets = 0

        # set up the effective area correction

        self._nuisance_parameter = Parameter(
            "cons_%s" % name,
            1.0,
            min_value=0.8,
            max_value=1.2,
            delta=0.05,
            free=False,
            desc="Effective area correction for %s" % name)

        nuisance_parameters = collections.OrderedDict()
        nuisance_parameters[self._nuisance_parameter.name] = self._nuisance_parameter

        # pass to the plugin proto

        super(PolarLike, self).__init__(name, nuisance_parameters)

        # The following vectors are the ones that will be really used for the computation. At the beginning they just
        # point to the original ones, but if a rebinner is used and/or a mask is created through set_active_measurements,
        # they will contain the rebinned and/or masked versions

        self._current_observed_counts = self._observed_counts
        self._current_background_counts = self._background_counts
        self._current_background_count_errors = self._background_count_errors

        self._verbose = verbose

        # we can either attach or build a response

        assert isinstance(response, str) or isinstance(
            response, PolarResponse), 'The response must be a file name or a PolarResponse'

        if isinstance(response, PolarResponse):

            self._response = response

        else:

            self._response = PolarResponse(response)

        # attach the interpolators from the response

        self._all_interp = self._response.interpolators

        # we also make sure the lengths match up here
        assert self._response.n_scattering_bins == len(
            self._observation.counts), 'observation counts shape does not agree with response shape'
141
    def use_effective_area_correction(self, lower=0.5, upper=1.5):
        """
        Free a constant effective-area correction to absorb response issues.

        :param lower: lower bound allowed for the correction factor
        :param upper: upper bound allowed for the correction factor
        :return:
        """

        parameter = self._nuisance_parameter

        parameter.free = True
        parameter.bounds = (lower, upper)

        # flat prior over the allowed range for Bayesian fits
        parameter.prior = Uniform_prior(lower_bound=lower, upper_bound=upper)

        if self._verbose:
            print('Using effective area correction')
    def fix_effective_area_correction(self, value=1):
        """
        Fix the effective area correction to a particular value.

        If the requested value lies outside the parameter's current bounds,
        the bounds are widened so the assignment is legal.

        :param value: the value at which to fix the correction (default 1)
        :return:
        """

        # allow the value to be outside the bounds by stretching them
        if self._nuisance_parameter.max_value < value:

            self._nuisance_parameter.max_value = value + 0.1

        elif self._nuisance_parameter.min_value > value:

            # BUG FIX: the original read "min_value = value = 0.1", which
            # overwrote the requested value with 0.1; widen the lower bound
            # symmetrically to the max_value branch instead
            self._nuisance_parameter.min_value = value - 0.1

        self._nuisance_parameter.fix = True
        self._nuisance_parameter.value = value

        if self._verbose:
            print('Fixing effective area correction')
    @property
    def effective_area_correction(self):
        # the effective-area correction nuisance Parameter (see __init__)
        return self._nuisance_parameter
    def get_simulated_dataset(self, new_name=None, **kwargs):
        """
        Returns another Binned instance where data have been obtained by randomizing the current expectation from the
        model, as well as from the background (depending on the respective noise models)

        :param new_name: name for the simulated plugin; auto-generated from the
            plugin name and a running counter when None
        :return: an BinnedSpectrum or child instance

        NOTE(review): **kwargs is accepted but never used here — confirm whether
        callers rely on passing extra arguments.
        """

        assert self._likelihood_model is not None, "You need to set up a model before randomizing"

        # Keep track of how many syntethic datasets we have generated

        self._n_synthetic_datasets += 1

        # Generate a name for the new dataset if needed
        if new_name is None:
            new_name = "%s_sim_%i" % (self.name, self._n_synthetic_datasets)

        # Generate randomized data depending on the different noise models.
        # We remove the rebinner temporarily because we need the various
        # elements for all channels; it is restored at the end.

        original_rebinner = self._rebinner

        with self._without_rebinner():

            # Get the source model for all channels (that's why we don't use the .folded_model property)

            source_model_counts = self._get_model_counts()

            if self._background.is_poisson:
                # profile out the background, then Poisson-fluctuate it
                _, background_model_counts = poisson_observed_poisson_background(
                    self._current_observed_counts, self._current_background_counts, self._scale, source_model_counts)
                randomized_background_counts = np.random.poisson(
                    background_model_counts)

                background_count_errors = None
            else:

                # Gaussian background: fluctuate only bins with positive errors
                _, background_model_counts = poisson_observed_gaussian_background(
                    self._current_observed_counts, self._current_background_counts,
                    self._current_background_count_errors, source_model_counts)

                randomized_background_counts = np.zeros_like(
                    background_model_counts)

                idx = (self._background_count_errors > 0)

                randomized_background_counts[idx] = np.random.normal(
                    loc=background_model_counts[idx], scale=self._background_count_errors[idx])

                # Issue a warning if the generated background is less than zero, and fix it by placing it at zero

                idx = (randomized_background_counts < 0)    # type: np.ndarray

                negative_background_n = np.sum(idx)

                if negative_background_n > 0:
                    # custom_warnings.warn("Generated background has negative counts "
                    #                      "in %i channels. Fixing them to zero" % (negative_background_n))

                    randomized_background_counts[idx] = 0

                background_count_errors = self._background_count_errors

            # Now randomize the expectations

            # Randomize expectations for the source (source + background model)

            randomized_source_counts = np.random.poisson(
                source_model_counts + background_model_counts)

            # clone the observation/background containers with the new counts

            new_observation = self._observation.clone(
                new_counts=randomized_source_counts)

            new_background = self._background.clone(
                new_counts=randomized_background_counts, new_count_errors=background_count_errors)

            new_plugin = PolarLike(
                name=new_name,
                observation=new_observation,
                background=new_background,
                response=self._response,
                verbose=False,
            )

            # Apply the same selections as the current data set
            if original_rebinner is not None:

                # Apply rebinning, which also applies the mask
                new_plugin._apply_rebinner(original_rebinner)

            return new_plugin
    def set_model(self, likelihood_model_instance):
        """
        Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
        :param likelihood_model_instance: instance of Model
        :type likelihood_model_instance: astromodels.Model
        """

        if likelihood_model_instance is None:
            return

        # pick the polarization parameters out of the free parameters by name
        for parameter_name, parameter in likelihood_model_instance.free_parameters.items():

            if 'polarization.degree' in parameter_name:
                self._pol_degree = parameter

            if 'polarization.angle' in parameter_name:
                self._pol_angle = parameter

        # build and cache the integral-flux function for this model
        _, integral = self._get_diff_flux_and_integral(likelihood_model_instance)

        self._integral_flux = integral

        self._likelihood_model = likelihood_model_instance
    def _get_diff_flux_and_integral(self, likelihood_model):
        """
        Build the total differential flux function and its bin integral.

        :param likelihood_model: the model whose point sources are summed
        :return: (differential_flux, integral) callables
        """

        n_point_sources = likelihood_model.get_number_of_point_sources()

        def differential_flux(scattering_edges):
            # Stack all point sources (OGIP do not support spatial dimension).
            # With a single point source the loop body never runs.
            total = likelihood_model.get_point_source_fluxes(
                0, scattering_edges, tag=self._tag)

            for source_index in range(1, n_point_sources):
                total += likelihood_model.get_point_source_fluxes(
                    source_index, scattering_edges, tag=self._tag)

            return total

        # Integrate differential_flux with Simpson's rule. This assumes the
        # intervals e1-e2 are small (guaranteed for any reasonable response
        # matrix, since they are Monte-Carlo scattering edges) and that the
        # function is smooth and twice-differentiable on each interval. It
        # may fail for models with features sharper than the MC bin size.
        def integral(e1, e2):
            midpoint = (e1 + e2) / 2.0

            return (e2 - e1) / 6.0 * (
                differential_flux(e1)
                + 4 * differential_flux(midpoint)
                + differential_flux(e2))

        return differential_flux, integral
    def _get_model_rate(self):
        """
        Fold the integrated spectrum through the response interpolators.

        :return: array with one model rate per scattering bin
        """

        # integrate the spectral model over each response energy bin
        integral_spectrum = np.array([
            self._integral_flux(emin, emax)
            for emin, emax in zip(self._response.ene_lo, self._response.ene_hi)
        ])

        # evaluate at the bin centers; the bin widths are already included
        eval_points = np.array([
            [energy, self._pol_angle.value, self._pol_degree.value]
            for energy in self._response.energy_mid
        ])

        # sum over energy for each scattering-angle interpolator
        expectation = [
            np.dot(interpolator(eval_points), integral_spectrum)
            for interpolator in self._all_interp
        ]

        return np.array(expectation)
    def _get_model_counts(self):
        """
        Model counts in the current binning, scaled by exposure and the
        effective-area correction.

        :return: array of expected counts per (possibly rebinned) bin
        """

        rate = self._get_model_rate()

        if self._rebinner is not None:
            # rebin returns a 1-tuple of arrays
            (rate,) = self._rebinner.rebin(rate)

        return self._nuisance_parameter.value * self._exposure * rate
    def get_log_like(self):
        """
        Total log-likelihood of the data given the current model, using the
        profile likelihood appropriate for the background noise model.

        :return: summed log-likelihood over all bins
        """

        model_counts = self._get_model_counts()

        if self._background.is_poisson:

            per_bin_loglike, _ = poisson_observed_poisson_background(
                self._current_observed_counts,
                self._current_background_counts,
                self._scale,
                model_counts)

        else:

            per_bin_loglike, _ = poisson_observed_gaussian_background(
                self._current_observed_counts,
                self._current_background_counts,
                self._current_background_count_errors,
                model_counts)

        return np.sum(per_bin_loglike)
    def inner_fit(self):
        """
        3ML plugin hook: nothing internal to optimize, so simply return
        the current log-likelihood.
        """

        current_log_like = self.get_log_like()

        return current_log_like
    def writeto(self, file_name):
        """
        Write the data to HDF5 modulation curve files. Both background and observation
        files are created
        :param file_name: the file name header. The .h5 extension is added automatically
        """

        # wrap the observation in a file container and write it
        observation_file = ModulationCurveFile.from_binned_modulation_curve(
            self._observation)
        observation_file.writeto("%s.h5" % file_name)

        # same for the background, with a "_bak" suffix
        background_file = ModulationCurveFile.from_binned_modulation_curve(
            self._background)
        background_file.writeto("%s_bak.h5" % file_name)
    @property
    def scattering_boundaries(self):
        """
        Scattering-angle boundaries of channels currently in use
        (rebinned, if a rebinner is active)

        :return: (sa_min, sa_max)
        """

        edges = np.array(self._observation.edges)

        lower_bounds = edges[:-1]
        upper_bounds = edges[1:]

        if self._rebinner is not None:

            # get the rebinned channels; NOTE: these are already masked
            lower_bounds, upper_bounds = self._rebinner.get_new_start_and_stop(
                lower_bounds, upper_bounds)

        return lower_bounds, upper_bounds
    @property
    def bin_widths(self):
        """Widths of the scattering-angle bins currently in use."""

        lower, upper = self.scattering_boundaries

        return upper - lower
    def display(self,
                ax=None,
                show_data=True,
                show_model=True,
                show_total=False,
                model_kwargs={},
                data_kwargs={},
                edges=True,
                min_rate=None):
        """
        Display the data, model, or both.

        :param ax: matplotlib axes to draw on; a new figure is created if None
        :param show_data: plot the background-subtracted data
        :param show_model: plot the model rate
        :param show_total: plot raw total and background rates instead
            (forces show_data and show_model off)
        :param model_kwargs: extra kwargs forwarded to the model plot calls
        :param data_kwargs: extra kwargs forwarded to the data plot calls
        :param edges: if True, draw the model as a step plot over bin edges
        :param min_rate: if given, temporarily rebin to this minimum rate
        :return: the matplotlib figure

        NOTE(review): model_kwargs/data_kwargs are mutable default arguments;
        they are only read here, but converting to None-defaults would be safer.
        """

        # net rate per bin (observation minus scaled background)
        tmp = ((self._observed_counts / self._exposure) -
               self._background_counts / self._background_exposure)

        scattering_edges = np.array(self._observation.edges)

        sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]

        # net rate density (per unit scattering angle) used to drive rebinning
        tmp_db = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure) / (
            sa_max - sa_min)

        # remember the active rebinner so it can be restored before returning
        old_rebinner = self._rebinner

        if min_rate is not None:

            rebinner = Rebinner(tmp_db, min_rate, mask=None)

            self._apply_rebinner(rebinner)

            # NOTE(review): other call sites unpack a 1-tuple from
            # rebinner.rebin() (e.g. "counts, = ..."), but here the raw return
            # value is kept — confirm Rebinner.rebin's return shape
            net_rate = rebinner.rebin(tmp)
        else:

            net_rate = tmp

        # boundaries reflect the (possibly rebinned) current selection
        sa_min, sa_max = self.scattering_boundaries

        if show_total:
            show_model = False
            show_data = False

        if ax is None:

            fig, ax = plt.subplots()

        else:

            fig = ax.get_figure()

        # (sa_min, sa_max) tuple; np.mean([xs], axis=1) below yields bin centers
        xs = self.scattering_boundaries

        if show_total:

            # raw observed and background rates per unit scattering angle
            total_rate = self._current_observed_counts / self._exposure / self.bin_widths

            bkg_rate = self._current_background_counts / \
                self._background_exposure / self.bin_widths

            total_errors = np.sqrt(total_rate)

            if self._background.is_poisson:

                bkg_errors = np.sqrt(bkg_rate)

            else:

                bkg_errors = self._current_background_count_errors / self.bin_widths

            # total rate: horizontal bars plus vertical error bars at bin centers
            ax.hlines(total_rate, sa_min, sa_max,
                      color='#7D0505', **data_kwargs)
            ax.vlines(
                np.mean([xs], axis=1),
                total_rate - total_errors,
                total_rate + total_errors,
                color='#7D0505',
                **data_kwargs)

            # background rate in a different color
            ax.hlines(bkg_rate, sa_min, sa_max, color='#0D5BAE', **data_kwargs)
            ax.vlines(
                np.mean([xs], axis=1), bkg_rate - bkg_errors, bkg_rate + bkg_errors, color='#0D5BAE', **data_kwargs)

        if show_data:

            # error propagation for the net rate depends on the bkg noise model
            if self._background.is_poisson:

                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_counts / self._background_exposure))

            else:

                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_count_errors / self._background_exposure)**2)

            ax.hlines(net_rate / self.bin_widths,
                      sa_min, sa_max, **data_kwargs)
            ax.vlines(
                np.mean([xs], axis=1), (net_rate - errors) /
                self.bin_widths, (net_rate + errors) / self.bin_widths,
                **data_kwargs)

        if show_model:

            if edges:

                # step plot over the bin edges
                step_plot(
                    ax=ax,
                    xbins=np.vstack([sa_min, sa_max]).T,
                    y=self._get_model_counts() / self._exposure / self.bin_widths,
                    **model_kwargs)

            else:

                # flat horizontal segments, one per bin
                y = self._get_model_counts() / self._exposure / self.bin_widths
                ax.hlines(y, sa_min, sa_max, **model_kwargs)

        ax.set_xlabel('Scattering Angle')
        ax.set_ylabel('Net Rate (cnt/s/bin)')

        # restore the rebinning state that was active on entry
        if old_rebinner is not None:

            # There was a rebinner, use it. Note that the rebinner applies the mask by itself

            self._apply_rebinner(old_rebinner)

        else:

            self.remove_rebinning()

        return fig
    # def display_circle(self,
594
    #                    ax=None,
595
    #                    show_data=True,
596
    #                    show_model=True,
597
    #                    show_total=False,
598
    #                    model_kwargs={},
599
    #                    data_kwargs={},
600
    #                    edges=True,
601
    #                    min_rate=None,
602
    #                    projection=None):
603
    #     """
604
605
    #     :param ax:
606
    #     :param show_data:
607
    #     :param show_model:
608
    #     :param show_total:
609
    #     :param model_kwargs:
610
    #     :param data_kwargs:
611
    #     :return:
612
    #     """
613
614
    #     tmp = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure)
615
616
    #     scattering_edges = np.deg2rad(np.array(self._observation.edges))
617
618
    #     sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]
619
620
    #     tmp_db = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure) / (
621
    #         sa_max - sa_min)
622
623
    #     old_rebinner = self._rebinner
624
625
    #     if min_rate is not None:
626
627
    #         rebinner = Rebinner(tmp_db, min_rate, mask=None)
628
629
    #         self._apply_rebinner(rebinner)
630
631
    #         net_rate = rebinner.rebin(tmp)
632
    #     else:
633
634
    #         net_rate = tmp
635
636
    #     sa_min, sa_max = np.deg2rad(self.scattering_boundaries)
637
    #     xs = np.deg2rad(self.scattering_boundaries)
638
639
    #     if show_total:
640
    #         show_model = False
641
    #         show_data = False
642
643
    #     if ax is None:
644
645
    #         fig, ax = plt.subplots(subplot_kw=dict(projection=projection))
646
647
    #     else:
648
649
    #         fig = ax.get_figure()
650
651
    #     if show_total:
652
    #         pass
653
654
    #         # total_rate = self._current_observed_counts / self._exposure / self.bin_widths
655
    #         # bkg_rate = self._current_background_counts / self._background_exposure /self.bin_widths
656
657
    #         # total_errors = np.sqrt(total_rate)
658
659
    #         # if self._background.is_poisson:
660
661
    #         #     bkg_errors = np.sqrt(bkg_rate)
662
663
    #         # else:
664
665
    #         #     bkg_errors = self._current_background_count_errors / self.bin_widths
666
667
    #         # xs = self.scattering_boundaries
668
669
    #         # xs = np.deg2rad(xs)
670
    #         # sa_min = np.deg2rad(sa_min)
671
    #         # sa_max = np.deg2rad(sa_max)
672
673
    #         # ax.hlines(
674
    #         #     total_rate,
675
    #         #     sa_min,
676
    #         #     sa_max,
677
    #         #     color='#7D0505',
678
    #         #     **data_kwargs)
679
    #         # ax.vlines(
680
    #         #     np.mean([xs],axis=1),
681
    #         #     total_rate - total_errors,
682
    #         #     total_rate + total_errors,
683
    #         #     color='#7D0505',
684
    #         #     **data_kwargs)
685
686
    #         # ax.hlines(
687
    #         #     bkg_rate,
688
    #         #     sa_min,
689
    #         #     sa_max,
690
    #         #     color='#0D5BAE',
691
    #         #     **data_kwargs)
692
    #         # ax.vlines(
693
    #         #     np.mean([xs],axis=1),
694
    #         #     bkg_rate - bkg_errors,
695
    #         #     bkg_rate + bkg_errors,
696
    #         #     color='#0D5BAE',
697
    #         #     **data_kwargs)
698
699
    #     if show_data:
700
701
    #         if self._background.is_poisson:
702
703
    #             errors = np.sqrt((self._current_observed_counts / self._exposure) +
704
    #                              (self._current_background_counts / self._background_exposure))
705
706
    #         else:
707
708
    #             errors = np.sqrt((self._current_observed_counts / self._exposure) +
709
    #                              (self._current_background_count_errors / self._background_exposure)**2)
710
711
    #         ax.hlines(net_rate / self.bin_widths, sa_min, sa_max, **data_kwargs)
712
    #         ax.vlines(
713
    #             np.mean(xs, axis=1), (net_rate - errors) / self.bin_widths, (net_rate + errors) / self.bin_widths,
714
    #             **data_kwargs)
715
716
    #     if show_model:
717
718
    #         y = self._get_model_counts() / self._exposure / self.bin_widths
719
    #         width = sa_max - sa_min
720
721
    #         ax.bar(np.mean(xs, axis=0), y, width=sa_max - sa_min, bottom=y, **model_kwargs)
722
723
    #     #ax.set_xlabel('Scattering Angle')
724
    #     #ax.set_ylabel('Net Rate (cnt/s/bin)')
725
726
    #     if old_rebinner is not None:
727
728
    #         # There was a rebinner, use it. Note that the rebinner applies the mask by itself
729
730
    #         self._apply_rebinner(old_rebinner)
731
732
    #     else:
733
734
    #         self.remove_rebinning()
735
736
    #     return fig
737
738
    @property
    def observation(self):
        # the observed BinnedModulationCurve attached at construction
        return self._observation
    @property
    def background(self):
        # the background BinnedModulationCurve attached at construction
        return self._background
    @contextmanager
    def _without_rebinner(self):
        """
        Context manager that temporarily removes any active rebinner and
        restores it afterwards.

        BUG FIX: the restore now runs in a finally block, so the original
        rebinner is reinstated even if the managed block raises (previously
        an exception left the plugin permanently un-rebinned).
        """

        # Store rebinner for later use (may be None)
        rebinner = self._rebinner

        # Clean mask and rebinning
        self.remove_rebinning()

        try:
            # Execute whatever the caller does inside the with-block
            yield
        finally:
            # Restore mask and rebinner (if any); the rebinner applies
            # the mask by itself
            if rebinner is not None:
                self._apply_rebinner(rebinner)
    def rebin_on_background(self, min_number_of_counts):
        """
        Rebin the spectrum guaranteeing the provided minimum number of counts in each background bin. This is usually
        required for spectra with very few background counts to make the Poisson profile likelihood meaningful.
        Of course this is not relevant if you treat the background as ideal, nor if the background spectrum has
        Gaussian errors.

        The observed spectrum will be rebinned in the same fashion as the background spectrum.

        To neutralize this completely, use "remove_rebinning"

        :param min_number_of_counts: the minimum number of counts in each bin
        :return: none
        """

        assert self._background is not None, "This data has no background, cannot rebin on background!"

        # the rebinner takes care of the mask already
        background_rebinner = Rebinner(
            self._background_counts, min_number_of_counts, mask=None)

        self._apply_rebinner(background_rebinner)
    def rebin_on_source(self, min_number_of_counts):
        """
        Rebin the spectrum guaranteeing the provided minimum number of counts in each source bin.

        To neutralize this completely, use "remove_rebinning"

        :param min_number_of_counts: the minimum number of counts in each bin
        :return: none
        """

        # the rebinner takes care of the mask already
        source_rebinner = Rebinner(
            self._observed_counts, min_number_of_counts, mask=None)

        self._apply_rebinner(source_rebinner)
    def _apply_rebinner(self, rebinner):
        """
        Make the given rebinner active and rebin all current data vectors.

        NOTE: the output of the .rebin method are the vectors with the mask
        *already applied*.

        :param rebinner: a Rebinner instance
        """

        self._rebinner = rebinner

        (self._current_observed_counts,) = rebinner.rebin(
            self._observed_counts)

        if self._background is not None:

            (self._current_background_counts,) = rebinner.rebin(
                self._background_counts)

            if self._background_count_errors is not None:

                (self._current_background_count_errors,) = rebinner.rebin_errors(
                    self._background_count_errors)

        if self._verbose:
            print("Now using %s bins" % self._rebinner.n_bins)
    def remove_rebinning(self):
        """
        Remove the rebinning scheme set with rebin_on_background.

        :return:
        """

        self._rebinner = None

        # point the "current" vectors back at the unbinned originals
        self._current_observed_counts = self._observed_counts
        self._current_background_counts = self._background_counts
        self._current_background_count_errors = self._background_count_errors