Passed
Push — master ( c60e30...319854 )
by J. Michael
01:01
created

polarpy.polarlike.PolarLike.display()   C

Complexity

Conditions 9

Size

Total Lines 110
Code Lines 62

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
cc 9
eloc 62
nop 8
dl 0
loc 110
rs 5.9103
c 0
b 0
f 0

How to fix   Long Method    Many Parameters   

Long Method

Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include Extract Method, Decompose Conditional, and Replace Temp with Query.

Many Parameters

Methods with many parameters are not only hard to understand, but their parameters also often become inconsistent when you need more, or different data.

There are several approaches to avoid long parameter lists, for example introducing a parameter object or passing whole objects instead of individual values.

import collections
import warnings
from contextlib import contextmanager

import matplotlib.pyplot as plt
import numpy as np

from astromodels import Parameter, Uniform_prior
from threeML import PluginPrototype
from threeML.io.plotting.step_plot import step_plot
from threeML.utils.binner import Rebinner
from threeML.utils.polarization.binned_polarization import BinnedModulationCurve
from threeML.utils.statistics.likelihood_functions import poisson_observed_poisson_background, \
    poisson_observed_gaussian_background

from polarpy.modulation_curve_file import ModulationCurveFile
from polarpy.polar_response import PolarResponse
class PolarLike(PluginPrototype):
    """
    Preliminary POLAR polarization plugin
    """

    def __init__(self, name, observation, background, response, interval_number=None, verbose=False):
        """
        Create a POLAR polarization plugin from an observed and a background
        modulation curve plus a polarization response.

        :param name: name of this plugin instance
        :param observation: a BinnedModulationCurve, or the path to a modulation curve file
        :param background: a BinnedModulationCurve, or the path to a modulation curve file
        :param response: a PolarResponse, or the path to a response file
        :param interval_number: interval to read when observation/background are file paths
        :param verbose: if True, print progress information
        """

        # if we pass a string, there may be multiple time intervals
        # saved so we must specify a time interval
        if isinstance(observation, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(observation)

            # create the bmc
            observation = read_file.to_binned_modulation_curve(interval=interval_number)

        # the same applies for the background
        if isinstance(background, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(background)

            background = read_file.to_binned_modulation_curve(interval=interval_number)

        assert isinstance(observation, BinnedModulationCurve), 'The observation must be a BinnedModulationCurve'
        # BUG FIX: this message used to say "observation", masking which input failed
        assert isinstance(background, BinnedModulationCurve), 'The background must be a BinnedModulationCurve'

        # attach the required variables
        self._observation = observation
        self._background = background

        self._observed_counts = observation.counts
        self._background_counts = background.counts
        self._background_count_errors = background.count_errors
        # ratio of exposures, used by the profile likelihood
        self._scale = observation.exposure / background.exposure
        self._exposure = observation.exposure
        self._background_exposure = background.exposure

        self._likelihood_model = None
        self._rebinner = None

        # now do some double checks
        assert len(self._observed_counts) == len(self._background_counts)

        self._n_synthetic_datasets = 0

        # set up the effective area correction
        self._nuisance_parameter = Parameter(
            "cons_%s" % name,
            1.0,
            min_value=0.8,
            max_value=1.2,
            delta=0.05,
            free=False,
            desc="Effective area correction for %s" % name)

        nuisance_parameters = collections.OrderedDict()
        nuisance_parameters[self._nuisance_parameter.name] = self._nuisance_parameter

        # pass to the plugin proto
        super(PolarLike, self).__init__(name, nuisance_parameters)

        # The following vectors are the ones that will be really used for the computation. At the beginning they
        # just point to the original ones, but if a rebinner is used they will contain the rebinned and/or masked
        # versions
        self._current_observed_counts = self._observed_counts
        self._current_background_counts = self._background_counts
        self._current_background_count_errors = self._background_count_errors

        self._verbose = verbose

        # we can either attach or build a response
        assert isinstance(response, str) or isinstance(
            response, PolarResponse), 'The response must be a file name or a PolarResponse'

        if isinstance(response, PolarResponse):

            self._response = response

        else:

            self._response = PolarResponse(response)

        # attach the per-scattering-bin interpolators of the response
        self._all_interp = self._response.interpolators

        # we also make sure the lengths match up here
        assert self._response.n_scattering_bins == len(
            self._observation.counts), 'observation counts shape does not agree with response shape'
    def use_effective_area_correction(self, lower=0.5, upper=1.5):
        """
        Free the effective-area nuisance parameter so the fit can correct for
        response normalization issues, bounding it in [lower, upper] with a
        uniform prior.

        :param lower: lower bound of the correction factor
        :param upper: upper bound of the correction factor
        :return:
        """

        correction = self._nuisance_parameter

        correction.free = True
        correction.bounds = (lower, upper)
        correction.prior = Uniform_prior(lower_bound=lower, upper_bound=upper)

        if self._verbose:
            print('Using effective area correction')
    def fix_effective_area_correction(self, value=1):
150
        """
151
152
        fix the effective area correction to a particular values
153
154
        :param value:
155
        :return:
156
        """
157
158
        # allow the value to be outside the bounds
159
        if self._nuisance_parameter.max_value < value:
160
161
            self._nuisance_parameter.max_value = value + 0.1
162
163
        elif self._nuisance_parameter.min_value > value:
164
165
            self._nuisance_parameter.min_value = value = 0.1
166
167
        self._nuisance_parameter.fix = True
168
        self._nuisance_parameter.value = value
169
170
        if self._verbose:
171
            print('Fixing effective area correction')
172
173
    @property
174
    def effective_area_correction(self):
175
176
        return self._nuisance_parameter
177
178
    def get_simulated_dataset(self, new_name=None, **kwargs):
        """
        Returns another Binned instance where data have been obtained by randomizing the current expectation from the
        model, as well as from the background (depending on the respective noise models)

        :param new_name: name for the simulated plugin (autogenerated if None)
        :return: a new PolarLike instance with randomized counts
        """

        assert self._likelihood_model is not None, "You need to set up a model before randomizing"

        # Keep track of how many synthetic datasets we have generated
        self._n_synthetic_datasets += 1

        # Generate a name for the new dataset if needed
        if new_name is None:
            new_name = "%s_sim_%i" % (self.name, self._n_synthetic_datasets)

        # We remove the rebinner temporarily because we need the various elements
        # for all channels. We will restore it at the end
        original_rebinner = self._rebinner

        with self._without_rebinner():

            # Get the source model for all channels (that's why we don't use the .folded_model property)
            source_model_counts = self._get_model_counts()

            # Generate randomized background depending on the background noise model
            if self._background.is_poisson:

                _, background_model_counts = poisson_observed_poisson_background(
                    self._current_observed_counts, self._current_background_counts,
                    self._scale, source_model_counts)

                randomized_background_counts = np.random.poisson(background_model_counts)

                background_count_errors = None

            else:

                _, background_model_counts = poisson_observed_gaussian_background(
                    self._current_observed_counts, self._current_background_counts,
                    self._current_background_count_errors, source_model_counts)

                # BUG FIX: the original used an undefined mask ``idx`` before assignment
                # and read the errors off a non-existent ``self._spectrum_plugin``;
                # randomize every channel, then zero out negative draws
                randomized_background_counts = np.random.normal(
                    loc=background_model_counts,
                    scale=self._background_count_errors)

                # Issue a warning if the generated background is less than zero, and fix it by placing it at zero
                idx = randomized_background_counts < 0  # type: np.ndarray
                negative_background_n = np.sum(idx)

                if negative_background_n > 0:
                    warnings.warn("Generated background has negative counts "
                                  "in %i channels. Fixing them to zero" % negative_background_n)

                    randomized_background_counts[idx] = 0

                background_count_errors = self._background_count_errors

            # Now randomize the expectations for the source
            randomized_source_counts = np.random.poisson(source_model_counts + background_model_counts)

            # clone the observation and background with the randomized counts
            new_observation = self._observation.clone(new_counts=randomized_source_counts)

            new_background = self._background.clone(new_counts=randomized_background_counts,
                                                    new_count_errors=background_count_errors)

            new_plugin = PolarLike(
                name=new_name,
                observation=new_observation,
                background=new_background,
                response=self._response,
                verbose=False,
            )

            # Apply the same selections as the current data set
            if original_rebinner is not None:

                # Apply rebinning, which also applies the mask
                new_plugin._apply_rebinner(original_rebinner)

            return new_plugin
    def set_model(self, likelihood_model_instance):
277
        """
278
        Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
279
        :param likelihood_model_instance: instance of Model
280
        :type likelihood_model_instance: astromodels.Model
281
        """
282
283
        if likelihood_model_instance is None:
284
            return
285
286
        # if self._source_name is not None:
287
288
        #     # Make sure that the source is in the model
289
        #     assert self._source_name in likelihood_model_instance.sources, \
290
        #                                         "This XYLike plugin refers to the source %s, " \
291
        #                                         "but that source is not in the likelihood model" % (self._source_name)
292
293
        for k, v in likelihood_model_instance.free_parameters.items():
294
295
            if 'polarization.degree' in k:
296
                self._pol_degree = v
297
298
            if 'polarization.angle' in k:
299
                self._pol_angle = v
300
301
        # now we need to get the intergal flux
302
303
        _, integral = self._get_diff_flux_and_integral(likelihood_model_instance)
304
305
        self._integral_flux = integral
306
307
        self._likelihood_model = likelihood_model_instance
308
309
    def _get_diff_flux_and_integral(self, likelihood_model):
310
311
        n_point_sources = likelihood_model.get_number_of_point_sources()
312
313
        # Make a function which will stack all point sources (OGIP do not support spatial dimension)
314
315
        def differential_flux(scattering_edges):
316
            fluxes = likelihood_model.get_point_source_fluxes(0, scattering_edges, tag=self._tag)
317
318
            # If we have only one point source, this will never be executed
319
            for i in range(1, n_point_sources):
320
                fluxes += likelihood_model.get_point_source_fluxes(i, scattering_edges, tag=self._tag)
321
322
            return fluxes
323
324
        # The following integrates the diffFlux function using Simpson's rule
325
        # This assume that the intervals e1,e2 are all small, which is guaranteed
326
        # for any reasonable response matrix, given that e1 and e2 are Monte-Carlo
327
        # scattering_edges. It also assumes that the function is smooth in the interval
328
        # e1 - e2 and twice-differentiable, again reasonable on small intervals for
329
        # decent models. It might fail for models with too sharp features, smaller
330
        # than the size of the monte carlo interval.
331
332
        def integral(e1, e2):
333
            # Simpson's rule
334
335
            return (e2 - e1) / 6.0 * (differential_flux(e1) + 4 * differential_flux(
336
                (e1 + e2) / 2.0) + differential_flux(e2))
337
338
        return differential_flux, integral
339
340
    def _get_model_rate(self):
341
342
        # first we need to get the integrated expectation from the spectrum
343
344
        intergal_spectrum = np.array(
345
            [self._integral_flux(emin, emax) for emin, emax in zip(self._response.ene_lo, self._response.ene_hi)])
346
347
        # we evaluate at the center of the bin. the bin widths are already included
348
        eval_points = np.array(
349
            [[ene, self._pol_angle.value, self._pol_degree.value] for ene in self._response.energy_mid])
350
351
        expectation = []
352
353
        # create the model counts by summing over energy
354
355
        for i, interpolator in enumerate(self._all_interp):
356
            rate = np.dot(interpolator(eval_points), intergal_spectrum)
357
358
            expectation.append(rate)
359
360
        return np.array(expectation)
361
362
    def _get_model_counts(self):
363
364
365
        if self._rebinner is None:
366
            model_rate = self._get_model_rate()
367
368
        else:
369
370
            model_rate, = self._rebinner.rebin(self._get_model_rate())
371
372
        
373
        return self._nuisance_parameter.value * self._exposure * model_rate
374
375
    def get_log_like(self):
        """
        Compute the total log-likelihood of the current model, profiling the
        background with the Poisson or Gaussian background likelihood as
        appropriate.

        :return: summed log-likelihood over all (possibly rebinned) bins
        """

        model_counts = self._get_model_counts()

        if self._background.is_poisson:

            loglike, bkg_model = poisson_observed_poisson_background(
                self._current_observed_counts,
                self._current_background_counts,
                self._scale,
                model_counts)

        else:

            loglike, bkg_model = poisson_observed_gaussian_background(
                self._current_observed_counts,
                self._current_background_counts,
                self._current_background_count_errors,
                model_counts)

        return np.sum(loglike)
    def inner_fit(self):
392
393
        return self.get_log_like()
394
395
    def writeto(self, file_name):
        """
        Write the data to HDF5 modulation curve files. Both background and observation
        files are created
        :param file_name: the file name header. The .h5 extension is added automatically
        """

        # wrap observation and background into file containers, then write
        # "<file_name>.h5" and "<file_name>_bak.h5" respectively
        obs_container = ModulationCurveFile.from_binned_modulation_curve(self._observation)
        bkg_container = ModulationCurveFile.from_binned_modulation_curve(self._background)

        obs_container.writeto("%s.h5" % file_name)
        bkg_container.writeto("%s_bak.h5" % file_name)
    @property
413
    def scattering_boundaries(self):
414
        """
415
        Energy boundaries of channels currently in use (rebinned, if a rebinner is active)
416
417
        :return: (sa_min, sa_max)
418
        """
419
420
        scattering_edges = np.array(self._observation.edges)
421
422
        sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]
423
424
        if self._rebinner is not None:
425
            # Get the rebinned chans. NOTE: these are already masked
426
427
            sa_min, sa_max = self._rebinner.get_new_start_and_stop(sa_min, sa_max)
428
429
 
430
        return sa_min, sa_max
431
432
    @property
433
    def bin_widths(self):
434
435
        sa_min, sa_max = self.scattering_boundaries
436
437
        return sa_max - sa_min
438
439
        
440
    def display(self, ax=None, show_data=True, show_model=True, show_total=False, model_kwargs=None, data_kwargs=None, edges=True, min_rate=None):
        """
        Plot the data, model and/or total rates as a function of scattering angle.

        :param ax: existing matplotlib axis to draw into (a new figure is made if None)
        :param show_data: plot the background-subtracted data points
        :param show_model: plot the folded model
        :param show_total: plot total and background rates instead of data and model
        :param model_kwargs: dict of kwargs forwarded to the model plotting calls
        :param data_kwargs: dict of kwargs forwarded to the data plotting calls
        :param edges: if True, draw the model as a step plot; otherwise as horizontal lines
        :param min_rate: if set, temporarily rebin so every bin reaches at least this net rate
        :return: the matplotlib figure
        """

        # BUG FIX: the defaults were mutable dicts shared across calls
        if model_kwargs is None:
            model_kwargs = {}
        if data_kwargs is None:
            data_kwargs = {}

        # net (background-subtracted) counts per second, full resolution
        net_counts_per_s = ((self._observed_counts / self._exposure) -
                            self._background_counts / self._background_exposure)

        scattering_edges = np.array(self._observation.edges)
        sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]

        # net rate per unit scattering angle, used to drive the rebinner
        net_rate_density = net_counts_per_s / (sa_max - sa_min)

        old_rebinner = self._rebinner

        if min_rate is not None:

            rebinner = Rebinner(net_rate_density, min_rate, mask=None)
            self._apply_rebinner(rebinner)

            # BUG FIX: Rebinner.rebin returns one output per input vector; every
            # other call site unpacks it — this one did not
            net_rate, = rebinner.rebin(net_counts_per_s)

        else:

            net_rate = net_counts_per_s

        # boundaries (rebinned if a rebinner is now active) and bin centers
        sa_min, sa_max = self.scattering_boundaries
        bin_center = (sa_min + sa_max) / 2.0

        if show_total:
            show_model = False
            show_data = False

        if ax is None:
            fig, ax = plt.subplots()
        else:
            fig = ax.get_figure()

        if show_total:

            total_rate = self._current_observed_counts / self._exposure / self.bin_widths
            bkg_rate = self._current_background_counts / self._background_exposure / self.bin_widths

            total_errors = np.sqrt(total_rate)

            if self._background.is_poisson:
                bkg_errors = np.sqrt(bkg_rate)
            else:
                bkg_errors = self._current_background_count_errors / self.bin_widths

            ax.hlines(total_rate, sa_min, sa_max, color='#7D0505', **data_kwargs)
            ax.vlines(bin_center,
                      total_rate - total_errors,
                      total_rate + total_errors,
                      color='#7D0505',
                      **data_kwargs)

            ax.hlines(bkg_rate, sa_min, sa_max, color='#0D5BAE', **data_kwargs)
            ax.vlines(bin_center,
                      bkg_rate - bkg_errors,
                      bkg_rate + bkg_errors,
                      color='#0D5BAE',
                      **data_kwargs)

        if show_data:

            if self._background.is_poisson:
                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_counts / self._background_exposure))
            else:
                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_count_errors / self._background_exposure) ** 2)

            ax.hlines(net_rate / self.bin_widths, sa_min, sa_max, **data_kwargs)
            ax.vlines(bin_center,
                      (net_rate - errors) / self.bin_widths,
                      (net_rate + errors) / self.bin_widths,
                      **data_kwargs)

        if show_model:

            model_rate = self._get_model_counts() / self._exposure / self.bin_widths

            if edges:
                step_plot(
                    ax=ax,
                    xbins=np.vstack([sa_min, sa_max]).T,
                    y=model_rate,
                    **model_kwargs)
            else:
                ax.hlines(model_rate, sa_min, sa_max, **model_kwargs)

        ax.set_xlabel('Scattering Angle')
        ax.set_ylabel('Net Rate (cnt/s/bin)')

        # restore whatever rebinning was active before this call
        if old_rebinner is not None:

            # There was a rebinner, use it. Note that the rebinner applies the mask by itself
            self._apply_rebinner(old_rebinner)

        else:

            self.remove_rebinning()

        return fig
    def display_circle(self, ax=None, show_data=True, show_model=True, show_total=False, model_kwargs=None, data_kwargs=None, edges=True, min_rate=None, projection=None):
        """
        Plot the data and/or model on a (polar) projection of the scattering angle.

        :param ax: existing matplotlib axis (a new one with *projection* is made if None)
        :param show_data: plot the background-subtracted data points
        :param show_model: plot the folded model as bars
        :param show_total: currently a no-op placeholder; suppresses data and model
        :param model_kwargs: dict of kwargs forwarded to the model plotting calls
        :param data_kwargs: dict of kwargs forwarded to the data plotting calls
        :param edges: unused; kept for signature compatibility with display()
        :param min_rate: if set, temporarily rebin so every bin reaches at least this net rate
        :param projection: matplotlib projection name (e.g. 'polar') for a new axis
        :return: the matplotlib figure
        """

        # BUG FIX: the defaults were mutable dicts shared across calls
        if model_kwargs is None:
            model_kwargs = {}
        if data_kwargs is None:
            data_kwargs = {}

        # net (background-subtracted) counts per second, full resolution
        net_counts_per_s = ((self._observed_counts / self._exposure) -
                            self._background_counts / self._background_exposure)

        # work in radians for the circular projection
        scattering_edges = np.deg2rad(np.array(self._observation.edges))
        sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]

        # net rate per unit scattering angle, used to drive the rebinner
        net_rate_density = net_counts_per_s / (sa_max - sa_min)

        old_rebinner = self._rebinner

        if min_rate is not None:

            rebinner = Rebinner(net_rate_density, min_rate, mask=None)
            self._apply_rebinner(rebinner)

            # BUG FIX: Rebinner.rebin returns one output per input vector; every
            # other call site unpacks it — this one did not
            net_rate, = rebinner.rebin(net_counts_per_s)

        else:

            net_rate = net_counts_per_s

        # boundaries (rebinned if a rebinner is now active), in radians
        sa_min, sa_max = np.deg2rad(self.scattering_boundaries)
        bin_center = (sa_min + sa_max) / 2.0

        if show_total:
            show_model = False
            show_data = False

        if ax is None:
            fig, ax = plt.subplots(subplot_kw=dict(projection=projection))
        else:
            fig = ax.get_figure()

        if show_total:
            # the total/background view is not implemented for the circular display yet
            pass

        if show_data:

            if self._background.is_poisson:
                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_counts / self._background_exposure))
            else:
                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_count_errors / self._background_exposure) ** 2)

            ax.hlines(net_rate / self.bin_widths, sa_min, sa_max, **data_kwargs)
            # BUG FIX: the error bars used np.mean(xs, axis=1) on the (2, n)
            # boundary pair, which collapses the wrong axis and yields only two
            # x positions; use the per-bin centers instead
            ax.vlines(bin_center,
                      (net_rate - errors) / self.bin_widths,
                      (net_rate + errors) / self.bin_widths,
                      **data_kwargs)

        if show_model:

            model_rate = self._get_model_counts() / self._exposure / self.bin_widths

            # NOTE(review): bottom=model_rate draws each bar from y to 2*y —
            # presumably intentional for the circular view; confirm
            ax.bar(bin_center,
                   model_rate,
                   width=sa_max - sa_min,
                   bottom=model_rate,
                   **model_kwargs)

        # restore whatever rebinning was active before this call
        if old_rebinner is not None:

            # There was a rebinner, use it. Note that the rebinner applies the mask by itself
            self._apply_rebinner(old_rebinner)

        else:

            self.remove_rebinning()

        return fig
    @property
758
    def observation(self):
759
        return self._observation
760
761
    @property
762
    def background(self):
763
        return self._background
764
765
    @contextmanager
766
    def _without_rebinner(self):
767
768
        # Store rebinner for later use
769
770
        rebinner = self._rebinner
771
772
        # Clean mask and rebinning
773
774
        self.remove_rebinning()
775
        
776
777
        # Execute whathever
778
779
        yield
780
781
        # Restore mask and rebinner (if any)
782
783
784
785
        if rebinner is not None:
786
787
            # There was a rebinner, use it. Note that the rebinner applies the mask by itself
788
789
            self._apply_rebinner(rebinner)
790
791
792
793
794
            
795
    def rebin_on_background(self, min_number_of_counts):
        """
        Rebin the spectrum guaranteeing the provided minimum number of counts in each background bin. This is usually
        required for spectra with very few background counts to make the Poisson profile likelihood meaningful.
        Of course this is not relevant if you treat the background as ideal, nor if the background spectrum has
        Gaussian errors.

        The observed spectrum will be rebinned in the same fashion as the background spectrum.

        To neutralize this completely, use "remove_rebinning"

        :param min_number_of_counts: the minimum number of counts in each bin
        :return: none
        """

        assert self._background is not None, "This data has no background, cannot rebin on background!"

        # NOTE: the rebinner takes care of the mask already
        self._apply_rebinner(Rebinner(self._background_counts, min_number_of_counts, mask=None))
    def rebin_on_source(self, min_number_of_counts):
        """
        Rebin the spectrum guaranteeing the provided minimum number of counts in each source bin.

        To neutralize this completely, use "remove_rebinning"

        :param min_number_of_counts: the minimum number of counts in each bin
        :return: none
        """

        # NOTE: the rebinner takes care of the mask already
        self._apply_rebinner(Rebinner(self._observed_counts, min_number_of_counts, mask=None))
    def _apply_rebinner(self, rebinner):
837
838
        self._rebinner = rebinner
839
840
        # Apply the rebinning to everything.
841
        # NOTE: the output of the .rebin method are the vectors with the mask *already applied*
842
843
        self._current_observed_counts, = self._rebinner.rebin(self._observed_counts)
844
845
        if self._background is not None:
846
847
            self._current_background_counts, = self._rebinner.rebin(self._background_counts)
848
849
            if self._background_count_errors is not None:
850
                # NOTE: the output of the .rebin method are the vectors with the mask *already applied*
851
852
                self._current_background_count_errors, = self._rebinner.rebin_errors(self._background_count_errors)
853
854
        if self._verbose:
855
            print("Now using %s bins" % self._rebinner.n_bins)
856
857
    def remove_rebinning(self):
858
        """
859
        Remove the rebinning scheme set with rebin_on_background.
860
861
        :return:
862
        """
863
864
        self._rebinner = None
865
866
        self._current_observed_counts = self._observed_counts
867
        self._current_background_counts = self._background_counts
868
        self._current_background_count_errors = self._background_count_errors
869