| Total Complexity | 66 |
| Total Lines | 810 |
| Duplicated Lines | 3.95 % |
| Changes | 0 |
Duplicate code is one of the most pungent code smells. A common rule of thumb is to restructure code once it is duplicated in three or more places.
Common duplication problems, and their corresponding solutions, are:
- Duplication within a single class: Extract Method.
- Duplication in sibling subclasses: Pull Up Method / Pull Up Field, or Form Template Method.
- Duplication in unrelated classes: Extract Class or Extract Superclass.
Complex classes like polarpy.polarlike often do many different things. To break such a class down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster; a sketch of Extract Class applied to this class follows below.
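In PolarLike, for example, the rebin_on_* methods and the _rebinner / _current_* fields form such a cohesive group. Below is a minimal, hypothetical sketch of how that state could be pulled out; the class name CountRebinner and its exact interface are illustrative, not part of polarpy:

# Hypothetical Extract Class sketch: the rebinning state of PolarLike moved
# into its own component. Names and interface are illustrative only.
from threeML.utils.binner import Rebinner


class CountRebinner(object):
    """Owns the rebinning state that PolarLike currently manages inline."""

    def __init__(self, observed_counts, background_counts, background_count_errors):
        self._observed_counts = observed_counts
        self._background_counts = background_counts
        self._background_count_errors = background_count_errors
        self._rebinner = None

    def rebin_on_background(self, min_number_of_counts):
        # guarantee a minimum number of counts in each background bin
        self._rebinner = Rebinner(self._background_counts, min_number_of_counts, mask=None)

    def rebin_on_source(self, min_number_of_counts):
        # guarantee a minimum number of counts in each source bin
        self._rebinner = Rebinner(self._observed_counts, min_number_of_counts, mask=None)

    def remove_rebinning(self):
        self._rebinner = None

    @property
    def current_observed_counts(self):
        # return the (possibly rebinned) observed counts
        if self._rebinner is None:
            return self._observed_counts
        counts, = self._rebinner.rebin(self._observed_counts)
        return counts

Compare this sketch with the rebinning methods near the end of the listing below.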
import collections
from contextlib import contextmanager

import matplotlib.pyplot as plt
import numpy as np

from astromodels import Parameter, Uniform_prior
from threeML import PluginPrototype
# NOTE: assumed import -- the original listing used custom_warnings in
# get_simulated_dataset() without importing it anywhere
from threeML.exceptions.custom_exceptions import custom_warnings
from threeML.io.plotting.step_plot import step_plot
from threeML.utils.binner import Rebinner
from threeML.utils.polarization.binned_polarization import BinnedModulationCurve
from threeML.utils.statistics.likelihood_functions import poisson_observed_poisson_background, \
    poisson_observed_gaussian_background

from polarpy.modulation_curve_file import ModulationCurveFile
from polarpy.polar_response import PolarResponse

class PolarLike(PluginPrototype):
    """
    Preliminary POLAR polarization plugin
    """
    def __init__(self, name, observation, background, response, interval_number=None, verbose=False):
        """
        :param name: the plugin name
        :param observation: observation file name or BinnedModulationCurve
        :param background: background file name or BinnedModulationCurve
        :param response: response file name or PolarResponse
        :param interval_number: the time interval to read when a file name is given
        :param verbose: print extra information
        """

        # if we pass a string, there may be multiple time intervals
        # saved so we must specify a time interval

        if isinstance(observation, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(observation)

            # create the binned modulation curve
            observation = read_file.to_binned_modulation_curve(interval=interval_number)

        # the same applies for the background
        if isinstance(background, str):
            assert interval_number is not None, 'must specify an interval number'

            # this is a file
            read_file = ModulationCurveFile.read(background)

            background = read_file.to_binned_modulation_curve(interval=interval_number)

        assert isinstance(observation, BinnedModulationCurve), 'The observation must be a BinnedModulationCurve'
        assert isinstance(background, BinnedModulationCurve), 'The background must be a BinnedModulationCurve'

        # attach the required variables

        self._observation = observation
        self._background = background

        self._observed_counts = observation.counts
        self._background_counts = background.counts
        self._background_count_errors = background.count_errors
        self._scale = observation.exposure / background.exposure
        self._exposure = observation.exposure
        self._background_exposure = background.exposure

        self._likelihood_model = None
        self._rebinner = None

        # now do some consistency checks

        assert len(self._observed_counts) == len(self._background_counts)

        self._n_synthetic_datasets = 0

        # set up the effective area correction

        self._nuisance_parameter = Parameter(
            "cons_%s" % name,
            1.0,
            min_value=0.8,
            max_value=1.2,
            delta=0.05,
            free=False,
            desc="Effective area correction for %s" % name)

        nuisance_parameters = collections.OrderedDict()
        nuisance_parameters[self._nuisance_parameter.name] = self._nuisance_parameter

        # pass to the plugin prototype

        super(PolarLike, self).__init__(name, nuisance_parameters)

        # The following vectors are the ones actually used for the computation. At the beginning they just
        # point to the original ones, but if a rebinner is used they will contain the rebinned versions

        self._current_observed_counts = self._observed_counts
        self._current_background_counts = self._background_counts
        self._current_background_count_errors = self._background_count_errors

        self._verbose = verbose

        # we can either attach or build a response

        assert isinstance(response, str) or isinstance(
            response, PolarResponse), 'The response must be a file name or a PolarResponse'

        if isinstance(response, PolarResponse):

            self._response = response

        else:

            self._response = PolarResponse(response)

        # attach the interpolators to the plugin

        self._all_interp = self._response.interpolators

        # we also make sure the lengths match up here
        assert self._response.n_scattering_bins == len(
            self._observation.counts), 'observation counts shape does not agree with response shape'

    def use_effective_area_correction(self, lower=0.5, upper=1.5):
        """
        Use an effective area constant to correct for response issues

        :param lower: lower bound of the correction
        :param upper: upper bound of the correction
        :return:
        """

        self._nuisance_parameter.free = True
        self._nuisance_parameter.bounds = (lower, upper)
        self._nuisance_parameter.prior = Uniform_prior(lower_bound=lower, upper_bound=upper)
        if self._verbose:
            print('Using effective area correction')

    def fix_effective_area_correction(self, value=1):
        """
        Fix the effective area correction to a particular value

        :param value: the value at which to fix the correction
        :return:
        """

        # widen the bounds if the value lies outside them
        if self._nuisance_parameter.max_value < value:

            self._nuisance_parameter.max_value = value + 0.1

        elif self._nuisance_parameter.min_value > value:

            # NOTE: fixed from the original, which read `min_value = value = 0.1`
            self._nuisance_parameter.min_value = value - 0.1

        self._nuisance_parameter.fix = True
        self._nuisance_parameter.value = value

        if self._verbose:
            print('Fixing effective area correction')

    @property
    def effective_area_correction(self):

        return self._nuisance_parameter

    def get_simulated_dataset(self, new_name=None, **kwargs):
        """
        Return another PolarLike instance where the data have been obtained by randomizing the current expectation
        from the model, as well as from the background (depending on the respective noise models)

        :return: a new PolarLike instance with randomized data
        """

        assert self._likelihood_model is not None, "You need to set up a model before randomizing"

        # Keep track of how many synthetic datasets we have generated

        self._n_synthetic_datasets += 1

        # Generate a name for the new dataset if needed
        if new_name is None:
            new_name = "%s_sim_%i" % (self.name, self._n_synthetic_datasets)

        # Generate randomized data depending on the different noise models.
        # We remove the rebinner temporarily because we need the various elements
        # for all bins; we will restore it at the end

        original_rebinner = self._rebinner

        with self._without_rebinner():

            # Get the source model for all bins (that's why we don't use the .folded_model property)

            source_model_counts = self._get_model_counts()

            if self._background.is_poisson:
                _, background_model_counts = poisson_observed_poisson_background(
                    self._current_observed_counts, self._current_background_counts, self._scale, source_model_counts)
                randomized_background_counts = np.random.poisson(background_model_counts)

                background_count_errors = None
            else:

                _, background_model_counts = poisson_observed_gaussian_background(
                    self._current_observed_counts, self._current_background_counts,
                    self._current_background_count_errors, source_model_counts)

                # NOTE: the original listing used an undefined `idx` and a nonexistent
                # `self._spectrum_plugin` attribute here; we randomize only the bins with
                # a non-zero background error, which is the presumed intent
                idx = self._background_count_errors > 0

                randomized_background_counts = np.zeros_like(background_model_counts)

                randomized_background_counts[idx] = np.random.normal(
                    loc=background_model_counts[idx], scale=self._background_count_errors[idx])

                # Issue a warning if the generated background is less than zero, and fix it by placing it at zero

                neg_idx = (randomized_background_counts < 0)  # type: np.ndarray

                negative_background_n = np.sum(neg_idx)

                if negative_background_n > 0:
                    custom_warnings.warn("Generated background has negative counts "
                                         "in %i channels. Fixing them to zero" % (negative_background_n))

                    randomized_background_counts[neg_idx] = 0

                background_count_errors = self._background_count_errors

            # Now randomize the expectations for the source

            randomized_source_counts = np.random.poisson(source_model_counts + background_model_counts)

            new_observation = self._observation.clone(new_counts=randomized_source_counts)

            new_background = self._background.clone(
                new_counts=randomized_background_counts, new_count_errors=background_count_errors)

            new_plugin = PolarLike(
                name=new_name,
                observation=new_observation,
                background=new_background,
                response=self._response,
                verbose=False,
            )

            # Apply the same selections as the current data set
            if original_rebinner is not None:

                # Apply rebinning, which also applies the mask
                new_plugin._apply_rebinner(original_rebinner)

            return new_plugin

    def set_model(self, likelihood_model_instance):
        """
        Set the model to be used in the joint minimization. Must be a LikelihoodModel instance.
        :param likelihood_model_instance: instance of Model
        :type likelihood_model_instance: astromodels.Model
        """

        if likelihood_model_instance is None:
            return

        for k, v in likelihood_model_instance.free_parameters.items():

            if 'polarization.degree' in k:
                self._pol_degree = v

            if 'polarization.angle' in k:
                self._pol_angle = v

        # now we need to get the integral flux

        _, integral = self._get_diff_flux_and_integral(likelihood_model_instance)

        self._integral_flux = integral

        self._likelihood_model = likelihood_model_instance

    def _get_diff_flux_and_integral(self, likelihood_model):

        n_point_sources = likelihood_model.get_number_of_point_sources()

        # Make a function which will stack all point sources (OGIP-style plugins do not support a spatial dimension)

        def differential_flux(scattering_edges):
            fluxes = likelihood_model.get_point_source_fluxes(0, scattering_edges, tag=self._tag)

            # If we have only one point source, this will never be executed
            for i in range(1, n_point_sources):
                fluxes += likelihood_model.get_point_source_fluxes(i, scattering_edges, tag=self._tag)

            return fluxes

        # The following integrates the differential_flux function using Simpson's rule.
        # This assumes that the intervals e1, e2 are all small, which is guaranteed
        # for any reasonable response matrix, given that e1 and e2 are Monte Carlo
        # scattering edges. It also assumes that the function is smooth in the interval
        # e1 - e2 and twice differentiable, again reasonable on small intervals for
        # decent models. It might fail for models with features sharper than the size
        # of the Monte Carlo interval.

        def integral(e1, e2):
            # Simpson's rule

            return (e2 - e1) / 6.0 * (differential_flux(e1) + 4 * differential_flux(
                (e1 + e2) / 2.0) + differential_flux(e2))

        return differential_flux, integral

    def _get_model_rate(self):

        # first we need to get the integrated expectation from the spectrum

        integral_spectrum = np.array(
            [self._integral_flux(emin, emax) for emin, emax in zip(self._response.ene_lo, self._response.ene_hi)])

        # we evaluate at the center of the bin. the bin widths are already included
        eval_points = np.array(
            [[ene, self._pol_angle.value, self._pol_degree.value] for ene in self._response.energy_mid])

        expectation = []

        # create the model counts by summing over energy

        for i, interpolator in enumerate(self._all_interp):
            rate = np.dot(interpolator(eval_points), integral_spectrum)

            expectation.append(rate)

        return np.array(expectation)

    def _get_model_counts(self):

        if self._rebinner is None:
            model_rate = self._get_model_rate()

        else:

            model_rate, = self._rebinner.rebin(self._get_model_rate())

        return self._nuisance_parameter.value * self._exposure * model_rate

    def get_log_like(self):

        model_counts = self._get_model_counts()

        if self._background.is_poisson:

            loglike, bkg_model = poisson_observed_poisson_background(
                self._current_observed_counts, self._current_background_counts, self._scale, model_counts)

        else:

            loglike, bkg_model = poisson_observed_gaussian_background(
                self._current_observed_counts, self._current_background_counts, self._current_background_count_errors,
                model_counts)

        return np.sum(loglike)

    def inner_fit(self):
        """
        Called by the joint likelihood machinery; this plugin has nothing to profile
        out internally, so it simply returns the current log-likelihood
        """

        return self.get_log_like()

    def writeto(self, file_name):
        """
        Write the data to HDF5 modulation curve files. Both background and observation
        files are created
        :param file_name: the file name header. The .h5 extension is added automatically
        """
        # first create a file container
        observation_file = ModulationCurveFile.from_binned_modulation_curve(self._observation)

        background_file = ModulationCurveFile.from_binned_modulation_curve(self._background)

        observation_file.writeto("%s.h5" % file_name)

        background_file.writeto("%s_bak.h5" % file_name)

    @property
    def scattering_boundaries(self):
        """
        Scattering-angle boundaries of the bins currently in use (rebinned, if a rebinner is active)

        :return: (sa_min, sa_max)
        """

        scattering_edges = np.array(self._observation.edges)

        sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]

        if self._rebinner is not None:
            # Get the rebinned bins. NOTE: these are already masked

            sa_min, sa_max = self._rebinner.get_new_start_and_stop(sa_min, sa_max)

        return sa_min, sa_max

    @property
    def bin_widths(self):
        """
        Widths of the scattering-angle bins currently in use
        """

        sa_min, sa_max = self.scattering_boundaries

        return sa_max - sa_min

    def display(self,
                ax=None,
                show_data=True,
                show_model=True,
                show_total=False,
                model_kwargs={},
                data_kwargs={},
                edges=True,
                min_rate=None):
        """
        :param ax: matplotlib axis to plot into (a new figure is created if None)
        :param show_data: plot the background-subtracted data
        :param show_model: plot the model
        :param show_total: plot the total and background rates instead of data and model
        :param model_kwargs: keyword arguments passed to the model plot
        :param data_kwargs: keyword arguments passed to the data plot
        :param edges: draw the model as a step plot with bin edges
        :param min_rate: if set, temporarily rebin so each bin reaches this minimum rate
        :return: the matplotlib figure
        """

        tmp = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure)

        scattering_edges = np.array(self._observation.edges)

        sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]

        tmp_db = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure) / (
            sa_max - sa_min)

        old_rebinner = self._rebinner

        if min_rate is not None:

            rebinner = Rebinner(tmp_db, min_rate, mask=None)

            self._apply_rebinner(rebinner)

            # NOTE: .rebin returns a list, so we unpack it; the original assigned the list itself
            net_rate, = rebinner.rebin(tmp)
        else:

            net_rate = tmp

        sa_min, sa_max = self.scattering_boundaries

        if show_total:
            show_model = False
            show_data = False

        if ax is None:

            fig, ax = plt.subplots()

        else:

            fig = ax.get_figure()

        # bin centers
        xs = np.mean(self.scattering_boundaries, axis=0)

        if show_total:

            total_rate = self._current_observed_counts / self._exposure / self.bin_widths
            bkg_rate = self._current_background_counts / self._background_exposure / self.bin_widths

            total_errors = np.sqrt(total_rate)

            if self._background.is_poisson:

                bkg_errors = np.sqrt(bkg_rate)

            else:

                bkg_errors = self._current_background_count_errors / self.bin_widths

            ax.hlines(total_rate, sa_min, sa_max, color='#7D0505', **data_kwargs)
            ax.vlines(
                xs,
                total_rate - total_errors,
                total_rate + total_errors,
                color='#7D0505',
                **data_kwargs)

            ax.hlines(bkg_rate, sa_min, sa_max, color='#0D5BAE', **data_kwargs)
            ax.vlines(
                xs, bkg_rate - bkg_errors, bkg_rate + bkg_errors, color='#0D5BAE', **data_kwargs)

        # NOTE (report): this block is flagged as duplicated with display_circle() below
        if show_data:

            if self._background.is_poisson:

                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_counts / self._background_exposure))

            else:

                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_count_errors / self._background_exposure)**2)

            ax.hlines(net_rate / self.bin_widths, sa_min, sa_max, **data_kwargs)
            ax.vlines(
                xs, (net_rate - errors) / self.bin_widths, (net_rate + errors) / self.bin_widths,
                **data_kwargs)

        if show_model:

            if edges:

                step_plot(
                    ax=ax,
                    xbins=np.vstack([sa_min, sa_max]).T,
                    y=self._get_model_counts() / self._exposure / self.bin_widths,
                    **model_kwargs)

            else:

                y = self._get_model_counts() / self._exposure / self.bin_widths
                ax.hlines(y, sa_min, sa_max, **model_kwargs)

        ax.set_xlabel('Scattering Angle')
        ax.set_ylabel('Net Rate (cnt/s/bin)')

        if old_rebinner is not None:

            # There was a rebinner, use it. Note that the rebinner applies the mask by itself

            self._apply_rebinner(old_rebinner)

        else:

            self.remove_rebinning()

        return fig

    def display_circle(self,
                       ax=None,
                       show_data=True,
                       show_model=True,
                       show_total=False,
                       model_kwargs={},
                       data_kwargs={},
                       edges=True,
                       min_rate=None,
                       projection=None):
        """
        :param ax: matplotlib axis to plot into (a new figure is created if None)
        :param show_data: plot the background-subtracted data
        :param show_model: plot the model
        :param show_total: currently a no-op in this method
        :param model_kwargs: keyword arguments passed to the model plot
        :param data_kwargs: keyword arguments passed to the data plot
        :param edges: unused in this method
        :param min_rate: if set, temporarily rebin so each bin reaches this minimum rate
        :param projection: matplotlib projection, e.g. 'polar'
        :return: the matplotlib figure
        """

        tmp = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure)

        scattering_edges = np.deg2rad(np.array(self._observation.edges))

        sa_min, sa_max = scattering_edges[:-1], scattering_edges[1:]

        tmp_db = ((self._observed_counts / self._exposure) - self._background_counts / self._background_exposure) / (
            sa_max - sa_min)

        old_rebinner = self._rebinner

        if min_rate is not None:

            rebinner = Rebinner(tmp_db, min_rate, mask=None)

            self._apply_rebinner(rebinner)

            # NOTE: .rebin returns a list, so we unpack it; the original assigned the list itself
            net_rate, = rebinner.rebin(tmp)
        else:

            net_rate = tmp

        sa_min, sa_max = np.deg2rad(self.scattering_boundaries)

        # bin centers; the original averaged over axis=1, which collapses the wrong dimension
        xs = np.mean(np.deg2rad(self.scattering_boundaries), axis=0)

        if show_total:
            show_model = False
            show_data = False

        if ax is None:

            fig, ax = plt.subplots(subplot_kw=dict(projection=projection))

        else:

            fig = ax.get_figure()

        if show_total:
            # the total/background view is not implemented for the circular display;
            # a commented-out draft of it was removed from this listing
            pass

        # NOTE (report): this block is flagged as duplicated with display() above
        if show_data:

            if self._background.is_poisson:

                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_counts / self._background_exposure))

            else:

                errors = np.sqrt((self._current_observed_counts / self._exposure) +
                                 (self._current_background_count_errors / self._background_exposure)**2)

            ax.hlines(net_rate / self.bin_widths, sa_min, sa_max, **data_kwargs)
            ax.vlines(
                xs, (net_rate - errors) / self.bin_widths, (net_rate + errors) / self.bin_widths,
                **data_kwargs)

        if show_model:

            y = self._get_model_counts() / self._exposure / self.bin_widths
            width = sa_max - sa_min

            ax.bar(xs, y, width=width, bottom=y, **model_kwargs)

        # ax.set_xlabel('Scattering Angle')
        # ax.set_ylabel('Net Rate (cnt/s/bin)')

        if old_rebinner is not None:

            # There was a rebinner, use it. Note that the rebinner applies the mask by itself

            self._apply_rebinner(old_rebinner)

        else:

            self.remove_rebinning()

        return fig

    @property
    def observation(self):
        return self._observation

    @property
    def background(self):
        return self._background

    @contextmanager
    def _without_rebinner(self):

        # Store the rebinner for later use

        rebinner = self._rebinner

        # Clean mask and rebinning

        self.remove_rebinning()

        # Execute whatever the context wraps

        yield

        # Restore mask and rebinner (if any)

        if rebinner is not None:

            # There was a rebinner, use it. Note that the rebinner applies the mask by itself

            self._apply_rebinner(rebinner)

    def rebin_on_background(self, min_number_of_counts):
        """
        Rebin the spectrum guaranteeing the provided minimum number of counts in each background bin. This is usually
        required for spectra with very few background counts to make the Poisson profile likelihood meaningful.
        Of course this is not relevant if you treat the background as ideal, nor if the background spectrum has
        Gaussian errors.

        The observed spectrum will be rebinned in the same fashion as the background spectrum.

        To neutralize this completely, use "remove_rebinning"

        :param min_number_of_counts: the minimum number of counts in each bin
        :return: none
        """

        # NOTE: the rebinner takes care of the mask already

        assert self._background is not None, "This data has no background, cannot rebin on background!"

        rebinner = Rebinner(self._background_counts, min_number_of_counts, mask=None)

        self._apply_rebinner(rebinner)

    def rebin_on_source(self, min_number_of_counts):
        """
        Rebin the spectrum guaranteeing the provided minimum number of counts in each source bin.

        To neutralize this completely, use "remove_rebinning"

        :param min_number_of_counts: the minimum number of counts in each bin
        :return: none
        """

        # NOTE: the rebinner takes care of the mask already

        rebinner = Rebinner(self._observed_counts, min_number_of_counts, mask=None)

        self._apply_rebinner(rebinner)

    def _apply_rebinner(self, rebinner):

        self._rebinner = rebinner

        # Apply the rebinning to everything.
        # NOTE: the output of the .rebin method are the vectors with the mask *already applied*

        self._current_observed_counts, = self._rebinner.rebin(self._observed_counts)

        if self._background is not None:

            self._current_background_counts, = self._rebinner.rebin(self._background_counts)

            if self._background_count_errors is not None:
                # NOTE: the output of the .rebin method are the vectors with the mask *already applied*

                self._current_background_count_errors, = self._rebinner.rebin_errors(self._background_count_errors)

        if self._verbose:
            print("Now using %s bins" % self._rebinner.n_bins)

    def remove_rebinning(self):
        """
        Remove the rebinning scheme set with rebin_on_background or rebin_on_source.

        :return:
        """

        self._rebinner = None

        self._current_observed_counts = self._observed_counts
        self._current_background_counts = self._background_counts
        self._current_background_count_errors = self._background_count_errors
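For orientation, here is a hypothetical end-to-end use of the plugin; the file names below are placeholders and the likelihood-model setup is elided, so treat this as a sketch rather than a recipe from the project:

# Hypothetical usage of PolarLike; file names are illustrative only
from polarpy.polarlike import PolarLike

plugin = PolarLike(
    name='polar',
    observation='polar_obs.h5',     # placeholder modulation-curve file
    background='polar_obs_bak.h5',  # placeholder background file
    response='polar_response.h5',   # placeholder response file
    interval_number=0,
)

# guarantee at least 10 background counts per bin before fitting
plugin.rebin_on_background(min_number_of_counts=10)

# after a likelihood model is attached via plugin.set_model(model):
# print(plugin.get_log_like())
fig = plugin.display(min_rate=1)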
The coding style of this project requires that you add a docstring to this code element. Below, you find an example for methods:
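(A generic PEP-257-style sketch, reusing a method from this file; the project's exact template is not shown in the report.)

def rebin_on_source(self, min_number_of_counts):
    """
    Rebin the spectrum guaranteeing the provided minimum number of counts in each source bin.

    :param min_number_of_counts: the minimum number of counts in each bin
    :return: None
    """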
If you would like to know more about docstrings, we recommend reading PEP-257: Docstring Conventions.