Total Complexity | 1
Total Lines | 30
Duplicated Lines | 0 %
Changes | 0
from numba import jit
import numpy as np


# This function has two signatures in numba because, if there are no sources in the likelihood model,
# expected_model_counts is the scalar 0.0
@jit(["float64(float64[:], float64[:], float64[:])", "float64(float64[:], float64[:], float64)"],
     nopython=True, parallel=False)
def log_likelihood(observed_counts, expected_bkg_counts, expected_model_counts):  # pragma: no cover
    """
    Poisson log-likelihood minus the log factorial minus a bias. The bias might be needed to keep the numerical
    value of the likelihood small enough so that there aren't numerical problems when computing differences
    between two likelihood values.

    :param observed_counts: observed counts in each bin
    :param expected_bkg_counts: expected background counts in each bin
    :param expected_model_counts: expected counts from the source model in each bin, or 0.0 if there are no sources
    :return: the sum of the per-bin log-likelihood terms
    """

    predicted_counts = expected_bkg_counts + expected_model_counts

    # Remember: because of how the DataAnalysisBin in map_tree.py initializes the maps,
    # observed_counts > 0 everywhere

    log_likes = observed_counts * np.log(predicted_counts) - predicted_counts

    return np.sum(log_likes)
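For reference, the per-bin term computed above is observed_counts * log(predicted_counts) - predicted_counts, i.e. the Poisson log-likelihood with the constant log-factorial of the observed counts dropped. A minimal usage sketch follows; the count values are illustrative only, not taken from the project. The first call exercises the array signature, the second the scalar signature used when the likelihood model contains no sources.

import numpy as np

# Illustrative per-bin counts (hypothetical values, not project data)
observed = np.array([3.0, 7.0, 1.0])
background = np.array([2.5, 6.0, 1.2])
model = np.array([0.4, 0.8, 0.1])

# Array signature: background plus a source model
ll_with_sources = log_likelihood(observed, background, model)

# Scalar signature: no sources, so expected_model_counts is 0.0
ll_background_only = log_likelihood(observed, background, 0.0)

print(ll_with_sources, ll_background_only)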
The coding style of this project requires that you add a docstring to this code element. Below you will find an example for methods.
If you would like to know more about docstrings, we recommend reading PEP-257: Docstring Conventions.
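As a minimal illustration of such a method docstring (the method name and parameters here are hypothetical, not taken from the project), in the same reST style used by log_likelihood above:

def get_total(self, values):
    """
    Return the sum of the given values as a float.

    :param values: an iterable of numbers to sum (hypothetical parameter for illustration)
    :return: the total as a float
    """
    return float(sum(values))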