asgardpy.stats.utils.fetch_all_analysis_fit_info()   Grade: A

Complexity
  Conditions: 3

Size
  Total Lines: 53
  Code Lines: 34

Duplication
  Lines: 0
  Ratio: 0 %

Importance
  Changes: 0

Metric   Value
cc       3
eloc     34
nop      2
dl       0
loc      53
rs       9.064
c        0
b        0
f        0

How to fix: Long Method

Small methods make your code easier to understand, particularly when combined with a good name. And if a method is small, finding a good name for it is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for naming it.

Commonly applied refactorings include Extract Method.
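As a concrete illustration, the per-model bookkeeping inside fetch_all_analysis_fit_info (shown in the listing below) could be extracted into a helper. This is only a sketch of the Extract Method step: the helper name _store_lrt_preference is hypothetical, and its body restates logic already present in the function.

def _store_lrt_preference(analysis_entry, dict_pl, dict_tag, tag):
    """Store how strongly a nested spectral model is preferred over Power Law."""
    # The Power Law itself is the reference model, so its entries are zeroed.
    if tag == "pl":
        analysis_entry["Pref_over_pl_chi2"] = 0
        analysis_entry["Pref_over_pl_pval"] = 0
        analysis_entry["DoF_over_pl"] = 0
        return 0

    # Likelihood-ratio test of the nested model against Power Law, using the
    # same check_model_preference_lrt call as the original loop body.
    p_pl_x, g_pl_x, ndof_pl_x = check_model_preference_lrt(
        dict_pl["best_fit_stat"],
        dict_tag["best_fit_stat"],
        dict_pl["DoF"],
        dict_tag["DoF"],
    )
    analysis_entry["Pref_over_pl_chi2"] = g_pl_x
    analysis_entry["Pref_over_pl_pval"] = p_pl_x
    analysis_entry["DoF_over_pl"] = ndof_pl_x

    return g_pl_x

Inside the loop, the original block then collapses to a single line, pref_over_pl_chi2_list.append(_store_lrt_preference(main_analysis_list[tag], dict_pl, dict_tag, tag)), and the existing comment becomes the helper's docstring. The full source of asgardpy.stats.utils, as analyzed, follows.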

"""
Module containing additional utility functions for selecting a preferred model.
"""

import numpy as np
from astropy.table import QTable

from asgardpy.config.operations import all_model_templates
from asgardpy.stats.stats import check_model_preference_lrt

__all__ = [
    "fetch_all_analysis_fit_info",
    "get_model_config_files",
    "tabulate_best_fit_stats",
    "copy_target_config",
]


def get_model_config_files(select_model_tags):
    """From the default model templates, select some."""

    all_tags, template_files = all_model_templates()

    spec_model_temp_files = []
    for tag in select_model_tags:
        spec_model_temp_files.append(template_files[np.where(all_tags == tag)[0][0]])

    spec_model_temp_files = np.array(spec_model_temp_files)

    return spec_model_temp_files


def get_spec_params_indices(aa_config):
    """
    For copying the spectral flux amplitude and flux normalization energy,
    from one config to another, find the correct parameter indices within a
    given config.
    """
    par_names = []
    for p in aa_config.config.target.components[0].spectral.parameters:
        par_names.append(p.name)
    par_names = np.array(par_names)

    amp_idx = None
    # For models without this parameter, name has not yet been included or
    # checked with Asgardpy
    if "amplitude" in par_names:
        amp_idx = np.where(par_names == "amplitude")[0][0]

    if "reference" in par_names:
        eref_idx = np.where(par_names == "reference")[0][0]
    else:
        eref_idx = np.where(par_names == "ebreak")[0][0]

    return amp_idx, eref_idx


def copy_target_config(aa_config_1, aa_config_2):
    """From aa_config_1 update information in aa_config_2."""

    amp_idx_1, eref_idx_1 = get_spec_params_indices(aa_config_1)
    amp_idx_2, eref_idx_2 = get_spec_params_indices(aa_config_2)

    # Have the same value of amplitude
    aa_config_2.config.target.components[0].spectral.parameters[amp_idx_2].value = (
        aa_config_1.config.target.components[0].spectral.parameters[amp_idx_1].value
    )
    # Have the same value of reference/e_break energy
    aa_config_2.config.target.components[0].spectral.parameters[eref_idx_2].value = (
        aa_config_1.config.target.components[0].spectral.parameters[eref_idx_1].value
    )
    # Have the same value of redshift value and EBL reference model
    aa_config_2.config.target.components[0].spectral.ebl_abs.redshift = aa_config_1.config.target.components[
        0
    ].spectral.ebl_abs.redshift

    # Make sure the source names are the same
    aa_config_2.config.target.source_name = aa_config_1.config.target.source_name
    aa_config_2.config.target.components[0].name = aa_config_1.config.target.components[0].name

    return aa_config_2


def fetch_all_analysis_fit_info(main_analysis_list, spec_models_list):
    """
    For a list of spectral models, with the AsgardpyAnalysis run till the fit
    step, get the relevant information for testing the model preference.
    """
    fit_success_list = []
    pref_over_pl_chi2_list = []
    stat_list = []
    dof_list = []

    for tag in spec_models_list:
        dict_tag = main_analysis_list[tag]["Analysis"].instrument_spectral_info
        dict_pl = main_analysis_list["pl"]["Analysis"].instrument_spectral_info

        # Collect parameters for AIC check
        stat = dict_tag["best_fit_stat"]
        dof = dict_tag["DoF"]

        fit_success = main_analysis_list[tag]["Analysis"].fit_result.success

        fit_success_list.append(fit_success)
        stat_list.append(stat)
        dof_list.append(dof)

        # Checking the preference of a "nested" spectral model (observed),
        # over Power Law.
        if tag == "pl":
            main_analysis_list[tag]["Pref_over_pl_chi2"] = 0
            main_analysis_list[tag]["Pref_over_pl_pval"] = 0
            main_analysis_list[tag]["DoF_over_pl"] = 0
            pref_over_pl_chi2_list.append(0)
            continue

        p_pl_x, g_pl_x, ndof_pl_x = check_model_preference_lrt(
            dict_pl["best_fit_stat"],
            dict_tag["best_fit_stat"],
            dict_pl["DoF"],
            dict_tag["DoF"],
        )

        main_analysis_list[tag]["Pref_over_pl_chi2"] = g_pl_x
        pref_over_pl_chi2_list.append(g_pl_x)
        main_analysis_list[tag]["Pref_over_pl_pval"] = p_pl_x
        main_analysis_list[tag]["DoF_over_pl"] = ndof_pl_x

    fit_success_list = np.array(fit_success_list)

    # Only select fit results that were successful for comparisons
    stat_list = np.array(stat_list)[fit_success_list]
    dof_list = np.array(dof_list)[fit_success_list]
    pref_over_pl_chi2_list = np.array(pref_over_pl_chi2_list)[fit_success_list]

    return fit_success_list, stat_list, dof_list, pref_over_pl_chi2_list


def tabulate_best_fit_stats(spec_models_list, fit_success_list, main_analysis_list, list_rel_p):
    """For a list of spectral models, tabulate the best fit information."""

    fit_stats_table = []

    for i, tag in enumerate(spec_models_list[fit_success_list]):
        info_ = main_analysis_list[tag]["Analysis"].instrument_spectral_info

        t = main_analysis_list[tag]

        ts_gof = round(info_["best_fit_stat"] - info_["max_fit_stat"], 3)
        t_fits = {
            "Spectral Model": tag.upper(),
            "TS of Best Fit": round(info_["best_fit_stat"], 3),
            "TS of Max Fit": round(info_["max_fit_stat"], 3),
            "TS of Goodness of Fit": ts_gof,
            "DoF of Fit": info_["DoF"],
            r"Significance ($\sigma$) of Goodness of Fit": round(info_["fit_chi2_sig"], 3),
            "p-value of Goodness of Fit": float(f"{info_['fit_pval']:.4g}"),
            "Pref over PL (chi2)": round(t["Pref_over_pl_chi2"], 3),
            "Pref over PL (p-value)": float(f"{t['Pref_over_pl_pval']:.4g}"),
            "Pref over PL (DoF)": t["DoF_over_pl"],
            "Relative p-value (AIC)": float(f"{list_rel_p[i]:.4g}"),
        }
        fit_stats_table.append(t_fits)
    stats_table = QTable(fit_stats_table)

    return stats_table
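
To show how the pieces above fit together, here is a hedged usage sketch. It assumes that each entry of main_analysis_list holds, under the "Analysis" key, an AsgardpyAnalysis already run till the fit step (as the fetch_all_analysis_fit_info docstring requires), that "pl" is among the tags, and that asgardpy.stats.stats provides an AIC comparison helper; the wrapper name summarize_model_preference and the exact AIC helper signature are assumptions, not part of the module.

import numpy as np

# check_model_preference_aic is assumed to compare the successful fits via AIC
# and return one relative p-value per model.
from asgardpy.stats.stats import check_model_preference_aic
from asgardpy.stats.utils import fetch_all_analysis_fit_info, tabulate_best_fit_stats


def summarize_model_preference(main_analysis_list, spec_models_list):
    """Collect fit information for all spectral models and tabulate the comparison.

    main_analysis_list[tag]["Analysis"] must be an AsgardpyAnalysis run till the
    fit step; spec_models_list must be an array of model tags including "pl".
    """
    # tabulate_best_fit_stats indexes the tag list with a boolean mask, so make
    # sure it is a numpy array.
    spec_models_list = np.asarray(spec_models_list)

    fit_success_list, stat_list, dof_list, _ = fetch_all_analysis_fit_info(
        main_analysis_list, spec_models_list
    )

    # Relative p-values from the AIC comparison of the successful fits
    # (helper name and signature assumed).
    list_rel_p = check_model_preference_aic(stat_list, dof_list)

    return tabulate_best_fit_stats(
        spec_models_list, fit_success_list, main_analysis_list, list_rel_p
    )

The returned QTable carries one row per successful fit, with the goodness-of-fit and preference-over-Power-Law columns assembled in tabulate_best_fit_stats.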