Completed: Push to main ( 6464b6...34edd0 ) by Chaitanya
Duration: 18s (queued 14s)

test_preferred_model()   (rating: B)

Complexity
    Conditions       5

Size
    Total Lines      77
    Code Lines       47

Duplication
    Lines            0
    Ratio            0 %

Importance
    Changes          0

Metric                          Value
cc    (cyclomatic complexity)   5
eloc  (code lines)              47
nop   (number of parameters)    1
dl    (duplicated lines)        0
loc   (total lines)             77
rs                              8.2678
c                               0
b                               0
f                               0
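
The abbreviated metrics map onto the summary above (cc to Conditions, eloc to Code Lines, loc to Total Lines, dl to Duplication). If you want to sanity-check numbers like these locally, the radon library computes comparable per-function values; the sketch below is not the tool that produced this report, and the file path is a placeholder.

from radon.complexity import cc_visit
from radon.raw import analyze

# Placeholder path; point this at the module that contains test_preferred_model.
source = open("tests/test_stats.py").read()

raw = analyze(source)
print("loc:", raw.loc, "lloc:", raw.lloc)  # raw line counts for the whole file

for block in cc_visit(source):
    if block.name == "test_preferred_model":
        # Cyclomatic complexity of the single function, comparable to "cc" above.
        print(block.name, "complexity:", block.complexity)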

How to fix: Long Method

Small methods make your code easier to understand, particularly when combined with a good name. And when a method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.

Commonly applied refactorings include Extract Method; a sketch applied to the reviewed test is given below.
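
As one possible Extract Method sketch, the loop in test_preferred_model() that builds one AsgardpyAnalysis per spectral model could move into a helper. The helper name _build_spec_model_analyses is hypothetical and does not exist in asgardpy; this is an illustration of the refactoring, not the project's actual code.

import numpy as np

from asgardpy.analysis import AsgardpyAnalysis
from asgardpy.stats import copy_target_config


def _build_spec_model_analyses(base_config_1d, spec_model_temp_files):
    """Build one AsgardpyAnalysis per spectral-model config file (hypothetical helper)."""
    main_analysis_list = {}
    spec_models_list = []

    for temp in spec_model_temp_files:
        temp_model = AsgardpyAnalysis(base_config_1d)
        temp_model.config.fit_params.fit_range.min = "100 GeV"
        temp_model.config.target.models_file = temp

        temp_model_2 = AsgardpyAnalysis(temp_model.config)
        copy_target_config(temp_model, temp_model_2)

        spec_tag = temp.name.split(".")[0].split("_")[-1]
        spec_models_list.append(spec_tag)
        main_analysis_list[spec_tag] = {"Analysis": temp_model_2}

    return main_analysis_list, np.array(spec_models_list)

The test body would then shrink to a single call, for example main_analysis_list, spec_models_list = _build_spec_model_analyses(base_config_1d, spec_model_temp_files), and the loop's intent is carried by the helper's name. The reviewed source of test_preferred_model() follows.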

import pytest


@pytest.mark.test_data
def test_preferred_model(base_config_1d):
    """
    Testing the script code of checking the preferred spectral model.
    """

    import numpy as np

    from asgardpy.analysis import AsgardpyAnalysis
    from asgardpy.stats import (
        check_model_preference_aic,
        check_model_preference_lrt,
        copy_target_config,
        fetch_all_analysis_fit_info,
        get_model_config_files,
        tabulate_best_fit_stats,
        write_output_config_yaml,
    )

    select_model_tags = ["lp", "bpl2", "ecpl", "pl", "eclp"]
    spec_model_temp_files = []
    spec_model_temp_files = get_model_config_files(select_model_tags)

    main_analysis_list = {}
    spec_models_list = []

    for temp in spec_model_temp_files:
        temp_model = AsgardpyAnalysis(base_config_1d)
        temp_model.config.fit_params.fit_range.min = "100 GeV"

        temp_model.config.target.models_file = temp

        temp_model_2 = AsgardpyAnalysis(temp_model.config)

        copy_target_config(temp_model, temp_model_2)

        spec_tag = temp.name.split(".")[0].split("_")[-1]
        spec_models_list.append(spec_tag)
        main_analysis_list[spec_tag] = {}

        main_analysis_list[spec_tag]["Analysis"] = temp_model_2

    spec_models_list = np.array(spec_models_list)

    # Run Analysis Steps till Fit
    for tag in spec_models_list:
        main_analysis_list[tag]["Analysis"].run(["datasets-1d", "fit"])

    fit_success_list, stat_list, dof_list, pref_over_pl_chi2_list = fetch_all_analysis_fit_info(
        main_analysis_list, spec_models_list
    )

    # If any spectral model has at least 5 sigmas preference over PL
    best_sp_idx_lrt = np.nonzero(pref_over_pl_chi2_list == np.nanmax(pref_over_pl_chi2_list))[0]
    for idx in best_sp_idx_lrt:
        if pref_over_pl_chi2_list[idx] > 5:
            lrt_best_model = spec_models_list[idx]

    list_rel_p = check_model_preference_aic(stat_list, dof_list)

    best_sp_idx_aic = np.nonzero(list_rel_p == np.nanmax(list_rel_p))[0]

    aic_best_model = select_model_tags[best_sp_idx_aic[0]]

    stats_table = tabulate_best_fit_stats(spec_models_list, fit_success_list, main_analysis_list, list_rel_p)

    assert lrt_best_model == "lp"
1 ignored issue at the line above: The variable lrt_best_model does not seem to be defined for all execution paths.
    assert aic_best_model == "lp"
    assert len(stats_table.colnames) == 11

    tag = spec_models_list[fit_success_list][0]
    write_output_config_yaml(main_analysis_list[tag]["Analysis"].final_model[0])

    # Check for bad comparisons, same dof
    p_val_0, g_sig_0, dof_0 = check_model_preference_lrt(4.4, 2.2, 2, 2)

    assert np.isnan(p_val_0)
81