Passed
Pull Request — main (#178) by Chaitanya
01:22 · created

test_preferred_model()   rated B

Complexity
    Conditions     5

Size
    Total Lines    69
    Code Lines     41

Duplication
    Lines          0
    Ratio          0 %

Importance
    Changes        0

Metric                            Value
cc    (cyclomatic complexity)     5
eloc  (code lines)                41
nop   (number of parameters)      1
dl    (duplicated lines)          0
loc   (total lines)               69
rs                                8.4293
c                                 0
b                                 0
f                                 0
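The summary and the raw metrics are two views of the same numbers: cc matches Conditions, eloc matches Code Lines, and loc matches Total Lines. If you want to sanity-check comparable figures locally before pushing, a small radon-based sketch could look like the following; radon and the file path are assumptions used for illustration, not necessarily what this report runs.

# Sketch: rough local check of complexity and size metrics with radon
# (illustrative only; the CI report's exact metric definitions may differ).
from pathlib import Path

from radon.complexity import cc_visit  # cyclomatic complexity per function
from radon.raw import analyze          # raw line counts (loc, lloc, ...)

source = Path("tests/test_preferred_model.py").read_text()  # hypothetical path

for block in cc_visit(source):
    if block.name == "test_preferred_model":
        print(block.name, "cyclomatic complexity:", block.complexity)

raw = analyze(source)
print("total lines:", raw.loc, "logical lines:", raw.lloc)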

How to fix: Long Method

Small methods make your code easier to understand, especially when combined with a good name. Moreover, when a method is small, finding a good name for it is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.

Commonly applied refactorings include Extract Method; one way it could apply to this particular method is sketched after the listing below.

import pytest


@pytest.mark.test_data
def test_preferred_model(base_config_1d):
    """
    Testing the script code of checking the preferred spectral model.
    """

    import numpy as np

    from asgardpy.analysis import AsgardpyAnalysis
    from asgardpy.stats import (
        check_model_preference_aic,
        check_model_preference_lrt,
        copy_target_config,
        fetch_all_analysis_fit_info,
        get_model_config_files,
    )

    select_model_tags = ["lp", "bpl2", "ecpl", "pl", "eclp"]
    spec_model_temp_files = []
    spec_model_temp_files = get_model_config_files(select_model_tags)

    main_analysis_list = {}
    spec_models_list = []

    for temp in spec_model_temp_files:
        temp_model = AsgardpyAnalysis(base_config_1d)
        temp_model.config.fit_params.fit_range.min = "100 GeV"

        temp_model.config.target.models_file = temp

        temp_model_2 = AsgardpyAnalysis(temp_model.config)

        copy_target_config(temp_model, temp_model_2)

        spec_tag = temp.name.split(".")[0].split("_")[-1]
        spec_models_list.append(spec_tag)
        main_analysis_list[spec_tag] = {}

        main_analysis_list[spec_tag]["Analysis"] = temp_model_2

    spec_models_list = np.array(spec_models_list)

    # Run Analysis Steps till Fit
    for tag in spec_models_list:
        main_analysis_list[tag]["Analysis"].run(["datasets-1d", "fit"])

    fit_success_list, stat_list, dof_list, pref_over_pl_chi2_list = fetch_all_analysis_fit_info(
        main_analysis_list, spec_models_list
    )

    # If any spectral model has at least 5 sigmas preference over PL
    best_sp_idx_lrt = np.nonzero(pref_over_pl_chi2_list == np.nanmax(pref_over_pl_chi2_list))[0]
    for idx in best_sp_idx_lrt:
        if pref_over_pl_chi2_list[idx] > 5:
            lrt_best_model = spec_models_list[idx]

    list_rel_p = check_model_preference_aic(stat_list, dof_list)

    best_sp_idx_aic = np.nonzero(list_rel_p == np.nanmax(list_rel_p))[0]

    aic_best_model = select_model_tags[best_sp_idx_aic[0]]

    assert lrt_best_model == "lp"
    assert aic_best_model == "lp"

    # Check for bad comparisons, same dof
    p_val_0, g_sig_0, dof_0 = check_model_preference_lrt(4.4, 2.2, 2, 2)

    assert np.isnan(p_val_0)

1 ignored issue, on the line `assert lrt_best_model == "lp"`: The variable lrt_best_model does not seem to be defined for all execution paths.
73