Passed · Push — master ( b0177e...bb843a ) · by unknown · created 01:25

check_estimator()   Rating: A

Complexity
  Conditions: 4

Size
  Total Lines: 131
  Code Lines: 32

Duplication
  Lines: 0
  Ratio: 0 %

Importance
  Changes: 0
Metric   Value
eloc     32
dl       0
loc      131
rs       9.112
c        0
b        0
f        0
cc       4
nop      7

(By the figures above: eloc, dl, and loc match Code Lines, duplicated Lines,
and Total Lines; cc matches the 4 Conditions; and nop, the number of
parameters, matches the function's seven arguments. The remaining keys, rs,
c, b, and f, are the report tool's internal metric names.)

How to fix: Long Method

Small methods make your code easier to understand, in particular when combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, that is usually a good sign to extract the commented part into a new method, using the comment as a starting point for the new method's name; see the sketch after the list below.

Commonly applied refactorings include:

* Extract Method
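For instance, here is a minimal before/after sketch of Extract Method in
Python; the names report_build and _format_duration are invented for
illustration and are not part of the reviewed code:

def report_build(status, duration_seconds):
    """Print a one-line build report."""
    # The formatting below used to live inline behind a comment like
    # "# format the duration"; Extract Method turns that comment into
    # the name of a helper.
    print(status + " in " + _format_duration(duration_seconds))


def _format_duration(seconds):
    """Render a duration in seconds as 'MM:SS'."""
    minutes, secs = divmod(int(seconds), 60)
    return "%02d:%02d" % (minutes, secs)

For example, report_build("Passed", 85) prints "Passed in 01:25". The
reviewed source of check_estimator() follows.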

"""Estimator checker for extension."""

__author__ = ["fkiraly"]
__all__ = ["check_estimator"]

from skbase.utils.dependencies import _check_soft_dependencies


def check_estimator(
    estimator,
    raise_exceptions=False,
    tests_to_run=None,
    fixtures_to_run=None,
    verbose=True,
    tests_to_exclude=None,
    fixtures_to_exclude=None,
):
    """Run all tests on one single estimator.

    Tests that are run on estimator:

    * all tests in test_all_estimators
    * all interface compatibility tests from the module of estimator's scitype

    Parameters
    ----------
    estimator : estimator class or estimator instance
    raise_exceptions : bool, optional, default=False
        whether to return exceptions/failures in the results dict, or raise them

        * if False: returns exceptions in returned `results` dict
        * if True: raises exceptions as they occur

    tests_to_run : str or list of str, optional. Default = run all tests.
        Names (test/function name strings) of tests to run.
        Subsets the tests that are run to the tests given here.
    fixtures_to_run : str or list of str, optional. Default = run all tests.
        pytest test-fixture combination codes specifying which combinations to run.
        Subsets the tests and fixtures that are run to the list given here.
        If both tests_to_run and fixtures_to_run are provided, runs the *union*,
        i.e., all test-fixture combinations for tests in tests_to_run,
        plus all test-fixture combinations in fixtures_to_run.
    verbose : bool, optional, default=True
        whether to print out an informative summary of the tests run.
    tests_to_exclude : str or list of str, names of tests to exclude. default = None
        removes tests that should not be run, after subsetting via tests_to_run.
    fixtures_to_exclude : str or list of str, fixtures to exclude. default = None
        removes test-fixture combinations that should not be run.
        This is done after subsetting via fixtures_to_run.

    Returns
    -------
    results : dict of results of the tests run on estimator
        keys are test/fixture strings, identical as in pytest, e.g., test[fixture]
        entries are the string "PASSED" if the test passed,
        or the exception raised if the test did not pass
        returned only if all tests pass, or raise_exceptions=False

    Raises
    ------
    if raise_exceptions=True,
    raises any exception produced by the tests directly

    Examples
    --------
    >>> from hyperactive.opt import HillClimbing
    >>> from hyperactive.utils import check_estimator

    Running all tests for the HillClimbing class,
    this uses all instances from get_test_params and compatible scenarios

    >>> results = check_estimator(HillClimbing)
    All tests PASSED!

    Running all tests for a specific HillClimbing instance,
    this uses the instance that is passed and compatible scenarios

    >>> specific_hill_climbing = HillClimbing.create_test_instance()
    >>> results = check_estimator(specific_hill_climbing)
    All tests PASSED!

    Running a specific test (all fixtures) for HillClimbing

    >>> results = check_estimator(HillClimbing, tests_to_run="test_clone")
    All tests PASSED!

    {'test_clone[HillClimbing-0]': 'PASSED',
    'test_clone[HillClimbing-1]': 'PASSED'}

    Running one specific test-fixture combination for HillClimbing

    >>> check_estimator(
    ...    HillClimbing, fixtures_to_run="test_clone[HillClimbing-1]"
    ... )
    All tests PASSED!
    {'test_clone[HillClimbing-1]': 'PASSED'}
    """
    msg = (
        "check_estimator is a testing utility for developers, and "
        "requires pytest to be present "
        "in the Python environment, but pytest was not found. "
        "pytest is a developer dependency and not included in the base "
        "hyperactive installation. Please run: `pip install pytest` to "
        "install the pytest package. "
        "To install hyperactive with all developer dependencies, run:"
        " `pip install hyperactive[dev]`"
    )
    _check_soft_dependencies("pytest", msg=msg)

    from hyperactive.tests.test_class_register import get_test_classes_for_obj

    test_clss_for_est = get_test_classes_for_obj(estimator)

    results = {}

    for test_cls in test_clss_for_est:
        test_cls_results = test_cls().run_tests(
            obj=estimator,
            raise_exceptions=raise_exceptions,
            tests_to_run=tests_to_run,
            fixtures_to_run=fixtures_to_run,
            tests_to_exclude=tests_to_exclude,
            fixtures_to_exclude=fixtures_to_exclude,
        )
        results.update(test_cls_results)

    failed_tests = [key for key in results if results[key] != "PASSED"]
    if len(failed_tests) > 0:
        msg = "\n".join("FAILED: " + x for x in failed_tests)
    else:
        msg = "All tests PASSED!"

    if verbose:
        # printing is an intended feature, for console usage and interactive debugging
        print(msg)  # noqa T001

    return results
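Applying the same Extract Method advice to check_estimator itself, the
summary-building block at the end of the function is a natural candidate.
A hedged sketch (the helper name _summary_msg is hypothetical and not part
of the hyperactive codebase):

def _summary_msg(results):
    """Return the summary string printed by check_estimator when verbose=True."""
    failed_tests = [key for key in results if results[key] != "PASSED"]
    if failed_tests:
        return "\n".join("FAILED: " + x for x in failed_tests)
    return "All tests PASSED!"

The tail of check_estimator would then shrink to
if verbose: print(_summary_msg(results)), lowering the line count flagged by
the Long Method check without changing behavior.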