Passed: push to master (4a40b1...18f8a0) by Simon, created 01:39

experiments.MyExperiment._evaluate()   A

Complexity:  1 condition
Size:        23 total lines, 4 code lines
Duplication: 0 lines (0 %)
Importance:  0 changes

Metric  Value
eloc    4
dl      0
loc     23
rs      10
c       0
b       0
f       0
cc      1
nop     2
# copyright: hyperactive developers, MIT License (see LICENSE file)
"""Extension template for experiments.

Purpose of this implementation template:
    quick implementation of new estimators following the template
    NOT a concrete class to import! This is NOT a base class or concrete class!
    This is to be used as a "fill-in" coding template.

How to use this implementation template to implement a new estimator:
- make a copy of the template in a suitable location, give it a descriptive name.
- work through all the "todo" comments below
- fill in code for mandatory methods, and optionally for optional methods
- do not write to reserved variables: _tags, _tags_dynamic
- you can add more private methods, but do not override BaseEstimator's private methods
    an easy way to be safe is to prefix your methods with "_custom"
- change docstrings for functions and the file
- ensure interface compatibility via hyperactive.utils.check_estimator
  (a usage sketch is given at the end of this file)
- once complete: use as a local library, or contribute to hyperactive via PR

Mandatory methods:
    scoring         - _evaluate(self, params: dict) -> tuple[np.float64, dict]
    parameter names - _paramnames(self) -> list[str]

Testing - required for automated test framework and check_estimator usage:
    get default parameters for test instance(s) - get_test_params()
"""
# todo: write an informative docstring for the file or module, remove the above
# todo: add an appropriate copyright notice for your estimator
#       estimators contributed should have the copyright notice at the top
#       estimators of your own do not need to have permissive or MIT copyright

# todo: uncomment the following line, enter authors' GitHub IDs
# __author__ = [authorGitHubID, anotherAuthorGitHubID]

import numpy as np

from hyperactive.base import BaseExperiment

# todo: add any necessary imports here

# todo: for imports of soft dependencies:
# make sure to fill in the "python_dependencies" tag with the package import name
# import soft dependencies only inside methods of the class, not at the top of the file
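
# example sketch of a soft dependency import inside a method; illustration
# only, "scipy" is a hypothetical stand-in for whatever package the
# "python_dependencies" tag lists:
#
# def _custom_helper(self):
#     from scipy.stats import norm  # soft dependency, imported only when used
#     return norm.cdf(0.0)
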
class MyExperiment(BaseExperiment):
    """Custom experiment. todo: write docstring.

    todo: describe your custom experiment here

    Parameters
    ----------
    parama : int
        descriptive explanation of parama
    paramb : string, optional (default='default')
        descriptive explanation of paramb
    paramc : MyOtherEstimator, optional (default=MyOtherEstimator(foo=42))
        descriptive explanation of paramc
    and so on

    Examples
    --------
    >>> from somewhere import MyExperiment
    >>> great_example(code)
    >>> multi_line_expressions(
    ...     require_dots_on_new_lines_so_that_expression_continues_properly
    ... )
    """

    # todo: fill in tags - most tags have sensible defaults below
    _tags = {
        # tags and full specifications are available in the tag API reference
        # TO BE ADDED
        #
        # property tags: subtype
        # ----------------------
        #
        "property:randomness": "random",
        # valid values: "random", "deterministic"
        # if "deterministic", two calls of "evaluate" must result in the same value
        #
        "property:higher_or_lower_is_better": "lower",
        # valid values: "higher", "lower", "mixed"
        # whether higher or lower returns of "evaluate" are better
        #
        # --------------
        # packaging info
        # --------------
        #
        # ownership and contribution tags
        # -------------------------------
        #
        # author = author(s) of the estimator
        # an author is anyone with significant contribution to the code at some point
        "authors": ["author1", "author2"],
        # valid values: str or list of str, should be GitHub handles
        # this should follow best scientific contribution practices
        # scope is the code, not the methodology (method is per paper citation)
        # if interfacing a 3rd party estimator, ensure to give credit to the
        # authors of the interfaced estimator
        #
        # maintainer = current maintainer(s) of the estimator
        # per algorithm maintainer role, see governance document
        # this is an "owner" type role, with rights and maintenance duties
        # for 3rd party interfaces, the scope is the class only
        "maintainers": ["maintainer1", "maintainer2"],
        # valid values: str or list of str, should be GitHub handles
        # remove tag if maintained by package core team
        #
        # dependency tags: python version and soft dependencies
        # -----------------------------------------------------
        #
        # python version requirement
        "python_version": None,
        # valid values: str, PEP 440 valid python version specifiers
        # raises exception at construction if local python version is incompatible
        # delete tag if no python version requirement
        #
        # soft dependency requirement
        "python_dependencies": None,
        # valid values: str or list of str, PEP 440 valid package version specifiers
        # raises exception at construction if the modules named in the strings
        # cannot be imported
        # delete tag if no soft dependency requirement
    }
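
    # example of filled-in dependency tags, for illustration only; the version
    # specifiers and package name are hypothetical placeholders, e.g., inside
    # _tags above one might write:
    #
    # "python_version": ">=3.9",
    # "python_dependencies": ["scipy>=1.10"],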

    # todo: add any hyper-parameters and components to constructor
    def __init__(self, parama, paramb="default", paramc=None):
        # todo: write any hyper-parameters to self
        self.parama = parama
        self.paramb = paramb
        self.paramc = paramc
        # IMPORTANT: the parameters set above should never be overwritten or
        # mutated from now on; for handling defaults etc, write to other
        # attributes, e.g., self._parama

        # leave this as is
        super().__init__()

        # todo: optional, parameter checking logic (if applicable) should happen here
        # if this writes derived values to self, it should *not* overwrite
        # self.parama etc; instead, write to self._parama, self._newparam
        # (names starting with _)
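        #
        # example sketch of such default handling, for illustration only; the
        # fallback value is a hypothetical placeholder:
        #
        # self._paramc = paramc if paramc is not None else MyOtherEstimator(foo=42)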

    # todo: implement this, mandatory
    def _paramnames(self):
        """Return the parameter names of the search.

        Returns
        -------
        list of str
            The parameter names of the search parameters.
        """
        # for every instance, this should return the correct parameter names
        # i.e., the maximal set of keys of the dict expected by _evaluate
        return ["score_param1", "score_param2"]

    # todo: implement this, mandatory
    def _evaluate(self, params):
        """Evaluate the parameters.

        Parameters
        ----------
        params : dict with string keys
            Parameters to evaluate.

        Returns
        -------
        float
            The value of the parameters as per evaluation.
        dict
            Additional metadata about the search.
        """
        # params is a dictionary with keys being paramnames or a subset thereof
        # IMPORTANT: avoid side effects on params!
        #
        # the method may support being passed only a subset of the parameters
        # in paramnames, but this is not required
        value = np.float64(42)  # must be numpy.float64
        metadata = {"some": "metadata"}  # can be any dict
        return value, metadata
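
    # usage sketch, for illustration only; the parameter values are
    # hypothetical placeholders, and _evaluate is not called directly but
    # via the public evaluate (see _get_score_params below):
    #
    # exp = MyExperiment(parama=0)
    # result = exp.evaluate({"score_param1": "foo", "score_param2": "bar"})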

    # todo: implement this for testing purposes!
    #   required to run local automated unit and integration testing of estimator
    #   method should return default parameters, so that a test instance can be created
    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, the `"default"` set is
            returned. There are currently no reserved values for this type of
            estimator.

        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class.
            Each dict contains parameters to construct an "interesting" test
            instance, i.e., `MyClass(**params)` or `MyClass(**params[i])` creates
            a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`.
        """
        # todo: set the testing parameters for the estimators
        # Testing parameters can be dictionary or list of dictionaries.
        # Testing parameter choice should cover internal cases well.
        #   for "simple" extension, ignore the parameter_set argument.
        paramset1 = {"parama": 0, "paramb": "default", "paramc": None}
        paramset2 = {"parama": 1, "paramb": "foo", "paramc": 42}
        return [paramset1, paramset2]

        # this method can, if required, use:
        #   class properties (e.g., inherited); parent class test case
        #   imported objects such as estimators from sklearn
        # important: all such imports should be *inside get_test_params*, not at the top
        #            since imports are used only at testing time
        #
        # A good parameter set should primarily satisfy two criteria:
        #   1. The chosen set of parameters should have a low testing time,
        #      ideally on the order of a few seconds for the entire test suite.
        #      This is vital for cases where default values result in
        #      "big" models, which not only increase test time but also
        #      run the risk of crashing test workers.
        #   2. There should be at least two such parameter sets with different
        #      sets of values to ensure a wide range of code coverage.
        #
        # example 1: specify params as dictionary
        # any number of params can be specified
        # params = {"est": value0, "parama": value1, "paramb": value2}
        #
        # example 2: specify params as list of dictionary
        # note: only the first dictionary will be used by create_test_instance
        # params = [{"est": value1, "parama": value2},
        #           {"est": value3, "parama": value4}]
        #
        # return params
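        #
        # usage sketch, illustrative only: the test framework builds instances
        # roughly as below; create_test_instance is referenced in the docstring
        # above and uses the first parameter set:
        #
        # inst = MyExperiment.create_test_instance()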

    @classmethod
    def _get_score_params(cls):
        """Return settings for testing score/evaluate functions. Used in tests only.

        Returns a list; the i-th element should be valid arguments for
        self.evaluate and self.score, of an instance constructed with
        self.get_test_params()[i].

        Returns
        -------
        list of dict
            The parameters to be used for scoring.
        """
        # dict keys should be the same as the _paramnames return,
        # or a subset, only if _evaluate allows for subsets of parameters
        score_params1 = {"score_param1": "foo", "score_param2": "bar"}
        score_params2 = {"score_param1": "baz", "score_param2": "qux"}
        return [score_params1, score_params2]
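
# usage sketch for the interface check referenced in the module docstring;
# illustrative only, to be run once the template has been filled in:
#
# from hyperactive.utils import check_estimator
# check_estimator(MyExperiment)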