Passed
Push — master ( 73f6be...3a14a5 )
by Simon, created 01:53

optimizers.MyOptimizer._solve()   A

Complexity
  Conditions: 1

Size
  Total Lines: 20
  Code Lines: 3

Duplication
  Lines: 0
  Ratio: 0 %

Importance
  Changes: 0

Metric  Value
eloc    3
dl      0
loc     20
rs      10
c       0
b       0
f       0
cc      1
nop     3
# copyright: hyperactive developers, MIT License (see LICENSE file)
"""Extension template for optimizers.

Purpose of this implementation template:
    quick implementation of new estimators following the template
    NOT a concrete class to import! This is NOT a base class or a concrete class!
    This is to be used as a "fill-in" coding template.

How to use this implementation template to implement a new estimator:
- make a copy of the template in a suitable location, give it a descriptive name.
- work through all the "todo" comments below
- fill in code for mandatory methods, and optionally for optional methods
- do not write to reserved variables: _tags, _tags_dynamic
- you can add more private methods, but do not override BaseEstimator's private methods
    an easy way to be safe is to prefix your methods with "_custom"
- change docstrings for functions and the file
- ensure interface compatibility by hyperactive.utils.check_estimator
    (see also the commented example below this docstring)
- once complete: use as a local library, or contribute to hyperactive via PR

Mandatory methods:
    optimization - _solve(self, experiment, **search_config) -> dict

Testing - required for automated test framework and check_estimator usage:
    get default parameters for test instance(s) - get_test_params()
"""
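# Illustrative, commented example (not part of the template): once the template is
# filled in, interface compatibility can be checked locally with
# hyperactive.utils.check_estimator, as referenced in the docstring above.
# The exact call signature should be confirmed against the hyperactive docs;
# a minimal sketch would look like:
#
#     from hyperactive.utils import check_estimator
#
#     check_estimator(MyOptimizer)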
# todo: write an informative docstring for the file or module, remove the above
# todo: add an appropriate copyright notice for your estimator
#       estimators contributed should have the copyright notice at the top
#       estimators of your own do not need to have a permissive or MIT copyright

# todo: uncomment the following line, enter authors' GitHub IDs
# __author__ = [authorGitHubID, anotherAuthorGitHubID]

from hyperactive.base import BaseOptimizer

# todo: add any necessary imports here

# todo: for imports of soft dependencies:
# make sure to fill in the "python_dependencies" tag with the package import name
# import soft dependencies only inside methods of the class, not at the top of the file
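# Illustrative, commented sketch (hypothetical package name "somesoftdep"):
# a soft dependency is declared in the tags and imported inside the method that
# needs it, so the module stays importable without the dependency installed.
#
#     _tags = {"python_dependencies": "somesoftdep"}
#
#     def _solve(self, experiment, **search_config):
#         import somesoftdep  # deferred import, only runs when _solve is called
#         ...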


class MyOptimizer(BaseOptimizer):
    """Custom optimizer. todo: write docstring.

    todo: describe your custom optimizer here

    Parameters
    ----------
    parama : int
        descriptive explanation of parama
    paramb : string, optional (default='default')
        descriptive explanation of paramb
    paramc : estimator, optional (default=MyOtherEstimator(foo=42))
        descriptive explanation of paramc
    and so on

    Examples
    --------
    >>> from somewhere import MyOptimizer
    >>> great_example(code)
    >>> multi_line_expressions(
    ...     require_dots_on_new_lines_so_that_expression_continues_properly
    ... )
    """

    # todo: fill in tags - most tags have sensible defaults below
    _tags = {
        # tags and full specifications are available in the tag API reference
        # TO BE ADDED
        #
        # --------------
        # packaging info
        # --------------
        #
        # ownership and contribution tags
        # -------------------------------
        #
        # author = author(s) of the estimator
        # an author is anyone with a significant contribution to the code at some point
        "authors": ["author1", "author2"],
        # valid values: str or list of str, should be GitHub handles
        # this should follow best scientific contribution practices
        # scope is the code, not the methodology (method is per paper citation)
        # if interfacing a 3rd party estimator, ensure to give credit to the
        # authors of the interfaced estimator
        #
        # maintainer = current maintainer(s) of the estimator
        # per algorithm maintainer role, see governance document
        # this is an "owner" type role, with rights and maintenance duties
        # for 3rd party interfaces, the scope is the class only
        "maintainers": ["maintainer1", "maintainer2"],
        # valid values: str or list of str, should be GitHub handles
        # remove tag if maintained by package core team
        #
        # dependency tags: python version and soft dependencies
        # -----------------------------------------------------
        #
        # python version requirement
        "python_version": None,
        # valid values: str, PEP 440 valid python version specifiers
        # raises exception at construction if local python version is incompatible
        # delete tag if no python version requirement
        #
        # soft dependency requirement
        "python_dependencies": None,
        # valid values: str or list of str, PEP 440 valid package version specifiers
        # raises exception at construction if the modules named by the strings cannot be imported
        # delete tag if no soft dependency requirement
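        # illustrative example values only (hypothetical requirements, not defaults):
        #   "python_version": ">=3.8",
        #   "python_dependencies": ["scikit-learn>=1.0"],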
    }

    # todo: add any hyper-parameters and components to constructor
    def __init__(self, parama, paramb="default", paramc=None, experiment=None):
        # todo: write any hyper-parameters to self
        self.parama = parama
        self.paramb = paramb
        self.paramc = paramc
        # IMPORTANT: the parameters written to self should never be overwritten
        # or mutated from now on
        # for handling defaults etc., write to other attributes, e.g., self._parama
        self.experiment = experiment
        # IMPORTANT: experiment must come last, and have default value None

        # leave this as is
        super().__init__()

        # todo: optional, parameter checking logic (if applicable) should happen here
        # if this writes derived values to self, it should *not* overwrite self.parama etc.
        # instead, write to self._parama, self._newparam (starting with _)
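        # Illustrative, commented sketch (hypothetical handling, not prescribed by
        # the template): resolving the None default of paramc into a private
        # working attribute, using the default named in the class docstring:
        #
        #     if paramc is None:
        #         self._paramc = MyOtherEstimator(foo=42)
        #     else:
        #         self._paramc = paramc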

    # optional: implement this to prepare arguments for _run
    # the default is all parameters passed to __init__, minus the experiment
    # the result of this is passed to _run as search_config
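    # For illustration (derived from the default described above, not new API):
    # for this class, the default search_config would be the __init__ parameters
    # without ``experiment``, i.e., {"parama": ..., "paramb": ..., "paramc": ...}.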
    def get_search_config(self):
        """Get the search configuration.

        Returns
        -------
        dict with str keys
            The search configuration dictionary.
        """
        # the default
        search_config = super().get_search_config()
        # example of adding a new parameter to the search config
        # this is optional, but can be useful for clean separation or API interfacing
        search_config["one_more_param"] = 42
        # this return is available in _run as search_config
        return search_config

    # todo: implement this, mandatory
    def _solve(self, experiment, **search_config):
        """Run the optimization search process to maximize the experiment's score.

        Parameters
        ----------
        experiment : BaseExperiment
            The experiment to optimize parameters for.
        search_config : dict with str keys
            identical to the return of ``get_search_config``.

        Returns
        -------
        dict with str keys
            The best parameters found during the search.
            Must have keys that are a subset of, or identical to,
            experiment.paramnames().
        """
        # important: the search logic should *maximize* the experiment's score
        # this is the main method to implement, it should return the best parameters
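        # Illustrative, commented sketch only (not the prescribed implementation):
        # a naive random search, assuming a hypothetical "search_space" entry in
        # search_config (dict of parameter name -> list of candidate values), and
        # assuming the experiment can be scored via ``experiment.score(params)``
        # returning a float (check the BaseExperiment docs for the exact API):
        #
        #     import random
        #
        #     search_space = search_config["search_space"]
        #     best_params, best_score = None, float("-inf")
        #     for _ in range(100):
        #         params = {k: random.choice(v) for k, v in search_space.items()}
        #         score = experiment.score(params)
        #         if score > best_score:
        #             best_params, best_score = params, score
        #     return best_params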
        best_params = {"write_some_logic_to_get": "best_params"}
        return best_params

    # todo: implement this for testing purposes!
    #   required to run local automated unit and integration testing of estimator
    #   method should return default parameters, so that a test instance can be created
    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, will return `"default"` set.
            There are currently no reserved values for this type of estimator.

        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class.
            Each dict contains parameters to construct an "interesting" test instance,
            i.e., `MyClass(**params)` or `MyClass(**params[i])` creates a valid test
            instance.
            `create_test_instance` uses the first (or only) dictionary in `params`.
        """
        # todo: set the testing parameters for the estimator
        # Testing parameters can be a dictionary or a list of dictionaries.
        # Testing parameter choice should cover internal cases well.
        #   for "simple" extension, ignore the parameter_set argument.
        #
        # IMPORTANT: all parameter sets must contain an experiment object
        # this must be passed here, even if experiment can be left None in __init__
        from somewhere import AnotherExperiment, MyExperiment

        paramset1 = {
            "parama": 0,
            "paramb": "default",
            "paramc": None,
            "experiment": MyExperiment("experiment_params"),
        }
        paramset2 = {
            "parama": 1,
            "paramb": "foo",
            "paramc": 42,
            "experiment": AnotherExperiment("another_experiment_params"),
        }
        return [paramset1, paramset2]

        # this method can, if required, use:
        #   class properties (e.g., inherited); parent class test case
        #   imported objects such as estimators from sklearn
        # important: all such imports should be *inside get_test_params*, not at the top
        #            since imports are used only at testing time
        #
        # A good parameter set should primarily satisfy two criteria:
        #   1. The chosen set of parameters should have a low testing time,
        #      ideally on the order of a few seconds for the entire test suite.
        #      This is vital for cases where default values result in
        #      "big" models, which not only increase test time but also
        #      run the risk of crashing test workers.
        #   2. There should be a minimum of two such parameter sets with different
        #      values to ensure a wide range of code coverage.
        #
        # example 1: specify params as a dictionary
        # any number of params can be specified
        # params = {"est": value0, "parama": value1, "paramb": value2}
        #
        # example 2: specify params as a list of dictionaries
        # note: only the first dictionary will be used by create_test_instance
        # params = [{"est": value1, "parama": value2},
        #           {"est": value3, "parama": value4}]
        #
        # return params