# Author: Simon Blanke
# Email: [email protected]
# License: MIT License


import copy
import multiprocessing as mp
import pandas as pd

from typing import Union, List, Dict, Type

from .optimizers import RandomSearchOptimizer
from .run_search import run_search

from .results import Results
from .print_results import PrintResults
from .search_space import SearchSpace


class Hyperactive:
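    """Orchestrate one or more optimization searches.

    ``verbosity`` controls what is printed during the run (pass ``False``
    to silence all output), ``distribution`` selects the backend used to
    distribute the searches, and ``n_processes`` sets the number of
    processes ("auto" by default). Searches are registered with
    ``add_search`` and executed together by ``run``; afterwards the
    results are available through ``best_para``, ``best_score`` and
    ``search_data``.

    Minimal usage sketch (``objective_function`` and ``search_space`` are
    illustrative placeholders defined by the user, not by this module)::

        hyper = Hyperactive()
        hyper.add_search(objective_function, search_space, n_iter=100)
        hyper.run()
    """
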
    def __init__(
        self,
        verbosity: list = ["progress_bar", "print_results", "print_times"],
        distribution: str = "multiprocessing",
        n_processes: Union[str, int] = "auto",
    ):
        super().__init__()
        if verbosity is False:
            verbosity = []

        self.verbosity = verbosity
        self.distribution = distribution
        self.n_processes = n_processes

        self.opt_pros = {}

    def _create_shared_memory(self, new_opt):
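        """Attach a shared-memory dictionary to ``new_opt``.

        If ``new_opt`` requests shared memory ("share"), reuse the
        ``multiprocessing.Manager().dict()`` of an already registered
        optimizer that has the same objective-function name and the same
        number of search-space dimensions; otherwise create a new one.
        """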
        if new_opt.memory == "share":
            if len(self.opt_pros) == 0:
                manager = mp.Manager()
                new_opt.memory = manager.dict()

            for opt in self.opt_pros.values():
                same_obj_func = (
                    opt.objective_function.__name__
                    == new_opt.objective_function.__name__
                )
                same_ss_length = len(opt.s_space()) == len(new_opt.s_space())

                if same_obj_func and same_ss_length:
                    new_opt.memory = opt.memory  # get same manager.dict
                else:
                    manager = mp.Manager()  # get new manager.dict
                    new_opt.memory = manager.dict()

    @staticmethod
    def _default_opt(optimizer):
        if isinstance(optimizer, str):
            if optimizer == "default":
                optimizer = RandomSearchOptimizer()
        return copy.deepcopy(optimizer)

    @staticmethod
    def _default_search_id(search_id, objective_function):
        if not search_id:
            search_id = objective_function.__name__
        return search_id

    @staticmethod
    def check_list(search_space):
        for key in search_space.keys():
            search_dim = search_space[key]

            error_msg = (
                "Value in '{}' of search space dictionary must be of type list".format(
                    key
                )
            )
            if not isinstance(search_dim, list):
                print("Warning", error_msg)
                # raise ValueError(error_msg)

    def add_search(
        self,
        objective_function: callable,
        search_space: Dict[str, list],
        n_iter: int,
        search_id=None,
        optimizer: Union[str, Type[RandomSearchOptimizer]] = "default",
        n_jobs: int = 1,
        initialize: Dict[str, int] = {"grid": 4, "random": 2, "vertices": 4},
        constraints: List[callable] = None,
        pass_through: Dict = None,
        callbacks: Dict[str, callable] = None,
        catch: Dict = None,
        max_score: float = None,
        early_stopping: Dict = None,
        random_state: int = None,
        memory: Union[str, bool] = "share",
        memory_warm_start: pd.DataFrame = None,
    ):
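        """Register one search process.

        Combines an objective function, its search space (a dict that maps
        parameter names to lists of candidate values) and the number of
        iterations with an optimizer instance. Setting ``n_jobs`` registers
        the same search that many times (-1 uses all CPU cores), so ``run``
        can execute it in parallel processes.
        """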
        self.check_list(search_space)

        if constraints is None:
            constraints = []
        if pass_through is None:
            pass_through = {}
        if callbacks is None:
            callbacks = {}
        if catch is None:
            catch = {}
        if early_stopping is None:
            early_stopping = {}

        optimizer = self._default_opt(optimizer)
        search_id = self._default_search_id(search_id, objective_function)
        s_space = SearchSpace(search_space)

        optimizer.setup_search(
            objective_function=objective_function,
            s_space=s_space,
            n_iter=n_iter,
            initialize=initialize,
            constraints=constraints,
            pass_through=pass_through,
            callbacks=callbacks,
            catch=catch,
            max_score=max_score,
            early_stopping=early_stopping,
            random_state=random_state,
            memory=memory,
            memory_warm_start=memory_warm_start,
            verbosity=self.verbosity,
        )

        if memory == "share":
            self._create_shared_memory(optimizer)

        if n_jobs == -1:
            n_jobs = mp.cpu_count()

        for _ in range(n_jobs):
            nth_process = len(self.opt_pros)
            self.opt_pros[nth_process] = optimizer

    def _print_info(self):
        print_res = PrintResults(self.opt_pros, self.verbosity)

        if self.verbosity:
            for _ in range(len(self.opt_pros)):
                print("")

        for results in self.results_list:
            nth_process = results["nth_process"]
            print_res.print_process(results, nth_process)

    def run(self, max_time: float = None):
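        """Run all registered searches and collect their results.

        ``max_time`` is forwarded to every optimizer as a time budget for
        its search. After the run, results are accessible via ``best_para``,
        ``best_score`` and ``search_data``.
        """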
        for opt in self.opt_pros.values():
            opt.max_time = max_time

        self.results_list = run_search(
            self.opt_pros, self.distribution, self.n_processes
        )

        self.results_ = Results(self.results_list, self.opt_pros)

        self._print_info()

    def best_para(self, id_):
        return self.results_.best_para(id_)

    def best_score(self, id_):
        return self.results_.best_score(id_)

    def search_data(self, id_, times=False):
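        """Return the collected search data of one search as a DataFrame.

        The timing columns ("eval_times", "iter_times") are dropped unless
        ``times`` is truthy.
        """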
        search_data_ = self.results_.search_data(id_)

        if not times:
            search_data_.drop(
                labels=["eval_times", "iter_times"],
                axis=1,
                inplace=True,
                errors="ignore",
            )
        return search_data_