1
|
|
|
# Author: Simon Blanke |
2
|
|
|
# Email: [email protected] |
3
|
|
|
# License: MIT License |
4
|
|
|
|
5
|
|
|
import time |
6
|
|
|
|
7
|
|
|
from importlib import import_module |
8
|
|
|
|
9
|
|
|
import multiprocessing |
10
|
|
|
|
11
|
|
|
from .checks import check_args |
12
|
|
|
from .verbosity import ProgressBar |
13
|
|
|
|
14
|
|
|
|
15
|
|
|
# Maps the ``memory`` setting of a search (False / "short" / "long") to the
# name of the SearchProcess class to instantiate; resolved lazily through
# ``get_class`` so only the needed variant is imported.
search_process_dict = {
    False: "SearchProcessNoMem",
    "short": "SearchProcessShortMem",
    "long": "SearchProcessLongMem",
}
20
|
|
|
|
21
|
|
|
# Maps the ``memory`` setting to the name of the Search class that drives all
# collected search processes (long-term memory uses a dedicated driver).
search_dict = {
    False: "Search",
    "short": "Search",
    "long": "SearchLongTermMemory",
}
26
|
|
|
|
27
|
|
|
|
28
|
|
|
def set_n_jobs(n_jobs):
    """Return the effective number of jobs to run in parallel.

    Parameters
    ----------
    n_jobs : int
        Requested job count. ``-1`` is the conventional "use all cores"
        flag; any request exceeding the machine's core count is clamped
        to the core count.

    Returns
    -------
    int
        A value between 1 and ``multiprocessing.cpu_count()``.
    """
    num_cores = multiprocessing.cpu_count()
    # Bug fix: previously only exactly -1 mapped to "all cores", so 0 or
    # other negative values slipped through and later produced an empty
    # ``range(n_jobs)`` — i.e. no search processes at all. Treat every
    # value below 1 the same as -1.
    if n_jobs < 1 or n_jobs > num_cores:
        return num_cores
    return n_jobs
35
|
|
|
|
36
|
|
|
|
37
|
|
|
def get_class(file_path, class_name):
    """Resolve and return the attribute ``class_name`` from the module at
    ``file_path``, importing it relative to the ``hyperactive`` package.
    """
    return getattr(import_module(file_path, "hyperactive"), class_name)
40
|
|
|
|
41
|
|
|
|
42
|
|
|
class Hyperactive:
    """Public entry point of the package.

    Register one or more searches with :meth:`add_search` and execute all
    collected search processes with :meth:`run`.
    """

    def __init__(
        self, X, y, random_state=None, verbosity=3, warnings=False, ext_warnings=False,
    ):
        """Store the training data and global search settings.

        Parameters
        ----------
        X, y :
            Features and target, forwarded unchanged to every search
            process via ``training_data``.
        random_state : int or None
            Seed forwarded to each search process.
        verbosity : int
            Verbosity level (stored; not consumed in this class body).
        warnings, ext_warnings : bool
            Accepted but unused here — presumably consumed elsewhere in
            the package (TODO confirm); kept for interface compatibility.
        """
        self.training_data = {
            "features": X,
            "target": y,
        }
        self.verbosity = verbosity
        self.random_state = random_state
        # One SearchProcess object per job of every registered search.
        self.search_processes = []

    def _add_process(
        self,
        nth_process,
        model,
        search_space,
        name,
        n_iter,
        optimizer,
        n_jobs,
        init_para,
        memory,
    ):
        """Create one SearchProcess (variant chosen by ``memory``) and
        append it to ``self.search_processes``."""
        search_process_kwargs = {
            "nth_process": nth_process,
            "p_bar": ProgressBar(),
            "model": model,
            "search_space": search_space,
            "search_name": name,
            "n_iter": n_iter,
            "training_data": self.training_data,
            "optimizer": optimizer,
            "n_jobs": n_jobs,
            "init_para": init_para,
            "memory": memory,
            "random_state": self.random_state,
        }
        # Resolve the concrete process class lazily so only the variant
        # matching the ``memory`` setting is imported.
        SearchProcess = get_class(".search_process", search_process_dict[memory])
        self.search_processes.append(SearchProcess(**search_process_kwargs))

    def add_search(
        self,
        model,
        search_space,
        name=None,
        n_iter=10,
        optimizer="RandomSearch",
        n_jobs=1,
        init_para=None,
        memory="short",
    ):
        """Register a search: validate the arguments and create one
        SearchProcess per parallel job.

        Parameters
        ----------
        model : callable
            Objective/model function to optimize.
        search_space : dict
            Parameter space to search.
        name : str or None
            Optional label for this search.
        n_iter : int
            Iterations per search process.
        optimizer : str
            Name of the optimization strategy.
        n_jobs : int
            Number of parallel jobs; ``-1`` means all cores.
        init_para : list or None
            Initial parameter sets. ``None`` (the default) means an empty
            list; a fresh list is created per call.
        memory : False, "short" or "long"
            Memory variant selecting the process/search classes.
        """
        # Bug fix: the signature previously used the mutable default
        # ``init_para=[]``, which is shared across calls; use a ``None``
        # sentinel and build a fresh list instead.
        if init_para is None:
            init_para = []

        check_args(
            model, search_space, n_iter, optimizer, n_jobs, init_para, memory,
        )

        n_jobs = set_n_jobs(n_jobs)

        # One process per job; nth_process is a running index over all
        # processes registered so far (across multiple add_search calls).
        for _ in range(n_jobs):
            nth_process = len(self.search_processes)
            self._add_process(
                nth_process,
                model,
                search_space,
                name,
                n_iter,
                optimizer,
                n_jobs,
                init_para,
                memory,
            )

        Search = get_class(".search", search_dict[memory])
        self.search = Search(self.training_data, self.search_processes)

    def run(self, max_time=None, distribution=None):
        """Execute all registered search processes.

        Parameters
        ----------
        max_time : float or None
            Time budget in minutes; converted to seconds before being
            passed to the Search object. ``None`` means no limit.
        distribution :
            Unused in this method body — presumably selects a
            distribution backend elsewhere (TODO confirm).
        """
        if max_time is not None:
            max_time = max_time * 60  # minutes -> seconds

        start_time = time.time()

        self.search.run(start_time, max_time)

        # Expose the per-process results collected by the Search object.
        self.eval_times = self.search.eval_times_dict
        self.iter_times = self.search.iter_times_dict
        self.best_para = self.search.para_best_dict
        self.best_score = self.search.score_best_dict
132
|
|
|
|
133
|
|
|
|