1
|
|
|
# Author: Simon Blanke |
2
|
|
|
# Email: [email protected] |
3
|
|
|
# License: MIT License |
4
|
|
|
|
5
|
|
|
from sklearn.datasets import load_breast_cancer |
6
|
|
|
|
7
|
|
|
from sklearn.model_selection import cross_val_score |
8
|
|
|
from hyperactive import Hyperactive |
9
|
|
|
|
10
|
|
|
# Shared test fixtures: the breast-cancer dataset used by every test below,
# and the Hyperactive `memory` flag. `memory=False` disables result caching
# so repeated parameter evaluations are actually re-computed during tests.
data = load_breast_cancer()
X, y = data.data, data.target
memory = False
13
|
|
|
|
14
|
|
|
|
15
|
|
|
def test_sklearn():
    """Smoke-test Hyperactive with a scikit-learn DecisionTreeClassifier.

    Builds a search space over the tree hyperparameters and runs a search;
    the objective is the mean 3-fold cross-validation score.
    """
    from sklearn.tree import DecisionTreeClassifier

    def model(para, X_train, y_train):
        # Build the classifier from the current parameter set `para` and
        # return the mean 3-fold CV score, which Hyperactive maximizes.
        model = DecisionTreeClassifier(
            criterion=para["criterion"],
            max_depth=para["max_depth"],
            min_samples_split=para["min_samples_split"],
            min_samples_leaf=para["min_samples_leaf"],
        )
        scores = cross_val_score(model, X_train, y_train, cv=3)

        return scores.mean()

    search_config = {
        model: {
            "criterion": ["gini", "entropy"],
            "max_depth": range(1, 21),
            "min_samples_split": range(2, 21),
            "min_samples_leaf": range(1, 21),
        }
    }

    # Pass memory=memory for consistency with the other tests in this file;
    # the original omitted it here only, so this test alone ran with the
    # default caching behavior instead of the shared `memory = False` fixture.
    opt = Hyperactive(X, y, memory=memory)
    opt.search(search_config)
    # opt.predict(X)
    # opt.score(X, y)
42
|
|
|
|
43
|
|
|
|
44
|
|
|
def test_xgboost():
    """Smoke-test Hyperactive with an XGBoost classifier."""
    from xgboost import XGBClassifier

    def model(para, X_train, y_train):
        # Objective: mean accuracy over 3 cross-validation folds.
        clf = XGBClassifier(
            n_estimators=para["n_estimators"], max_depth=para["max_depth"]
        )
        return cross_val_score(clf, X_train, y_train, cv=3).mean()

    search_config = {model: {"n_estimators": range(2, 20), "max_depth": range(1, 11)}}

    opt = Hyperactive(X, y, memory=memory)
    opt.search(search_config)
    # opt.predict(X)
    # opt.score(X, y)
61
|
|
|
|
62
|
|
|
|
63
|
|
|
def test_lightgbm():
    """Smoke-test Hyperactive with a LightGBM classifier."""
    from lightgbm import LGBMClassifier

    def model(para, X_train, y_train):
        clf = LGBMClassifier(
            num_leaves=para["num_leaves"], learning_rate=para["learning_rate"]
        )
        # Objective: mean accuracy over 3 cross-validation folds.
        return cross_val_score(clf, X_train, y_train, cv=3).mean()

    learning_rates = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]
    search_config = {
        model: {
            "num_leaves": range(2, 20),
            "learning_rate": learning_rates,
        }
    }

    opt = Hyperactive(X, y, memory=memory)
    opt.search(search_config)
    # opt.predict(X)
    # opt.score(X, y)
85
|
|
|
|
86
|
|
|
|
87
|
|
|
def test_catboost():
    """Smoke-test Hyperactive with a CatBoost classifier."""
    from catboost import CatBoostClassifier

    def model(para, X_train, y_train):
        clf = CatBoostClassifier(
            iterations=para["iterations"],
            depth=para["depth"],
            learning_rate=para["learning_rate"],
        )
        # Objective: mean accuracy over 3 cross-validation folds.
        return cross_val_score(clf, X_train, y_train, cv=3).mean()

    learning_rates = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]
    search_config = {
        model: {
            "iterations": [1],
            "depth": range(2, 10),
            "learning_rate": learning_rates,
        }
    }

    opt = Hyperactive(X, y, memory=memory)
    opt.search(search_config)
    # opt.predict(X)
    # opt.score(X, y)
112
|
|
|
|
113
|
|
|
|
114
|
|
|
""" |
115
|
|
|
def test_tensorflow(): |
116
|
|
|
import tensorflow as tf |
117
|
|
|
|
118
|
|
|
mnist = tf.keras.datasets.mnist |
119
|
|
|
|
120
|
|
|
(X_train, y_train), (X_test, y_test) = mnist.load_data() |
121
|
|
|
X_train, X_test = X_train / 255.0, X_test / 255.0 |
122
|
|
|
|
123
|
|
|
def cnn(para, X_train, y_train): |
124
|
|
|
|
125
|
|
|
model = tf.keras.models.Sequential( |
126
|
|
|
[ |
127
|
|
|
tf.keras.layers.Flatten(input_shape=(28, 28)), |
128
|
|
|
tf.keras.layers.Dense(128, activation="relu"), |
129
|
|
|
tf.keras.layers.Dropout(0.2), |
130
|
|
|
tf.keras.layers.Dense(10, activation="softmax"), |
131
|
|
|
] |
132
|
|
|
) |
133
|
|
|
|
134
|
|
|
model.compile( |
135
|
|
|
optimizer="adam", |
136
|
|
|
loss="sparse_categorical_crossentropy", |
137
|
|
|
metrics=["accuracy"], |
138
|
|
|
) |
139
|
|
|
model.fit(X_train, y_train, epochs=1) |
140
|
|
|
|
141
|
|
|
_, score = model.evaluate(X_test, y_test, verbose=2) |
142
|
|
|
print("score", score, type(score)) |
143
|
|
|
return score |
144
|
|
|
|
145
|
|
|
search_config = {cnn: {"filters.0": [32, 64], "kernel_size.0": [3, 4]}} |
146
|
|
|
|
147
|
|
|
opt = Hyperactive(X_train, y_train, memory=memory) |
148
|
|
|
opt.search(search_config) |
149
|
|
|
""" |
150
|
|
|
|
151
|
|
|
""" |
152
|
|
|
def test_keras(): |
153
|
|
|
from tensorflow.keras.models import Sequential |
154
|
|
|
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten |
155
|
|
|
from tensorflow.keras.datasets import cifar10 |
156
|
|
|
from tensorflow.keras.utils import to_categorical |
157
|
|
|
|
158
|
|
|
(X_train, y_train), (X_test, y_test) = cifar10.load_data() |
159
|
|
|
|
160
|
|
|
X_train = X_train[0:1000] |
161
|
|
|
y_train = y_train[0:1000] |
162
|
|
|
|
163
|
|
|
X_test = X_train[0:1000] |
164
|
|
|
y_test = y_train[0:1000] |
165
|
|
|
|
166
|
|
|
y_train = to_categorical(y_train, 10) |
167
|
|
|
y_test = to_categorical(y_test, 10) |
168
|
|
|
|
169
|
|
|
def cnn(para, X_train, y_train): |
170
|
|
|
model = Sequential() |
171
|
|
|
|
172
|
|
|
model.add( |
173
|
|
|
Conv2D( |
174
|
|
|
filters=para["filters.0"], |
175
|
|
|
kernel_size=para["kernel_size.0"], |
176
|
|
|
activation="relu", |
177
|
|
|
) |
178
|
|
|
) |
179
|
|
|
model.add(MaxPooling2D(pool_size=(2, 2))) |
180
|
|
|
|
181
|
|
|
model.add(Flatten()) |
182
|
|
|
model.add(Dense(10, activation="softmax")) |
183
|
|
|
|
184
|
|
|
model.compile( |
185
|
|
|
optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"] |
186
|
|
|
) |
187
|
|
|
model.fit(X_train, y_train, epochs=1) |
188
|
|
|
|
189
|
|
|
_, score = model.evaluate(x=X_test, y=y_test) |
190
|
|
|
|
191
|
|
|
return score |
192
|
|
|
|
193
|
|
|
search_config = {cnn: {"filters.0": [32, 64], "kernel_size.0": [3, 4]}} |
194
|
|
|
|
195
|
|
|
opt = Hyperactive(X_train, y_train) |
196
|
|
|
opt.search(search_config) |
197
|
|
|
""" |
198
|
|
|
|