from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Activation, Dropout
from keras.datasets import cifar10
from keras.utils import to_categorical

from hyperactive import Hyperactive

# load CIFAR-10 and one-hot encode the 10 class labels
(X_train, y_train), (X_test, y_test) = cifar10.load_data()

y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
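
# Optional preprocessing (not part of the original example): CIFAR-10 images are
# uint8 values in [0, 255], so scaling them to [0, 1] usually speeds up training.
# X_train = X_train.astype("float32") / 255.0
# X_test = X_test.astype("float32") / 255.0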


"""
Efficient Neural Architecture Search via Parameter Sharing:
https://arxiv.org/pdf/1802.03268.pdf
"""
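

# Parameter sharing as used in this example: a full model is pretrained once,
# its early layers are truncated and frozen, and every candidate architecture
# evaluated during the search reuses that trunk. conv1, conv2 and conv3 below
# are the candidate blocks the optimizer chooses between.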
def conv1(model):
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    return model


def conv2(model):
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))
    return model


def conv3(model):
    # identity option: adds no extra layers
    return model
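

# Pretrain a reference model on CIFAR-10; its convolutional trunk will be
# kept, frozen and shared by all architectures evaluated below.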
model_pretrained = Sequential()
model_pretrained.add(Conv2D(64, (3, 3), padding="same", input_shape=X_train.shape[1:]))
model_pretrained.add(Activation("relu"))
model_pretrained.add(Conv2D(32, (3, 3)))
model_pretrained.add(Activation("relu"))
model_pretrained.add(MaxPooling2D(pool_size=(2, 2)))
model_pretrained.add(Dropout(0.25))

model_pretrained.add(Conv2D(32, (3, 3), padding="same"))
model_pretrained.add(Activation("relu"))
model_pretrained.add(Dropout(0.25))

model_pretrained.add(Flatten())
model_pretrained.add(Dense(200))
model_pretrained.add(Activation("relu"))
model_pretrained.add(Dropout(0.5))
model_pretrained.add(Dense(10))
model_pretrained.add(Activation("softmax"))

model_pretrained.compile(
    optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)
model_pretrained.fit(X_train, y_train, epochs=50, batch_size=128)

n_layers = len(model_pretrained.layers)

# drop everything after the first 8 layers, keeping only the convolutional trunk
for i in range(n_layers - 8):
    model_pretrained.pop()

# freeze the remaining layers so their pretrained weights stay fixed
for layer in model_pretrained.layers:
    layer.trainable = False

print(model_pretrained.summary())
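

# Objective function for Hyperactive: "para" holds the sampled hyperparameters.
# It appends the selected convolutional block and a new classification head to
# the shared frozen trunk, trains the head, and returns the test accuracy that
# the optimizer tries to maximize.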
def cnn(para, X_train, y_train):
    """
    The layers below are now provided by the pretrained, frozen trunk:

    model = Sequential()
    model.add(
        Conv2D(64, (3, 3), padding="same", input_shape=X_train.shape[1:])
    )
    model.add(Activation("relu"))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation("relu"))

    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(32, (3, 3), padding="same"))
    model.add(Activation("relu"))
    """
    # start from the shared, frozen trunk
    model = model_pretrained

    # append the convolutional block chosen by the optimizer
    model = para["conv_layer.0"](model)
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(para["neurons.0"]))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Activation("softmax"))

    model.compile(
        optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
    )
    model.fit(X_train, y_train, epochs=25, batch_size=128)

    _, score = model.evaluate(x=X_test, y=y_test)

    return score
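

# Search space: one entry per objective function. "conv_layer.0" selects one of
# the candidate block functions, "neurons.0" the width of the dense layer.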
search_config = {
    cnn: {"conv_layer.0": [conv1, conv2, conv3], "neurons.0": range(100, 1000, 100)}
}

opt = Hyperactive(X_train, y_train)
opt.search(search_config, n_iter=5)