#!/usr/bin/env python
# -*- coding: utf-8 -*-

import logging as loggers

import numpy as np
import theano

from deepy.utils import UniformInitializer
from deepy.core.env import env
from deepy.core.tensor_conversion import neural_computation_prefer_tensor, convert_to_theano_var

logging = loggers.getLogger("deepy")


class NeuralLayer(object):

    def __init__(self, name=None):
        """
        Create a neural layer.
        """
        self.name = name if name else self.__class__.__name__
        self.input_dim = 0
        self.input_dims = [0]
        self.output_dim = 0
        self.output_dims = [0]

        self._linked_block = None

        self.initialized = False
        self.updates = []
        self.training_updates = []
        self.free_parameters = []
        self.parameters = []
        self.training_monitors = []
        self.testing_monitors = []
        self._registered_monitors = set()
        self._registered_updates = set()
        self._registered_training_updates = set()
        self.external_inputs = []
        self.external_targets = []
        self.parameter_count = 0
        self.epoch_callbacks = []
        self.training_callbacks = []
        self.testing_callbacks = []

    def init(self, input_dim=0, input_dims=None, no_prepare=False):
        """
        Initialize the layer.
        :param no_prepare: avoid calling the preparation function
        """
        if self.initialized:
            return
        # configure input dimensions
        if input_dims:
            self.input_dims = input_dims
            self.input_dim = input_dims[0]
        else:
            self.input_dim = input_dim
            self.input_dims = [input_dim]
        # set default output dimension
        if self.output_dim == 0:
            self.output_dim = self.input_dim
        self.initialized = True
        # call prepare
        if not no_prepare:
            self.prepare()
        return self
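
    # Usage sketch (hedged; `SomeLayer` is a hypothetical subclass):
    # `compute` normally calls `init` for you, but it can also be invoked
    # directly when wiring layers by hand.
    #
    #     layer = SomeLayer()
    #     layer.init(input_dim=128)   # sets dimensions and calls prepare()
    #     layer.init(input_dim=256)   # no-op: the layer is already initialized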

    def compute(self, *inputs, **kwargs):
        """
        Compute the output based on NeuralVariable inputs.
        :type inputs: list of NeuralVariable
        :return: NeuralVariable
        """
        from deepy.core.neural_var import NeuralVariable
        from deepy.core.graph import graph
        if not isinstance(inputs[0], NeuralVariable):
            raise TypeError("The inputs of `compute` must be NeuralVariable instances")

        dims = [t.dim() for t in inputs]
        if len(inputs) == 1:
            self.init(input_dim=dims[0])
        else:
            self.init(input_dims=dims)
        # attach to the default block if no block manages this layer yet
        if self.parameters and not self._linked_block:
            self.belongs_to(graph.default_block())
        # convert keyword arguments to Theano variables
        train_kwargs, _, _ = convert_to_theano_var(kwargs)

        output = self.compute_tensor(*[t.tensor for t in inputs], **train_kwargs)

        if not isinstance(output, (list, tuple)):
            return NeuralVariable(output, dim=self.output_dim)
        else:
            return [NeuralVariable(tensor, dim=dim) for tensor, dim in zip(output, self.output_dims)]
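
    # Sketch of the typical call path (illustrative names, not part of this
    # module):
    #
    #     x = network_input               # a NeuralVariable
    #     y = SomeLayer().compute(x)      # initializes the layer, builds the
    #                                     # Theano graph via compute_tensor,
    #                                     # and wraps the result with output_dim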

    def prepare(self):
        """
        Called after the layer is connected and its dimensions are known;
        override this to create parameters.
        """

    @neural_computation_prefer_tensor
    def compute_tensor(self, *args, **kwargs):
        """
        Compute with tensors in Theano.
        """
        raise NotImplementedError("compute_tensor of '%s' is not implemented" % self.name)
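
    # A new layer typically overrides `prepare` (create parameters) and
    # `compute_tensor` (build the Theano graph). A minimal sketch; the
    # subclass below is hypothetical:
    #
    #     class FeedForward(NeuralLayer):
    #         def __init__(self, output_dim):
    #             super(FeedForward, self).__init__()
    #             self.output_dim = output_dim
    #
    #         def prepare(self):
    #             self.W = self.create_weight(self.input_dim, self.output_dim)
    #             self.b = self.create_bias(self.output_dim)
    #             self.register_parameters(self.W, self.b)
    #
    #         def compute_tensor(self, x):
    #             return self.activation("tanh")(theano.tensor.dot(x, self.W) + self.b)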

    def belongs_to(self, block):
        """
        Let the given block or network manage the parameters of this layer.
        :param block: Block or NeuralNetwork
        :return: NeuralLayer
        """
        if self._linked_block:
            raise RuntimeError("The layer {} already belongs to {}".format(self.name, self._linked_block.name))
        self._linked_block = block
        block.register_layer(self)
        return self
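
    # Sketch (hedged; assumes deepy's block/network container API):
    #
    #     layer = SomeLayer().belongs_to(block)   # block now owns the parameters
    #     layer.belongs_to(other_block)           # raises: a layer has one owner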

    def register(self, *layers):
        """
        Register inner layers.
        """
        self.register_inner_layers(*layers)

    def register_inner_layers(self, *layers):
        """
        Take over the parameters, updates and monitors of the given layers.
        """
        for layer in layers:
            self.register_parameters(*layer.parameters)
            self.register_updates(*layer.updates)
            self.register_training_updates(*layer.training_updates)
            self.training_monitors.extend(layer.training_monitors)
            self.testing_monitors.extend(layer.testing_monitors)

    def register_parameters(self, *parameters):
        """
        Register parameters.
        """
        for param in parameters:
            self.parameter_count += np.prod(param.get_value().shape)
        self.parameters.extend(parameters)

    def register_free_parameters(self, *free_parameters):
        """
        Register free parameters, whose values will not be updated by the trainer.
        """
        self.free_parameters.extend(free_parameters)

    def register_updates(self, *updates):
        """
        Register updates that will be executed in each iteration.
        """
        for key, node in updates:
            if key not in self._registered_updates:
                self.updates.append((key, node))
                self._registered_updates.add(key)
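
    # Each update is a (shared_variable, new_value) pair, the form accepted by
    # theano.function(updates=...). A short sketch with a hypothetical counter:
    #
    #     counter = theano.shared(np.array(0, dtype="int32"), name="counter")
    #     self.register_updates((counter, counter + 1))
    #     self.register_updates((counter, counter + 1))   # deduplicated by key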

    def register_training_updates(self, *updates):
        """
        Register updates that will only be executed in the training phase.
        """
        for key, node in updates:
            if key not in self._registered_training_updates:
                self.training_updates.append((key, node))
                self._registered_training_updates.add(key)

    def register_monitors(self, *monitors):
        """
        Register monitors. Each monitor should be a tuple of a name and a Theano variable.
        """
        for key, node in monitors:
            if key not in self._registered_monitors:
                node *= 1.0  # force a plain tensor, avoiding CudaNdarray results
                self.training_monitors.append((key, node))
                self.testing_monitors.append((key, node))
                self._registered_monitors.add(key)
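
    # Sketch: monitors are scalar Theano expressions reported during training
    # and testing (the names below are illustrative):
    #
    #     self.register_monitors(("mean_activation", output.mean()),
    #                            ("max_weight", abs(self.W).max()))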

    def register_external_inputs(self, *variables):
        """
        Register external input variables.
        """
        self.external_inputs.extend(variables)

    def register_external_targets(self, *variables):
        """
        Register external target variables.
        """
        self.external_targets.extend(variables)

    def register_training_callbacks(self, *callbacks):
        """
        Register callbacks invoked at each iteration during training.
        """
        self.training_callbacks.extend(callbacks)

    def register_testing_callbacks(self, *callbacks):
        """
        Register callbacks invoked at each iteration during testing.
        """
        self.testing_callbacks.extend(callbacks)

    def register_epoch_callbacks(self, *callbacks):
        """
        Register callbacks invoked after each epoch finishes.
        """
        self.epoch_callbacks.extend(callbacks)

    def create_weight(self, input_n=1, output_n=1, label="W", initializer=None, shape=None):
        """
        Create a weight matrix as a Theano shared variable.
        Falls back to `env.default_initializer` when no initializer is given.
        """
        if not shape:
            shape = (input_n, output_n)

        if not initializer:
            initializer = env.default_initializer

        weight = theano.shared(initializer.sample(shape).astype(env.FLOATX), name='{}_{}'.format(self.name, label))

        logging.info('create param %s %s for %s', label, str(shape), self.name)
        return weight

    def create_bias(self, output_n=1, label="B", value=0., shape=None):
        """
        Create a bias vector as a Theano shared variable, filled with `value`.
        """
        if not shape:
            shape = (output_n,)
        bs = np.ones(shape)
        bs *= value
        bias = theano.shared(bs.astype(env.FLOATX), name='{}_{}'.format(self.name, label))
        logging.info('create param %s %s for %s', label, str(shape), self.name)
        return bias
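
    # Typical use inside a subclass's `prepare` (sketch; `input_dim` and
    # `output_dim` are already set by `init` when `prepare` runs):
    #
    #     self.W = self.create_weight(self.input_dim, self.output_dim, label="W")
    #     self.b = self.create_bias(self.output_dim, label="B")
    #     self.register_parameters(self.W, self.b)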

    def create_scalar(self, name="S", value=0, dtype=env.FLOATX):
        bs = np.array(value, dtype=dtype)
        v = theano.shared(bs, name='{}_{}'.format(self.name, name))

        logging.info('create scalar %s', name)
        return v

    def create_vector(self, n, name="V", dtype=env.FLOATX):
        bs = np.zeros(n, dtype=dtype)
        v = theano.shared(bs, name='{}_{}'.format(self.name, name))

        logging.info('create vector %s: %d', name, n)
        return v

    def create_matrix(self, m, n, name="M"):
        matrix = theano.shared(np.zeros((m, n)).astype(env.FLOATX), name="{}_{}".format(self.name, name))

        logging.info('create matrix %s: %d x %d', name, m, n)
        return matrix

    def activation(self, name):
        """
        Return the activation function registered under `name`.
        """
        from deepy.tensor.activations import get_activation
        return get_activation(name)

    def callback_forward_propagation(self):
        pass

    def set_name(self, name):
        """
        Set the name of this layer.
        The name is used as the key for saved parameters.
        """
        self.name = name
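
# End-to-end sketch of the layer life cycle (hedged; `FeedForward` is the
# hypothetical subclass sketched above, and `x` is assumed to be a
# NeuralVariable produced by the surrounding graph-building code):
#
#     layer = FeedForward(output_dim=10)
#     layer.set_name("output")         # key under which parameters are saved
#     y = layer.compute(x)             # runs init() and prepare() on first use
#     print(layer.parameter_count)     # total number of scalar parameters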