#!/usr/bin/env python
# -*- coding: utf-8 -*-

from deepy.networks import NeuralNetwork


class ComputationalGraph(NeuralNetwork):
    """
    A class for defining computational graphs.

    This class can be used to design very complex models, such as a Neural Turing Machine.
    """

    def __init__(self, input_dim=0, model=None, input_tensor=None, monitors=None,
                 cost=None, output=None, outputs=None, blocks=None, input_vars=None, target_vars=None):
        """
        Create a basic network.

        Parameters:
            input_dim - dimension of the input variable
            model - a shorthand for specifying the model to stack
            input_tensor - tensor to use as input, if it is special
            monitors - dict or list of (name, var) pairs to monitor during training and testing
            cost - cost to stack on top of the model
            output - output variable; used as the test output when a cost is given, otherwise stacked
            outputs - multiple output variables to use for testing
            blocks - additional blocks to register with the network
            input_vars - variables to use as the graph's inputs
            target_vars - variables to use as the graph's targets
        """
        from deepy.core.neural_var import NeuralVariable
        from deepy.core.tensor_conversion import convert_to_theano_var
        from theano.sandbox.cuda import CudaNdarraySharedVariable
        super(ComputationalGraph, self).__init__(input_dim, input_tensor=input_tensor)
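        # Stack the model first, then the cost. A separate `output` becomes the
        # test output when a cost is given; otherwise it is stacked directly.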
        if model:
            self.stack(model)
        if cost:
            self.stack(cost)
        if output:
            if cost:
                self._test_output = output.tensor
            else:
                self.stack(output)
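        # Register additional blocks and override the graph's input/target variables.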
        if blocks:
            self.register(*blocks)
        if input_vars:
            self.input_variables = [t.tensor for t in input_vars]
        if target_vars:
            self.target_variables = [t.tensor for t in target_vars]
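        # Multiple test outputs: convert them to Theano variables, and clear the
        # single test output unless one was set above.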
        if outputs:
            if not output and not cost:
                self._test_output = None
            self._test_outputs, _, _ = convert_to_theano_var(outputs)

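        # Attach monitors: each entry is a (name, var) pair, added to both the
        # training and the testing monitor lists.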
        if monitors:
            if isinstance(monitors, dict):
                monitors = monitors.items()
            for monitor in monitors:
                if not isinstance(monitor, tuple):
                    raise Exception("monitors must be (name, var) tuples.")
                name, var = monitor
                if isinstance(var, NeuralVariable):
                    var = var.tensor
                if isinstance(var, CudaNdarraySharedVariable):
                    var *= 1.0  # Avoid CudaNdarray
                self.training_monitors.append((name, var))
                self.testing_monitors.append((name, var))

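
    # Convenience aliases: for a computational graph, the stacked output serves
    # as the cost, and the test output as the test cost.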
    @property
    def cost(self):
        return self.output

    @property
    def test_cost(self):
        return self.test_output