@@ 6-60 (lines=55) @@ | ||
3 | from . import variable_summary |
|
4 | ||
5 | ||
class HiddenLayer:
    """Typical hidden layer for a multi-layer perceptron.

    The caller may specify the non-linear activation function applied on
    top of the affine transform ``x @ W + b``.

    Args:
        n_in (:obj:`int`): Number of input cells.
        n_out (:obj:`int`): Number of output cells.
        name (:obj:`str`): Name of the hidden layer (also used as the
            TensorFlow name scope and summary-tag prefix).
        x (:class:`tensorflow.placeholder`, optional): Input tensor. When
            omitted, a new ``float32`` placeholder of shape ``[None, n_in]``
            is created.
        W (:class:`tensorflow.Variable`, optional): Weight matrix. When
            omitted, initialized from a truncated normal with
            ``stddev = 1 / sqrt(n_in)``.
        b (:class:`tensorflow.Variable`, optional): Bias vector. When
            omitted, initialized to zeros.
        activation_fn: Activation function used in this hidden layer.
            Common values are :meth:`tensorflow.sigmoid` for the ``sigmoid``
            function, :meth:`tensorflow.tanh` for ``tanh``, and
            :meth:`tensorflow.nn.relu` for ReLU.

    Attributes:
        n_in (:obj:`int`): Number of inputs into this layer.
        n_out (:obj:`int`): Number of outputs out of this layer.
        name (:obj:`str`): Name of the hidden layer.
        x (:class:`tensorflow.placeholder`): Tensorflow placeholder or tensor that represents the input of this layer.
        W (:class:`tensorflow.Variable`): Weight matrix of current layer.
        b (:class:`tensorflow.Variable`): Bias vector of current layer.
        variables (:obj:`list` of :class:`tensorflow.Variable`): Trainable variables of current layer.
        logits (:class:`tensorflow.Tensor`): Tensorflow tensor of linear logits computed in current layer.
        y (:class:`tensorflow.Tensor`): Tensorflow tensor that represents the output (activation) of this layer.
        summaries (:obj:`list`): List of Tensorflow summary buffers.
    """
    def __init__(self, n_in, n_out, name, x=None, W=None, b=None, activation_fn=tf.sigmoid):
        self.n_in = n_in
        self.n_out = n_out
        self.name = name
        with tf.name_scope(name):
            # Create a fresh placeholder unless the caller wires in an
            # upstream tensor (e.g. the output of a previous layer).
            if x is None:
                self.x = tf.placeholder(tf.float32, shape=[None, n_in])
            else:
                self.x = x
            # Truncated-normal init scaled by 1/sqrt(fan_in) keeps the
            # pre-activations in a reasonable range at the start of training.
            if W is None:
                self.W = tf.Variable(
                    tf.truncated_normal(shape=[n_in, n_out], stddev=1.0 / math.sqrt(float(n_in))),
                    name='weights'
                )
            else:
                self.W = W
            if b is None:
                self.b = tf.Variable(tf.zeros(shape=[n_out]), name='biases')
            else:
                self.b = b
            self.variables = [self.W, self.b]
            self.logits = tf.matmul(self.x, self.W) + self.b
            self.y = activation_fn(self.logits, name='activations')
            # TensorBoard summaries: weight/bias statistics plus histograms
            # of the pre- and post-activation distributions.
            self.summaries = []
            self.summaries += variable_summary(self.W, tag=name + '/weights')
            self.summaries += variable_summary(self.b, tag=name + '/bias')
            self.summaries.append(tf.summary.histogram(name + '/pre_act', self.logits))
            self.summaries.append(tf.summary.histogram(name + '/act', self.y))
|
61 | ||
62 | ||
63 | class SoftmaxLayer: |
|
@@ 63-112 (lines=50) @@ | ||
60 | self.summaries.append(tf.summary.histogram(name + '/act', self.y)) |
|
61 | ||
62 | ||
class SoftmaxLayer:
    """Softmax output layer for multi-class classification.

    Computes the affine transform ``x @ W + b`` and normalizes it with
    :meth:`tensorflow.nn.softmax` to produce per-class probabilities.

    Args:
        n_in (:obj:`int`): Number of input cells.
        n_out (:obj:`int`): Number of output cells (classes).
        name (:obj:`str`): Name of the layer (also used as the TensorFlow
            name scope and summary-tag prefix).
        x (:class:`tensorflow.placeholder`, optional): Input tensor. When
            omitted, a new ``float32`` placeholder of shape ``[None, n_in]``
            is created.
        W (:class:`tensorflow.Variable`, optional): Weight matrix. When
            omitted, initialized from a truncated normal with
            ``stddev = 1 / sqrt(n_in)``.
        b (:class:`tensorflow.Variable`, optional): Bias vector. When
            omitted, initialized to zeros.

    Attributes:
        n_in (:obj:`int`): Number of inputs into this layer.
        n_out (:obj:`int`): Number of outputs out of this layer.
        name (:obj:`str`): Name of the layer.
        x (:class:`tensorflow.placeholder`): Tensorflow placeholder or tensor that represents the input of this layer.
        W (:class:`tensorflow.Variable`): Weight matrix of current layer.
        b (:class:`tensorflow.Variable`): Bias vector of current layer.
        variables (:obj:`list` of :class:`tensorflow.Variable`): Trainable variables of current layer.
        logits (:class:`tensorflow.Tensor`): Tensorflow tensor of linear logits computed in current layer.
        y (:class:`tensorflow.Tensor`): Tensorflow tensor that represents the softmax output of this layer.
        summaries (:obj:`list`): List of Tensorflow summary buffers.
    """
    def __init__(self, n_in, n_out, name, x=None, W=None, b=None):
        self.n_in = n_in
        self.n_out = n_out
        # Assigned up front alongside the other scalar attributes, matching
        # the HiddenLayer constructor.
        self.name = name
        with tf.name_scope(name):
            if x is None:
                self.x = tf.placeholder(tf.float32, shape=[None, n_in], name='input-x')
            else:
                self.x = x
            # Truncated-normal init scaled by 1/sqrt(fan_in), same scheme
            # as HiddenLayer.
            if W is None:
                self.W = tf.Variable(
                    tf.truncated_normal(shape=[n_in, n_out], stddev=1.0 / math.sqrt(float(n_in))),
                    name='weights'
                )
            else:
                self.W = W
            if b is None:
                self.b = tf.Variable(tf.zeros(shape=[n_out]), name='biases')
            else:
                self.b = b
            self.variables = [self.W, self.b]
            self.logits = tf.matmul(self.x, self.W) + self.b
            self.y = tf.nn.softmax(self.logits, name='softmax')
            # TensorBoard summaries: weight/bias statistics plus histograms
            # of the logits and the softmax probabilities.
            self.summaries = []
            self.summaries += variable_summary(self.W, tag=name + '/weights')
            self.summaries += variable_summary(self.b, tag=name + '/bias')
            self.summaries.append(tf.summary.histogram(name + '/pre_act', self.logits))
            self.summaries.append(tf.summary.histogram(name + '/act', self.y))
|
113 | ||
114 | ||
115 | class AutoencoderLayer(HiddenLayer): |