| @@ 63-90 (lines=28) @@ | ||
| 60 | self.encode_optimizer.minimize(self.inner_layers[i].encode_loss, |
| 61 | var_list=self.inner_layers[i].variables) |
| 62 | ) |
| 63 | if num_classes == 1: |
| 64 | # Output Layers |
| 65 | self.output_layer = HiddenLayer(layers[len(layers) - 1], num_classes, x=self.inner_layers[len(layers)-1].y, |
| 66 | name='Output', activation_fn=tf.sigmoid) |
| 67 | # Predicted Probability |
| 68 | self.y = self.output_layer.y |
| 69 | self.y_class = tf.cast(tf.greater_equal(self.y, 0.5), tf.float32) |
| 70 | # Loss |
| 71 | self.loss = tf.reduce_mean( |
| 72 | tf.nn.sigmoid_cross_entropy_with_logits(logits=self.output_layer.logits, labels=self.y_, |
| 73 | name='SigmoidCrossEntropyLoss') |
| 74 | ) |
| 75 | self.correct_prediction = tf.equal(self.y_class, self.y_) |
| 76 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) |
| 77 | else: |
| 78 | # Output Layers |
| 79 | self.output_layer = SoftmaxLayer(layers[len(layers) - 1], num_classes, x=self.inner_layers[len(layers)-1].y, |
| 80 | name='OutputLayer') |
| 81 | # Predicted Probability |
| 82 | self.y = self.output_layer.y |
| 83 | self.y_class = tf.argmax(self.y, 1) |
| 84 | # Loss |
| 85 | self.loss = tf.reduce_mean( |
| 86 | tf.nn.softmax_cross_entropy_with_logits(logits=self.output_layer.logits, labels=self.y_, |
| 87 | name='SoftmaxCrossEntropyLoss') |
| 88 | ) |
| 89 | self.correct_prediction = tf.equal(self.y_class, tf.argmax(self.y_, 1)) |
| 90 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) |
| 91 | self.summaries.append(tf.summary.scalar('cross_entropy', self.loss)) |
| 92 | self.summaries.append(tf.summary.scalar('accuracy', self.accuracy)) |
| 93 | self.summaries += self.output_layer.summaries |
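In this first clone, the sigmoid loss was originally invoked with positional arguments; TensorFlow 1.x only accepts named arguments for `tf.nn.sigmoid_cross_entropy_with_logits` and raises a `ValueError` otherwise, so line 72 above is shown with the `logits=`/`labels=` keywords. A minimal, standalone sketch of the same binary head, with hypothetical placeholders standing in for `self.output_layer.logits` and `self.y_`:

```python
import tensorflow as tf

# Hypothetical stand-ins for self.output_layer.logits and self.y_ above.
logits = tf.placeholder(tf.float32, [None, 1], name='logits')
y_ = tf.placeholder(tf.float32, [None, 1], name='labels')

# TF 1.x requires named arguments here; a positional call raises ValueError.
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y_,
                                            name='SigmoidCrossEntropyLoss'))

# Threshold the predicted probability at 0.5 to get a hard class label.
y = tf.sigmoid(logits)
y_class = tf.cast(tf.greater_equal(y, 0.5), tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(y_class, y_), tf.float32))
```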
|
| @@ 63-90 (lines=28) @@ | ||
| 60 | name=('Hidden%d' % i), activation_fn=activation_fn) |
| 61 | ) |
| 62 | self.summaries += self.inner_layers[i].summaries |
| 63 | if num_classes == 1: |
| 64 | # Output Layers |
| 65 | self.output_layer = HiddenLayer(layers[len(layers) - 1], num_classes, x=self.inner_layers[len(layers)-1].y, |
| 66 | name='Output', activation_fn=tf.sigmoid) |
| 67 | # Predicted Probability |
| 68 | self.y = self.output_layer.y |
| 69 | self.y_class = tf.cast(tf.greater_equal(self.y, 0.5), tf.float32) |
| 70 | # Loss |
| 71 | self.loss = tf.reduce_mean( |
| 72 | tf.nn.sigmoid_cross_entropy_with_logits(logits=self.output_layer.logits, labels=self.y_, |
| 73 | name='SigmoidCrossEntropyLoss') |
| 74 | ) |
| 75 | self.correct_prediction = tf.equal(self.y_class, self.y_) |
| 76 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) |
| 77 | else: |
| 78 | # Output Layers |
| 79 | self.output_layer = SoftmaxLayer(layers[len(layers) - 1], num_classes, x=self.inner_layers[len(layers)-1].y, |
| 80 | name='OutputLayer') |
| 81 | # Predicted Probability |
| 82 | self.y = self.output_layer.y |
| 83 | self.y_class = tf.argmax(self.y, 1) |
| 84 | # Loss |
| 85 | self.loss = tf.reduce_mean( |
| 86 | tf.nn.softmax_cross_entropy_with_logits(logits=self.output_layer.logits, labels=self.y_, |
| 87 | name='SoftmaxCrossEntropyLoss') |
| 88 | ) |
| 89 | self.correct_prediction = tf.equal(self.y_class, tf.argmax(self.y_, 1)) |
| 90 | self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32)) |
| 91 | self.summaries.append(tf.summary.scalar('cross_entropy', self.loss)) |
| 92 | self.summaries.append(tf.summary.scalar('accuracy', self.accuracy)) |
| 93 | self.summaries += self.output_layer.summaries |