Code Duplication — length = 21–29 lines in 2 locations

pyActLearn/learning/nn/sda.py — 2 locations

@@ 162-190 (lines=29) @@
159
                return
160
            injector = BatchInjector(data_x=train_x, data_y=train_y, batch_size=batch_size)
161
            i = 0
162
            while _pretrain_criterion.continue_learning():
163
                batch_x, batch_y = injector.next_batch()
164
                if summaries_dir is not None and (i % summary_interval == 0):
165
                    summary, loss = session.run(
166
                        [current_layer.merged, current_layer.encode_loss],
167
                        feed_dict={self.x: x, self.y_: y}
168
                    )
169
                    train_writer.add_summary(summary, i)
170
                    logger.info('Pre-training Layer %d, Step %d, training loss %g' % (j, i, loss))
171
                    if test_x is not None and test_y is not None:
172
                        summary, loss = session.run(
173
                            [current_layer.merged, current_layer.encode_loss],
174
                            feed_dict={self.x: test_x, self.y_: test_y}
175
                        )
176
                        test_writer.add_summary(summary, i)
177
                        logger.info('Pre-training Layer %d, Step %d, test loss %g' % (j, i, loss))
178
                    if pretrain_criterion == 'monitor_based':
179
                        summary, loss = session.run(
180
                            [current_layer.merged, current_layer.encode_loss],
181
                            feed_dict={self.x: valid_x, self.y_: valid_y}
182
                        )
183
                        valid_writer.add_summary(summary, i)
184
                        logger.info('Pre-training Layer %d, Step %d, valid loss %g' % (j, i, loss))
185
                loss, _ = session.run(
186
                    [current_layer.encode_loss, self.encode_opts[j]],
187
                    feed_dict={self.x: batch_x, self.y_: batch_y}
188
                )
189
                logger.info('Pre-training Layer %d, Step %d, training loss %g' % (j, i, loss))
190
                i += 1
191
            if pretrain_criterion == 'monitor_based':
192
                tf.train.Saver().restore(session, layer_summaries_dir + '/best.ckpt')
193
            if summaries_dir is not None:
@@ 226-246 (lines=21) @@
223
            return
224
        injector = BatchInjector(data_x=train_x, data_y=train_y, batch_size=batch_size)
225
        i = 0
226
        while _tuning_criterion.continue_learning():
227
            batch_x, batch_y = injector.next_batch()
228
            if summaries_dir is not None and (i % summary_interval == 0):
229
                summary, loss, accuracy = session.run([self.merged, self.loss, self.accuracy],
230
                                                      feed_dict={self.x: train_x, self.y_: train_y})
231
                train_writer.add_summary(summary, i)
232
                logger.info('Fine-Tuning: Step %d, training accuracy %g, loss %g' % (i, accuracy, loss))
233
                if (test_x is not None) and (test_y is not None):
234
                    merged, accuracy = session.run([self.merged, self.accuracy],
235
                                                   feed_dict={self.x: test_x, self.y_: test_y})
236
                    test_writer.add_summary(merged, i)
237
                    logger.info('Fine-Tuning: Step %d, test accuracy %g' % (i, accuracy))
238
                if tuning_criterion == 'monitor_based':
239
                    merged, accuracy = session.run([self.merged, self.accuracy],
240
                                                   feed_dict={self.x: valid_x, self.y_: valid_y})
241
                    valid_writer.add_summary(merged, i)
242
                    logger.info('Fine-Tuning: Step %d, valid accuracy %g' % (i, accuracy))
243
            loss, accuracy, _ = session.run([self.loss, self.accuracy, self.fine_tuning],
244
                                            feed_dict={self.x: batch_x, self.y_: batch_y})
245
            logger.info('Fine-Tuning: Step %d, batch accuracy %g, loss %g' % (i, accuracy, loss))
246
            i += 1
247
        if tuning_criterion == 'monitor_based':
248
            tf.train.Saver().restore(session, tuning_summaries_dir + '/best.ckpt')
249
        if summaries_dir is not None: