| Metric | Value |
| --- | --- |
| Conditions | 3 |
| Total Lines | 59 |
| Code Lines | 46 |
| Comment Lines | 0 |
| Comment Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. Moreover, when a method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for naming it.
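As a minimal, hypothetical sketch (the `Item` class and the pricing logic are invented for illustration), the comment above a block can become the name of the extracted method:

```python
from dataclasses import dataclass

@dataclass
class Item:
    price: float
    quantity: int

# Before: a comment labels the block that computes the gross price.
def total_price_before(items, discount_rate):
    # compute the gross price of all items
    gross = 0.0
    for item in items:
        gross += item.price * item.quantity
    return gross * (1.0 - discount_rate)

# After: the commented block is extracted, and the comment
# becomes the name of the new method.
def gross_price(items):
    return sum(item.price * item.quantity for item in items)

def total_price(items, discount_rate):
    return gross_price(items) * (1.0 - discount_rate)
```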
Commonly applied refactorings include:

- Extract Method, as described above: move a cohesive part of the method body into a method of its own.
- If many parameters/temporary variables are present, refactorings such as Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object reduce the state that has to be passed around (see the sketch after this list).
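For instance, Introduce Parameter Object bundles parameters that always travel together into a single value. The sketch below is hypothetical: `TrainingConfig` and its field names are invented, loosely mirroring the `para` dictionary in the listing that follows.

```python
from dataclasses import dataclass

# Before: six loose parameters have to be threaded through every call.
def train_before(filters_0, filters_1, dense_0, dropout,
                 learning_rate, batch_size):
    ...

# After: the parameters that travel together become one object,
# which also keeps the signatures of extracted methods short.
@dataclass
class TrainingConfig:
    filters_0: int
    filters_1: int
    dense_0: int
    dropout: float
    learning_rate: float
    batch_size: int

def train(config: TrainingConfig):
    ...
```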
```python
from __future__ import division, print_function, absolute_import
import tensorflow as tf  # required by the snippet; the original import
                         # lines were elided from the listing

# (Several lines are elided in the original listing; the free variables
#  num_classes, dropout, learning_rate, batch_size, num_steps, X_test
#  and y_test are presumably defined there.)

def cnn(para, X_train, y_train):

    def conv_net(x_dict, n_classes, dropout, reuse, is_training):
        # Build a two-block convolutional network on 28x28 grayscale input.
        with tf.variable_scope('ConvNet', reuse=reuse):
            x = x_dict['images']
            x = tf.reshape(x, shape=[-1, 28, 28, 1])
            conv1 = tf.layers.conv2d(x, para["filters_0"], 5,
                                     activation=tf.nn.relu)
            conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
            conv2 = tf.layers.conv2d(conv1, para["filters_1"], 3,
                                     activation=tf.nn.relu)
            conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
            fc1 = tf.contrib.layers.flatten(conv2)
            fc1 = tf.layers.dense(fc1, para["dense_0"])
            fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
            out = tf.layers.dense(fc1, n_classes)
        return out

    def model_fn(features, labels, mode):
        # Two graph instances share weights: one with dropout active for
        # training, one without for prediction and evaluation.
        logits_train = conv_net(features, num_classes, dropout, reuse=False,
                                is_training=True)
        logits_test = conv_net(features, num_classes, dropout, reuse=True,
                               is_training=False)

        pred_classes = tf.argmax(logits_test, axis=1)
        pred_probas = tf.nn.softmax(logits_test)  # computed but unused here

        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

        loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss_op,
                                      global_step=tf.train.get_global_step())

        acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)

        estim_specs = tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=pred_classes,
            loss=loss_op,
            train_op=train_op,
            eval_metric_ops={'accuracy': acc_op})

        return estim_specs

    model = tf.estimator.Estimator(model_fn)

    # Train on the training set, then evaluate accuracy on the test set.
    input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'images': X_train}, y=y_train,
        batch_size=batch_size, num_epochs=None, shuffle=True)
    model.train(input_fn, steps=num_steps)

    input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'images': X_test}, y=y_test,
        batch_size=batch_size, shuffle=False)
    e = model.evaluate(input_fn)

    return float(e['accuracy'])
```
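Applying Extract Method to this unit could look like the following sketch. The helper names (`train_model`, `evaluate_model`) are invented for illustration, and `model_fn` plus the elided hyperparameters are assumed to be as in the listing above.

```python
# Hypothetical refactoring sketch: the training and evaluation steps of
# cnn() extracted into short, named units. Assumes tf, model_fn and the
# elided variables (batch_size, num_steps, X_test, y_test) from above.

def train_model(model, X_train, y_train, batch_size, num_steps):
    # Extracted: feed the training set to the estimator.
    input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'images': X_train}, y=y_train,
        batch_size=batch_size, num_epochs=None, shuffle=True)
    model.train(input_fn, steps=num_steps)

def evaluate_model(model, X_test, y_test, batch_size):
    # Extracted: compute accuracy on the test set.
    input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'images': X_test}, y=y_test,
        batch_size=batch_size, shuffle=False)
    return float(model.evaluate(input_fn)['accuracy'])

def cnn(para, X_train, y_train):
    # The remaining body now reads as a three-step summary.
    model = tf.estimator.Estimator(model_fn)
    train_model(model, X_train, y_train, batch_size, num_steps)
    return evaluate_model(model, X_test, y_test, batch_size)
```

Each extracted unit is now short enough to name precisely, and the body of `cnn` states what happens rather than how.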