| Metric | Value |
| --- | --- |
| Conditions | 3 |
| Total Lines | 67 |
| Code Lines | 47 |
| Ratio | 100 % |
| Changes | 0 |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
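As a minimal sketch of that move (hypothetical names, not taken from the flagged code below), the comment's wording suggests the new method's name:

```python
# Before: a comment explains what the block does.
def register_user(name, email):
    # reject malformed email addresses
    if "@" not in email or email.startswith("@"):
        raise ValueError("malformed email: %s" % email)
    return {"name": name, "email": email}

# After: the commented block is extracted, and the comment becomes the name.
def validate_email(email):
    if "@" not in email or email.startswith("@"):
        raise ValueError("malformed email: %s" % email)

def register_user_refactored(name, email):
    validate_email(email)
    return {"name": name, "email": email}
```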
Commonly applied refactorings include:

- Extract Method

If many parameters/temporary variables are present:

- Replace Temp with Query (sketched below)
- Extract Class / Replace Method with Method Object
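A minimal sketch of Replace Temp with Query (again with hypothetical names):

```python
# Before: a temporary variable caches an intermediate result.
def total_price(items):
    base = sum(price * qty for price, qty in items)
    return base * 0.95 if base > 1000 else base

# After: the temp becomes a query function that other extracted
# methods can reuse.
def base_price(items):
    return sum(price * qty for price, qty in items)

def total_price_refactored(items):
    return base_price(items) * 0.95 if base_price(items) > 1000 else base_price(items)
```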
The flagged duplicated code (TensorFlow 1.x):

```python
from __future__ import division, print_function, absolute_import

import tensorflow as tf  # TF 1.x API (tf.layers, tf.contrib, tf.estimator.inputs)


def cnn(para, X_train, y_train, X_test, y_test):
    # These hyperparameters were free (undefined) names in the original
    # snippet; they are defined here with assumed MNIST-style defaults so
    # the function is self-contained. X_test/y_test were likewise
    # undefined and are now taken as parameters.
    num_classes = 10
    dropout = 0.25
    learning_rate = 0.001
    batch_size = 128
    num_steps = 1000

    def conv_net(x_dict, n_classes, dropout, reuse, is_training):
        # Two conv/pool stages followed by a dropout-regularized dense head.
        with tf.variable_scope("ConvNet", reuse=reuse):
            x = x_dict["images"]
            x = tf.reshape(x, shape=[-1, 28, 28, 1])
            conv1 = tf.layers.conv2d(x, para["filters_0"], 5, activation=tf.nn.relu)
            conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
            conv2 = tf.layers.conv2d(conv1, para["filters_1"], 3, activation=tf.nn.relu)
            conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
            fc1 = tf.contrib.layers.flatten(conv2)
            fc1 = tf.layers.dense(fc1, para["dense_0"])
            fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
            out = tf.layers.dense(fc1, n_classes)
        return out

    def model_fn(features, labels, mode):
        # Build the graph twice: with dropout active for training, without
        # for prediction/evaluation. reuse=True shares the weights.
        logits_train = conv_net(
            features, num_classes, dropout, reuse=False, is_training=True
        )
        logits_test = conv_net(
            features, num_classes, dropout, reuse=True, is_training=False
        )

        pred_classes = tf.argmax(logits_test, axis=1)
        # pred_probas = tf.nn.softmax(logits_test)

        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

        loss_op = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)
            )
        )
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step())

        acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)

        estim_specs = tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=pred_classes,
            loss=loss_op,
            train_op=train_op,
            eval_metric_ops={"accuracy": acc_op},
        )
        return estim_specs

    model = tf.estimator.Estimator(model_fn)

    # Train on the training split.
    input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"images": X_train},
        y=y_train,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True,
    )
    model.train(input_fn, steps=num_steps)

    # Evaluate on the held-out split and report accuracy.
    input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"images": X_test}, y=y_test, batch_size=batch_size, shuffle=False
    )
    e = model.evaluate(input_fn)

    return float(e["accuracy"])
```
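A hypothetical call site (not part of the flagged snippet), using random arrays in place of flattened 28×28 grayscale images; the `para` keys match those read inside `cnn`:

```python
import numpy as np

# Stand-in data: 784 = 28 * 28 flattened pixels, 10 classes.
X_train = np.random.rand(512, 784).astype(np.float32)
y_train = np.random.randint(0, 10, size=512)
X_test = np.random.rand(128, 784).astype(np.float32)
y_test = np.random.randint(0, 10, size=128)

para = {"filters_0": 32, "filters_1": 64, "dense_0": 256}
print(cnn(para, X_train, y_train, X_test, y_test))
```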