| Metric | Value |
| --- | --- |
| Conditions | 3 |
| Total Lines | 54 |
| Code Lines | 38 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. Conversely, if a method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.
Commonly applied refactorings include:

- *Extract Method*: move a coherent, commented chunk of the body into its own well-named method (see the sketch after this list).
- If many parameters/temporary variables are present: *Replace Temp with Query*, *Introduce Parameter Object*, or *Replace Method with Method Object*.
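As a minimal sketch of Extract Method, here is Fowler's classic `print_owing` example; the names (`Invoice`, `print_banner`, `print_details`) are illustrative and not taken from the code in this article. The comment marking each step turns into the extracted method's name:

```python
from collections import namedtuple

Invoice = namedtuple('Invoice', ['customer', 'orders'])
Order = namedtuple('Order', ['amount'])

# Before: comments mark the distinct steps inside one method.
def print_owing(invoice):
    outstanding = sum(order.amount for order in invoice.orders)
    # print banner
    print('*' * 22)
    print('* Customer owes *')
    print('*' * 22)
    # print details
    print(f'name: {invoice.customer}')
    print(f'amount: {outstanding}')

# After: each commented step is extracted; the comments became method names.
def print_banner():
    print('*' * 22)
    print('* Customer owes *')
    print('*' * 22)

def print_details(invoice, outstanding):
    print(f'name: {invoice.customer}')
    print(f'amount: {outstanding}')

def print_owing_refactored(invoice):
    outstanding = sum(order.amount for order in invoice.orders)
    print_banner()
    print_details(invoice, outstanding)

print_owing_refactored(Invoice('Alice', [Order(40.0), Order(2.5)]))
```

After the extraction, the comments are redundant and can be deleted: the method names carry the same information at every call site.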
As a concrete example, consider the following test method (the method behind the metrics table above): it builds a small TensorFlow graph layer by layer and asserts after every step.

```python
import pytest
import numpy as np
import tensorflow as tf

# ... (the graph_builder pytest fixture from the original listing is elided here) ...


def test_building_layers(graph_builder):
    tf.compat.v1.reset_default_graph()
    tf.compat.v1.enable_eager_execution()

    height = 2
    width = 6
    channels = 2
    expected_input_shape = (1, height, width, channels)

    # create the input layer from an ad-hoc ImageSpecs object
    graph_builder.input(type('ImageSpecs', (), {
        'width': width,
        'height': height,
        'color_channels': channels
    })())

    # assert the previous layer is the 'input' layer we just created
    assert tuple(graph_builder._prev_layer.shape) == expected_input_shape
    assert np.allclose(graph_builder._prev_layer.numpy(),
                       graph_builder.graph['input'].numpy())
    assert (graph_builder.graph['input'].numpy() == 0).all()

    # create relu(convolution) layer
    W = np.array(np.random.rand(*expected_input_shape[1:], channels), dtype=np.float32)
    b_weight = 6.0
    b = np.array([b_weight], dtype=np.float32)
    graph_builder.relu_conv_2d('convo1', (W, b))

    # assert the previous layer is the relu(convolution) layer we just added
    assert tuple(graph_builder._prev_layer.shape) == expected_input_shape
    assert np.allclose(graph_builder.graph['convo1'].numpy(),
                       graph_builder._prev_layer.numpy())
    # the input is initialized with tf.zeros, so the convolution output
    # reduces to the bias, and relu(6.0) == 6.0
    assert np.allclose(graph_builder.graph['convo1'].numpy(), b)

    # create Average Pooling layer
    layer_id = 'avgpool1'
    graph_builder.avg_pool(layer_id)

    # assert the previous layer is the pooling layer we just created;
    # averaging a constant tensor leaves the constant unchanged
    expected_avg_pool_shape = (1, 1, 3, 2)
    expected_avg_output = np.array(
        [[[[b_weight, b_weight],
           [b_weight, b_weight],
           [b_weight, b_weight]]]],
        dtype=np.float32)
    assert graph_builder.graph[layer_id].numpy().shape == expected_avg_pool_shape
    assert np.allclose(graph_builder.graph[layer_id].numpy(),
                       graph_builder._prev_layer.numpy())
    assert np.allclose(graph_builder.graph[layer_id].numpy(), expected_avg_output)

    # spot-check individual elements across all pooled positions and channels
    for i in range(3):
        for c in range(channels):
            assert graph_builder._prev_layer[0][0][i][c] == graph_builder.graph[layer_id][0][0][i][c]
            assert graph_builder._prev_layer[0][0][i][c] == expected_avg_output[0][0][i][c]
```
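Tying the example back to the advice above: every comment in this test marks a seam where Extract Method applies. Below is a sketch of one possible split; the helper names are invented for illustration, and the `graph_builder` API is assumed to behave exactly as in the test above:

```python
import numpy as np

def make_image_specs(height, width, channels):
    # extracted: the ad-hoc ImageSpecs construction, now named for what it does
    return type('ImageSpecs', (), {
        'width': width,
        'height': height,
        'color_channels': channels
    })()

def assert_is_prev_layer(graph_builder, layer_id, expected_shape):
    # extracted: the repeated "assert previous layer is the one we just added" block
    layer = graph_builder.graph[layer_id].numpy()
    assert layer.shape == expected_shape
    assert np.allclose(layer, graph_builder._prev_layer.numpy())

def test_building_layers(graph_builder):
    height, width, channels = 2, 6, 2
    expected_input_shape = (1, height, width, channels)

    graph_builder.input(make_image_specs(height, width, channels))
    assert_is_prev_layer(graph_builder, 'input', expected_input_shape)
    assert (graph_builder.graph['input'].numpy() == 0).all()

    W = np.random.rand(*expected_input_shape[1:], channels).astype(np.float32)
    b = np.array([6.0], dtype=np.float32)
    graph_builder.relu_conv_2d('convo1', (W, b))
    assert_is_prev_layer(graph_builder, 'convo1', expected_input_shape)
    assert np.allclose(graph_builder.graph['convo1'].numpy(), b)

    # ... the avg_pool step would follow the same pattern ...
```

The comments in the original either became helper names or disappeared, and the test body now reads as a sequence of named steps.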