| Metric      | Value |
| ----------- | ----- |
| Conditions  | 8     |
| Total Lines | 59    |
| Lines       | 0     |
| Ratio       | 0 %   |
| Changes     | 1     |
| Bugs        | 0     |
| Features    | 0     |
Small methods make your code easier to understand, especially when combined with a good name. Besides, if your method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that you should extract the commented part into a new method, and use the comment as a starting point when coming up with a good name for the new method.
Commonly applied refactorings include *Extract Method*.
If many parameters or temporary variables are present, extraction becomes harder; consider reducing them first (e.g. by replacing temporaries with queries or grouping parameters into an object).
1 | #!/usr/bin/env python |
||
def cascade_forest(self, X, y=None):
    """Grow (if ``y`` is given) or apply the cascade-forest stage.

    Training mode (``y is not None``):
        Layers are grown one at a time; each layer's predictions are
        concatenated to the input features (via ``_create_feat_arr``) and
        fed to the next layer.  Growth continues while the evaluated
        accuracy improves by more than ``self.tolerance`` and the layer
        count stays within ``self.cascade_layer``.  If the last layer
        grown lowered accuracy, its forests are deleted and the layer
        counter rolled back.

        NOTE(review): no train/test split is performed — the full data
        set is used both to fit each layer and to evaluate it, so the
        reported accuracy is a training accuracy.

    Prediction mode (``y is None``):
        ``X`` is pushed through the ``self.n_layer`` previously trained
        layers.

    :param X: feature matrix (n_samples, n_features).
    :param y: target vector, or ``None`` for prediction.
    :return: per-forest prediction probabilities of the final layer.
    """
    if y is not None:
        self.n_layer = 0
        max_layers = self.cascade_layer
        tol = self.tolerance

        # Fit and evaluate on the same data (no held-out split).
        X_train, X_test = X, X
        y_train, y_test = y, y

        # First (reference) layer.
        self.n_layer += 1
        prf_pred_ref = self._cascade_layer(X_train, y_train)
        accuracy_ref = self._cascade_evaluation(X_test, y_test)
        feat_arr = self._create_feat_arr(X_train, prf_pred_ref)

        # Second layer, grown on features augmented with layer-1 output.
        self.n_layer += 1
        prf_pred_layer = self._cascade_layer(feat_arr, y_train)
        accuracy_layer = self._cascade_evaluation(X_test, y_test)

        # Keep adding layers while accuracy improves by more than `tol`.
        while accuracy_layer > (accuracy_ref + tol) and self.n_layer <= max_layers:
            # Accept the candidate layer as the new reference.
            accuracy_ref = accuracy_layer
            prf_pred_ref = prf_pred_layer
            feat_arr = self._create_feat_arr(X_train, prf_pred_ref)
            # Grow the next candidate layer.
            self.n_layer += 1
            prf_pred_layer = self._cascade_layer(feat_arr, y_train)
            accuracy_layer = self._cascade_evaluation(X_test, y_test)

        # The last candidate hurt accuracy: drop its forests and roll back.
        if accuracy_layer < accuracy_ref:
            n_cascadeRF = self.n_cascadeRF
            for irf in range(n_cascadeRF):
                delattr(self, '_casprf{}_{}'.format(self.n_layer, irf))
                delattr(self, '_cascrf{}_{}'.format(self.n_layer, irf))
            self.n_layer -= 1

        print("layer %d - accuracy %f ref %f" % (self.n_layer, accuracy_layer, accuracy_ref))
    else:
        # Prediction: feed X through each trained layer in turn.
        at_layer = 1
        prf_pred_ref = self._cascade_layer(X, layer=at_layer)
        while at_layer < self.n_layer:
            at_layer += 1
            feat_arr = self._create_feat_arr(X, prf_pred_ref)
            prf_pred_ref = self._cascade_layer(feat_arr, layer=at_layer)

    return prf_pred_ref
99 | |||
148 |