Test Failed
Pull Request — master (#63)
created by unknown at 02:47
src/Phpml/Classification/Linear/LogisticRegression.php 4 patches
Doc Comments   +1 added line, -1 removed line
@@ -63,7 +63,7 @@
 
      * @param int $maxIterations
      * @param \Phpml\Classification\Linear\type $normalizeInputs
-     * @param \Phpml\Classification\Linear\type $trainingType
+     * @param integer $trainingType
      * @param \Phpml\Classification\Linear\type $cost
      * @param string $penalty
      *
Unused Use Statements   -1 removed line
@@ -4,7 +4,6 @@
 
 namespace Phpml\Classification\Linear;
 
-use Phpml\Classification\Classifier;
 use Phpml\Helper\Optimizer\ConjugateGradient;
 
 class LogisticRegression extends Adaline
Indentation   -1 removed line
@@ -60,7 +60,6 @@
      * Cost function can be 'log' for log-likelihood and 'sse' for sum of squared errors <br>
      *
      * Penalty (Regularization term) can be 'L2' or empty string to cancel penalty term
-
      * @param int $maxIterations
      * @param \Phpml\Classification\Linear\type $normalizeInputs
      * @param \Phpml\Classification\Linear\type $trainingType
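Reviewer note: to sanity-check the docblock edits above against usage, here is a minimal construction sketch. Argument order follows the @param list in this diff; the chosen values, and the train() call (assumed to be inherited via Perceptron's traits), are illustrative assumptions, not taken from the patch.

use Phpml\Classification\Linear\LogisticRegression;

// Hypothetical example: conjugate gradient training on the 'log' cost.
// CONJUGATE_GRAD_TRAINING is referenced later in this diff; the sample data is invented.
$classifier = new LogisticRegression(
    500,                                          // int $maxIterations
    true,                                         // $normalizeInputs
    LogisticRegression::CONJUGATE_GRAD_TRAINING,  // int $trainingType (per this patch)
    'log',                                        // $cost: 'log' or 'sse'
    'L2'                                          // string $penalty: 'L2' or ''
);
$classifier->train([[0.1, 0.2], [0.9, 0.8]], [0, 1]);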
Spacing   +10 added lines, -10 removed lines
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Classification\Linear;
 
@@ -13,12 +13,12 @@
     /**
      * Batch training: Gradient descent algorithm (default)
      */
-    const BATCH_TRAINING    = 1;
+    const BATCH_TRAINING = 1;
 
     /**
      * Online training: Stochastic gradient descent learning
      */
-    const ONLINE_TRAINING    = 2;
+    const ONLINE_TRAINING = 2;
 
     /**
      * Conjugate Batch: Conjugate Gradient Descent algorithm
@@ -74,14 +74,14 @@
         string $penalty = 'L2')
     {
         $trainingTypes = range(self::BATCH_TRAINING, self::CONJUGATE_GRAD_TRAINING);
-        if (! in_array($trainingType, $trainingTypes)) {
-            throw new \Exception("Logistic regression can only be trained with " .
-                "batch (gradient descent), online (stochastic gradient descent) " .
+        if (!in_array($trainingType, $trainingTypes)) {
+            throw new \Exception("Logistic regression can only be trained with ".
+                "batch (gradient descent), online (stochastic gradient descent) ".
                 "or conjugate batch (conjugate gradient descent) algorithms");
         }
 
-        if (! in_array($cost, ['log', 'sse'])) {
-            throw new \Exception("Logistic regression cost function can be one of the following: \n" .
+        if (!in_array($cost, ['log', 'sse'])) {
+            throw new \Exception("Logistic regression cost function can be one of the following: \n".
                 "'log' for log-likelihood and 'sse' for sum of squared errors");
         }
 
@@ -176,7 +176,7 @@
                  * The gradient of the cost function to be used with gradient descent:
                  *		∇J(x) = -(y - h(x)) = (h(x) - y)
                  */
-                $callback = function ($weights, $sample, $y) use ($penalty) {
+                $callback = function($weights, $sample, $y) use ($penalty) {
                     $this->weights = $weights;
                     $hX = $this->output($sample);
 
@@ -207,7 +207,7 @@
                  * The gradient of the cost function:
                  *		∇J(x) = -(h(x) - y) . h(x) . (1 - h(x))
                  */
-                $callback = function ($weights, $sample, $y) use ($penalty) {
+                $callback = function($weights, $sample, $y) use ($penalty) {
                     $this->weights = $weights;
                     $hX = $this->output($sample);
 
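A reviewer's note on the two gradient comments touched above, since the signs are easy to get wrong. Writing h(x) for the sigmoid output: the 'log' cost J = -(y·log h(x) + (1 - y)·log(1 - h(x))) differentiates to ∂J/∂w = (h(x) - y)·x, which matches ∇J(x) = -(y - h(x)) = (h(x) - y) once the optimizer multiplies by the sample features. For the 'sse' cost J = ½(y - h(x))², the chain rule gives ∂J/∂w = (h(x) - y)·h(x)·(1 - h(x))·x; the comment's -(h(x) - y)·h(x)·(1 - h(x)) is the negative of that, so it is worth confirming in review whether the surrounding code compensates for the sign.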
src/Phpml/Helper/Optimizer/GD.php 1 patch
Spacing   +5 added lines, -5 removed lines
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1);
+<?php declare(strict_types = 1);
 
 namespace Phpml\Helper\Optimizer;
 
@@ -40,7 +40,7 @@
 
             $this->updateWeightsWithUpdates($updates, $totalPenalty);
 
-            $this->costValues[] = array_sum($errors)/$this->sampleCount;
+            $this->costValues[] = array_sum($errors) / $this->sampleCount;
 
             if ($this->earlyStop($theta)) {
                 break;
@@ -61,7 +61,7 @@
     protected function gradient(array $theta)
     {
         $costs = [];
-        $gradient= [];
+        $gradient = [];
         $totalPenalty = 0;
 
         foreach ($this->samples as $index => $sample) {
@@ -71,7 +71,7 @@
             list($cost, $grad, $penalty) = array_pad($result, 3, 0);
 
             $costs[] = $cost;
-            $gradient[]= $grad;
+            $gradient[] = $grad;
             $totalPenalty += $penalty;
         }
 
@@ -87,7 +87,7 @@
     protected function updateWeightsWithUpdates(array $updates, float $penalty)
     {
         // Updates all weights at once
-        for ($i=0; $i <= $this->dimensions; $i++) {
+        for ($i = 0; $i <= $this->dimensions; $i++) {
             if ($i == 0) {
                 $this->theta[0] -= $this->learningRate * array_sum($updates);
             } else {
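For context on the updateWeightsWithUpdates loop above, a reviewer's reading of the standard batch gradient-descent step in the file's own names: the bias updates as θ₀ ← θ₀ - learningRate · Σ updates, and presumably each remaining weight as θᵢ ← θᵢ - learningRate · (Σⱼ updatesⱼ · sampleⱼ,ᵢ₋₁ + penalty · θᵢ), mirroring the per-sample rule visible in StochasticGD below. The else branch itself falls outside this hunk, so that reading is an assumption.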
src/Phpml/Helper/Optimizer/StochasticGD.php 1 patch
Spacing   +4 added lines, -4 removed lines
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1);
+<?php declare(strict_types = 1);
 
 namespace Phpml\Helper\Optimizer;
 
@@ -63,7 +63,7 @@
      *
      * @var array
      */
-    protected $costValues= [];
+    protected $costValues = [];
 
     /**
      * Initializes the SGD optimizer for the given number of dimensions
@@ -192,7 +192,7 @@
             $this->theta[0] -= $this->learningRate * $gradient;
 
             // Update other values
-            for ($i=1; $i <= $this->dimensions; $i++) {
+            for ($i = 1; $i <= $this->dimensions; $i++) {
                 $this->theta[$i] -= $this->learningRate *
                     ($gradient * $sample[$i - 1] + $penalty * $this->theta[$i]);
             }
@@ -216,7 +216,7 @@
     {
         // Check for early stop: No change larger than threshold (default 1e-5)
         $diff = array_map(
-            function ($w1, $w2) {
+            function($w1, $w2) {
                 return abs($w1 - $w2) > $this->threshold ? 1 : 0;
             },
             $oldTheta, $this->theta);
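Two notes on the StochasticGD hunks, restated for review: each sample moves the weights as θᵢ ← θᵢ - learningRate · (gradient · sampleᵢ₋₁ + penalty · θᵢ), the standard per-sample (stochastic) update with an L2-style shrinkage term; and the array_map in the early-stop check marks each weight 1 if it moved by more than $this->threshold (default 1e-5) and 0 otherwise, so training presumably stops once the marks sum to zero — the consuming check sits outside this hunk.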
src/Phpml/Helper/Optimizer/Optimizer.php 1 patch
Spacing   +2 added lines, -2 removed lines
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1);
+<?php declare(strict_types = 1);
 
 namespace Phpml\Helper\Optimizer;
 
@@ -29,7 +29,7 @@
 
         // Inits the weights randomly
         $this->theta = [];
-        for ($i=0; $i < $this->dimensions; $i++) {
+        for ($i = 0; $i < $this->dimensions; $i++) {
             $this->theta[] = rand() / (float) getrandmax();
         }
     }
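On the initialization loop above: rand() / (float) getrandmax() draws each starting weight uniformly from [0, 1], since rand() returns an integer in [0, getrandmax()]. It is not seeded, so runs are not reproducible; a seeded sketch for comparison (mt_srand/mt_rand/mt_getrandmax are core PHP, the surrounding snippet is invented):

// Hypothetical sketch: reproducible uniform draws in [0, 1]
mt_srand(42);
$dimensions = 3;
$theta = [];
for ($i = 0; $i < $dimensions; $i++) {
    $theta[] = mt_rand() / (float) mt_getrandmax();
}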
src/Phpml/Helper/Optimizer/ConjugateGradient.php 1 patch
Spacing   +3 added lines, -3 removed lines
@@ -1,4 +1,4 @@
-<?php declare(strict_types=1);
+<?php declare(strict_types = 1);
 
 namespace Phpml\Helper\Optimizer;
 
@@ -32,7 +32,7 @@
 
         $d = mp::muls($this->gradient($this->theta), -1);
 
-        for ($i=0; $i < $this->maxIterations; $i++) {
+        for ($i = 0; $i < $this->maxIterations; $i++) {
             // Obtain α that minimizes f(θ + α.d)
             $alpha = $this->getAlpha(array_sum($d));
 
@@ -159,7 +159,7 @@
     {
         $theta = $this->theta;
 
-        for ($i=0; $i < $this->dimensions + 1; $i++) {
+        for ($i = 0; $i < $this->dimensions + 1; $i++) {
             if ($i == 0) {
                 $theta[$i] += $alpha * array_sum($d);
             } else {
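For reviewers less familiar with the optimizer: each pass of the loop reformatted above line-searches for the α minimizing f(θ + α·d) along the current direction d (initialized to the negative gradient -∇f(θ) via mp::muls(..., -1)), and the second hunk then applies θᵢ ← θᵢ + α·dᵢ, with the bias term at i = 0 using the summed direction. In textbook conjugate gradient the next direction becomes d ← -∇f(θ) + β·d rather than the raw negative gradient; that update falls outside these hunks, so this is only a partial reading.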
src/Phpml/Classification/Linear/Perceptron.php 2 patches
Indentation   +2 added lines, -2 removed lines
@@ -15,7 +15,7 @@
 {
     use Predictable, OneVsRest;
 
-   /**
+    /**
      * @var array
      */
     protected $samples = [];
@@ -83,7 +83,7 @@
         $this->maxIterations = $maxIterations;
     }
 
-   /**
+    /**
      * @param array $samples
      * @param array $targets
      */
Spacing   +4 added lines, -4 removed lines
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Classification\Linear;
 
@@ -118,7 +118,7 @@
     protected function runTraining()
     {
         // The cost function is the sum of squares
-        $callback = function ($weights, $sample, $target) {
+        $callback = function($weights, $sample, $target) {
             $this->weights = $weights;
 
             $prediction = $this->outputClass($sample);
@@ -137,7 +137,7 @@
      */
     protected function runGradientDescent(\Closure $gradientFunc, bool $isBatch = false)
     {
-        $class = $isBatch ? GD::class :  StochasticGD::class;
+        $class = $isBatch ? GD::class : StochasticGD::class;
 
         $optimizer = (new $class($this->featureCount))
             ->setLearningRate($this->learningRate)
@@ -227,6 +227,6 @@
 
         $predictedClass = $this->outputClass($sample);
 
-        return $this->labels[ $predictedClass ];
+        return $this->labels[$predictedClass];
     }
 }
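On the runGradientDescent hunk above: the ternary selects batch GD (one weight update per pass over all samples, θ ← θ - α·Σ ∇J) when $isBatch is true, and StochasticGD (one update per sample, θ ← θ - α·∇J(sample)) otherwise — the same cost callback serves both, which is presumably why Perceptron, Adaline and LogisticRegression in this diff differ only in the callbacks they build. In the final hunk, $this->labels[$predictedClass] appears to map the internal class index from outputClass() back to whatever labels the caller trained with.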
src/Phpml/Classification/Linear/Adaline.php 1 patch
Spacing   +5 added lines, -5 removed lines
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Classification\Linear;
 
@@ -12,12 +12,12 @@
     /**
      * Batch training is the default Adaline training algorithm
      */
-    const BATCH_TRAINING    = 1;
+    const BATCH_TRAINING = 1;
 
     /**
      * Online training: Stochastic gradient descent learning
      */
-    const ONLINE_TRAINING    = 2;
+    const ONLINE_TRAINING = 2;
 
     /**
      * Training type may be either 'Batch' or 'Online' learning
@@ -41,7 +41,7 @@
     public function __construct(float $learningRate = 0.001, int $maxIterations = 1000,
         bool $normalizeInputs = true, int $trainingType = self::BATCH_TRAINING)
     {
-        if (! in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING])) {
+        if (!in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING])) {
             throw new \Exception("Adaline can only be trained with batch and online/stochastic gradient descent algorithm");
         }
 
@@ -57,7 +57,7 @@
     protected function runTraining()
     {
         // The cost function is the sum of squares
-        $callback = function ($weights, $sample, $target) {
+        $callback = function($weights, $sample, $target) {
             $this->weights = $weights;
 
             $output = $this->output($sample);
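The callback reformatted in the last hunk is the Adaline delta rule (Widrow-Hoff): with the linear output o = w·x taken before thresholding, the sum-of-squares cost ½(t - o)² has per-sample gradient (o - t) with respect to the output, so the optimizer's update becomes w ← w - learningRate · (o - t) · x. What the closure actually returns falls outside the hunk, so the exact sign convention should be checked against the GD/StochasticGD updates above.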