@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Helper\Optimizer;
 
@@ -31,7 +31,7 @@
 
         // Inits the weights randomly
         $this->theta = [];
-        for ($i=0; $i < $this->dimensions; $i++) {
+        for ($i = 0; $i < $this->dimensions; $i++) {
             $this->theta[] = rand() / (float) getrandmax();
         }
     }
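Context for the hunk above: rand() / (float) getrandmax() is a float in [0, 1], so the loop fills $theta with one uniform random starting weight per dimension. A standalone sketch (the function name is illustrative, not from the diff):

<?php

declare(strict_types = 1);

// Sketch: random weight initialization as in the hunk above.
// Each weight is uniform in [0, 1].
function randomTheta(int $dimensions): array
{
    $theta = [];
    for ($i = 0; $i < $dimensions; $i++) {
        $theta[] = rand() / (float) getrandmax();
    }

    return $theta;
}

print_r(randomTheta(3)); // e.g. [0.84, 0.39, 0.78]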
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Helper\Optimizer;
 
@@ -34,7 +34,7 @@
 
         $d = mp::muls($this->gradient($this->theta), -1);
 
-        for ($i=0; $i < $this->maxIterations; $i++) {
+        for ($i = 0; $i < $this->maxIterations; $i++) {
             // Obtain α that minimizes f(θ + α.d)
             $alpha = $this->getAlpha(array_sum($d));
 
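For orientation, this hunk is the core of the conjugate-gradient loop: the initial search direction is the negative gradient, and each iteration line-searches a step size α along the current direction. A minimal sketch of that loop shape on a one-dimensional quadratic (a fixed step stands in for getAlpha's line search, and the β-weighted direction mixing of full CG is omitted):

<?php

declare(strict_types = 1);

// Sketch: "d = -gradient(theta); pick alpha; step" on f(x) = (x - 3)^2,
// whose gradient is 2(x - 3) and whose minimum sits at x = 3.
$theta = 0.0;
$gradient = function (float $x): float {
    return 2 * ($x - 3);
};

for ($i = 0; $i < 50; $i++) {
    $d = -$gradient($theta); // steepest-descent direction
    $alpha = 0.1;            // fixed step in place of a line search
    $theta += $alpha * $d;
}

echo $theta; // ≈ 3.0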
@@ -161,7 +161,7 @@
     {
         $theta = $this->theta;
 
-        for ($i=0; $i < $this->dimensions + 1; $i++) {
+        for ($i = 0; $i < $this->dimensions + 1; $i++) {
             if ($i == 0) {
                 $theta[$i] += $alpha * array_sum($d);
             } else {
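This hunk steps θ along direction d, treating slot 0 as the bias weight, which moves by α·Σd. The hunk ends at the `} else {`, so the else-branch indexing in the sketch below is an assumption made for illustration, not taken from the diff:

<?php

declare(strict_types = 1);

// Sketch: step theta along direction d; slot 0 is the bias weight and
// moves by alpha * sum(d). The $d[$i - 1] indexing is an assumption.
function stepTheta(array $theta, array $d, float $alpha): array
{
    $dimensions = count($d);
    for ($i = 0; $i < $dimensions + 1; $i++) {
        if ($i == 0) {
            $theta[$i] += $alpha * array_sum($d);
        } else {
            $theta[$i] += $alpha * $d[$i - 1];
        }
    }

    return $theta;
}

print_r(stepTheta([0.0, 1.0, 2.0], [0.5, -0.5], 0.1)); // [0.0, 1.05, 1.95]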
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Classification\Linear;
 
@@ -13,12 +13,12 @@
     /**
      * Batch training: Gradient descent algorithm (default)
      */
-    const BATCH_TRAINING    = 1;
+    const BATCH_TRAINING = 1;
 
     /**
      * Online training: Stochastic gradient descent learning
      */
-    const ONLINE_TRAINING   = 2;
+    const ONLINE_TRAINING = 2;
 
     /**
      * Conjugate Batch: Conjugate Gradient algorithm
@@ -74,14 +74,14 @@
                                 string $penalty = 'L2')
     {
         $trainingTypes = range(self::BATCH_TRAINING, self::CONJUGATE_GRAD_TRAINING);
-        if (! in_array($trainingType, $trainingTypes)) {
-            throw new \Exception("Logistic regression can only be trained with " .
-                "batch (gradient descent), online (stochastic gradient descent) " .
+        if (!in_array($trainingType, $trainingTypes)) {
+            throw new \Exception("Logistic regression can only be trained with ".
+                "batch (gradient descent), online (stochastic gradient descent) ".
                 "or conjugate batch (conjugate gradients) algorithms");
         }
 
-        if (! in_array($cost, ['log', 'sse'])) {
-            throw new \Exception("Logistic regression cost function can be one of the following: \n" .
+        if (!in_array($cost, ['log', 'sse'])) {
+            throw new \Exception("Logistic regression cost function can be one of the following: \n".
                 "'log' for log-likelihood and 'sse' for sum of squared errors");
         }
 
@@ -177,7 +177,7 @@
          * The gradient of the cost function to be used with gradient descent:
          * ∇J(x) = -(y - h(x)) = (h(x) - y)
          */
-        $callback = function ($weights, $sample, $y) use ($penalty) {
+        $callback = function($weights, $sample, $y) use ($penalty) {
             $this->weights = $weights;
             $hX = $this->output($sample);
 
@@ -208,7 +208,7 @@
          * The gradient of the cost function:
          * ∇J(x) = -(h(x) - y) . h(x) . (1 - h(x))
          */
-        $callback = function ($weights, $sample, $y) use ($penalty) {
+        $callback = function($weights, $sample, $y) use ($penalty) {
             $this->weights = $weights;
             $hX = $this->output($sample);
 
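The two hunks above touch the closures implementing the commented gradients: ∇J = (h(x) − y) for the log-likelihood cost, and ∇J = −(h(x) − y)·h(x)·(1 − h(x)) for sum of squared errors. A standalone sketch of the log-loss case with plain arrays (no php-ml types; names are illustrative):

<?php

declare(strict_types = 1);

// Sketch: per-sample gradient of the log-likelihood cost for logistic
// regression, matching the comment "∇J(x) = (h(x) - y)".
$sigmoid = function (float $z): float {
    return 1.0 / (1.0 + exp(-$z));
};

// $weights[0] is the bias; $sample holds the feature values.
$output = function (array $weights, array $sample) use ($sigmoid): float {
    $z = $weights[0];
    foreach ($sample as $i => $x) {
        $z += $weights[$i + 1] * $x;
    }

    return $sigmoid($z);
};

$weights = [0.0, 0.5, -0.25];
$sample = [1.0, 2.0];
$y = 1;

$hX = $output($weights, $sample); // 0.5 here, since z = 0
$gradient = $hX - $y;             // scale by each feature to update weights

echo $gradient; // -0.5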
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Classification\Linear;
 
@@ -190,8 +190,8 @@
         }
 
         // Try other possible points one by one
-        for ($step = $minValue; $step <= $maxValue; $step+= $stepSize) {
-            $threshold = (float)$step;
+        for ($step = $minValue; $step <= $maxValue; $step += $stepSize) {
+            $threshold = (float) $step;
             list($errorRate, $prob) = $this->calculateErrorRate($targets, $threshold, $operator, $values);
             if ($errorRate < $split['trainingErrorRate']) {
                 $split = ['value' => $threshold, 'operator' => $operator,
@@ -215,7 +215,7 @@
     {
         $values = array_column($samples, $col);
         $valueCounts = array_count_values($values);
-        $distinctVals= array_keys($valueCounts);
+        $distinctVals = array_keys($valueCounts);
 
         $split = null;
 
@@ -274,7 +274,7 @@
         $wrong = 0.0;
         $prob = [];
         $leftLabel = $this->binaryLabels[0];
-        $rightLabel= $this->binaryLabels[1];
+        $rightLabel = $this->binaryLabels[1];
 
         foreach ($values as $index => $value) {
             if ($this->evaluate($value, $operator, $threshold)) {
@@ -288,7 +288,7 @@
                 $wrong += $this->weights[$index];
             }
 
-            if (! isset($prob[$predicted][$target])) {
+            if (!isset($prob[$predicted][$target])) {
                 $prob[$predicted][$target] = 0;
             }
             $prob[$predicted][$target]++;
@@ -297,7 +297,7 @@
         // Calculate probabilities: Proportion of labels in each leaf
         $dist = array_combine($this->binaryLabels, array_fill(0, 2, 0.0));
         foreach ($prob as $leaf => $counts) {
-            $leafTotal = (float)array_sum($prob[$leaf]);
+            $leafTotal = (float) array_sum($prob[$leaf]);
             foreach ($counts as $label => $count) {
                 if (strval($leaf) == strval($label)) {
                     $dist[$leaf] = $count / $leafTotal;
@@ -348,8 +348,8 @@
      */
     public function __toString()
     {
-        return "IF $this->column $this->operator $this->value " .
-            "THEN " . $this->binaryLabels[0] . " ".
-            "ELSE " . $this->binaryLabels[1];
+        return "IF $this->column $this->operator $this->value ".
+            "THEN ".$this->binaryLabels[0]." ".
+            "ELSE ".$this->binaryLabels[1];
     }
 }
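Several of the decision-stump hunks above sit inside the weighted-error computation: each misclassified sample adds its boosting weight to $wrong, and the leaf probabilities are label proportions per leaf. A compact sketch of the error rule, standalone and simplified to the `<=` operator, ignoring the probability bookkeeping (names are illustrative):

<?php

declare(strict_types = 1);

// Sketch: weighted error of a one-feature decision stump.
// Samples going left (value <= threshold) are predicted $labels[0],
// the rest $labels[1]; wrong guesses add their weight to the error.
function stumpError(array $values, array $targets, array $weights, float $threshold, array $labels): float
{
    $wrong = 0.0;
    foreach ($values as $i => $value) {
        $predicted = $value <= $threshold ? $labels[0] : $labels[1];
        if ($predicted != $targets[$i]) {
            $wrong += $weights[$i];
        }
    }

    return $wrong / array_sum($weights);
}

// Perfect split at threshold 2.0 => error rate 0
echo stumpError([1, 2, 3, 4], ['A', 'A', 'B', 'B'], [0.25, 0.25, 0.25, 0.25], 2.0, ['A', 'B']);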
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Classification\Linear;
 
@@ -167,7 +167,7 @@
     protected function runTraining(array $samples, array $targets)
     {
         // The cost function is the sum of squares
-        $callback = function ($weights, $sample, $target) {
+        $callback = function($weights, $sample, $target) {
             $this->weights = $weights;
 
             $prediction = $this->outputClass($sample);
@@ -189,7 +189,7 @@
      */
     protected function runGradientDescent(array $samples, array $targets, \Closure $gradientFunc, bool $isBatch = false)
     {
-        $class = $isBatch ? GD::class : StochasticGD::class;
+        $class = $isBatch ? GD::class : StochasticGD::class;
 
         if (empty($this->optimizer)) {
             $this->optimizer = (new $class($this->featureCount))
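For context on the hunk above: the ternary picks an optimizer class name, GD for batch training (all samples per update) versus StochasticGD (one sample per update), and `new $class(...)` instantiates whichever was chosen. A sketch of that dispatch pattern with stand-in classes (not php-ml's GD/StochasticGD):

<?php

declare(strict_types = 1);

// Sketch: PHP's "class name in a variable" dispatch from the hunk above.
class BatchOptimizer
{
    public $dimensions;

    public function __construct(int $dimensions)
    {
        $this->dimensions = $dimensions;
    }
}

class StochasticOptimizer extends BatchOptimizer
{
}

$isBatch = true;
$class = $isBatch ? BatchOptimizer::class : StochasticOptimizer::class;

$optimizer = new $class(3);  // instantiates BatchOptimizer
echo get_class($optimizer);  // BatchOptimizer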
@@ -284,6 +284,6 @@
 
         $predictedClass = $this->outputClass($sample);
 
-        return $this->labels[ $predictedClass ];
+        return $this->labels[$predictedClass];
     }
 }
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Classification\Linear;
 
@@ -12,12 +12,12 @@
     /**
      * Batch training is the default Adaline training algorithm
      */
-    const BATCH_TRAINING    = 1;
+    const BATCH_TRAINING = 1;
 
     /**
      * Online training: Stochastic gradient descent learning
      */
-    const ONLINE_TRAINING   = 2;
+    const ONLINE_TRAINING = 2;
 
     /**
      * Training type may be either 'Batch' or 'Online' learning
@@ -41,7 +41,7 @@
     public function __construct(float $learningRate = 0.001, int $maxIterations = 1000,
                                 bool $normalizeInputs = true, int $trainingType = self::BATCH_TRAINING)
     {
-        if (! in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING])) {
+        if (!in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING])) {
             throw new \Exception("Adaline can only be trained with batch and online/stochastic gradient descent algorithm");
         }
 
@@ -60,7 +60,7 @@
     protected function runTraining(array $samples, array $targets)
     {
         // The cost function is the sum of squares
-        $callback = function ($weights, $sample, $target) {
+        $callback = function($weights, $sample, $target) {
             $this->weights = $weights;
 
             $output = $this->output($sample);
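The Adaline hunks above wire a sum-of-squares cost into gradient descent; the resulting weight update is the classic delta rule, w ← w + η(y − output)·x, applied to the raw linear output rather than the thresholded class. A standalone sketch (dataset and names are illustrative):

<?php

declare(strict_types = 1);

// Sketch: one epoch of the Adaline delta rule on a tiny dataset.
// $weights[0] is the bias (its input is fixed at 1).
$samples = [[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]];
$targets = [-1, -1, 1];
$weights = [0.0, 0.0, 0.0];
$learningRate = 0.1;

foreach ($samples as $i => $sample) {
    // Linear output: w0 + w1*x1 + w2*x2
    $output = $weights[0] + $weights[1] * $sample[0] + $weights[2] * $sample[1];
    $error = $targets[$i] - $output;

    $weights[0] += $learningRate * $error;
    $weights[1] += $learningRate * $error * $sample[0];
    $weights[2] += $learningRate * $error * $sample[1];
}

print_r($weights);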
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml;
 
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Math\Statistic;
 
@@ -133,14 +133,14 @@
 
         if ($means === null) {
             $means = [];
-            for ($i=0; $i < $n; $i++) {
+            for ($i = 0; $i < $n; $i++) {
                 $means[] = Mean::arithmetic(array_column($data, $i));
             }
         }
 
         $cov = [];
-        for ($i=0; $i < $n; $i++) {
-            for ($k=0; $k < $n; $k++) {
+        for ($i = 0; $i < $n; $i++) {
+            for ($k = 0; $k < $n; $k++) {
                 if ($i > $k) {
                     $cov[$i][$k] = $cov[$k][$i];
                 } else {
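The hunk above fills the covariance matrix, reusing symmetry (cov[i][k] = cov[k][i]) to skip half the work. Each entry follows cov(Xi, Xk) = Σ(xi − μi)(xk − μk) / (n − 1) under the usual sample-covariance convention; the diff itself does not show which divisor php-ml uses. A self-contained sketch of one entry:

<?php

declare(strict_types = 1);

// Sketch: sample covariance between two columns of a data matrix.
function covariance(array $data, int $i, int $k): float
{
    $x = array_column($data, $i);
    $y = array_column($data, $k);
    $n = count($x);

    $meanX = array_sum($x) / $n;
    $meanY = array_sum($y) / $n;

    $sum = 0.0;
    foreach ($x as $j => $value) {
        $sum += ($value - $meanX) * ($y[$j] - $meanY);
    }

    return $sum / ($n - 1);
}

echo covariance([[1, 2], [2, 4], [3, 6]], 0, 1); // 2: the columns vary together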
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\DimensionReduction;
 
@@ -48,7 +48,7 @@
     public function __construct(int $kernel = self::KERNEL_RBF, $totalVariance = null, $numFeatures = null, $gamma = null)
     {
         $availableKernels = [self::KERNEL_RBF, self::KERNEL_SIGMOID, self::KERNEL_LAPLACIAN, self::KERNEL_LINEAR];
-        if (! in_array($kernel, $availableKernels)) {
+        if (!in_array($kernel, $availableKernels)) {
             throw new \Exception("KernelPCA can be initialized with the following kernels only: Linear, RBF, Sigmoid and Laplacian");
         }
 
@@ -102,8 +102,8 @@
         $kernelFunc = $this->getKernel();
 
         $matrix = [];
-        for ($i=0; $i < $numRows; $i++) {
-            for ($k=0; $k < $numRows; $k++) {
+        for ($i = 0; $i < $numRows; $i++) {
+            for ($k = 0; $k < $numRows; $k++) {
                 if ($i <= $k) {
                     $matrix[$i][$k] = $kernelFunc($data[$i], $data[$k]);
                 } else {
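This hunk builds the Gram matrix K with K[i][k] = k(xi, xk), computing only the upper triangle and mirroring it since every valid kernel is symmetric. A standalone sketch of the same pattern with an inline RBF kernel (data and gamma are illustrative):

<?php

declare(strict_types = 1);

// Sketch: build a symmetric kernel (Gram) matrix, computing only the
// upper triangle as in the hunk above. Kernel: RBF with gamma = 1.
$data = [[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]];
$gamma = 1.0;

$kernelFunc = function (array $x, array $y) use ($gamma): float {
    $sq = 0.0;
    foreach ($x as $i => $xi) {
        $sq += ($xi - $y[$i]) ** 2; // squared Euclidean distance
    }

    return exp(-$gamma * $sq);
};

$numRows = count($data);
$matrix = [];
for ($i = 0; $i < $numRows; $i++) {
    for ($k = 0; $k < $numRows; $k++) {
        $matrix[$i][$k] = $i <= $k
            ? $kernelFunc($data[$i], $data[$k])
            : $matrix[$k][$i]; // mirror the upper triangle
    }
}

print_r($matrix);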
@@ -126,7 +126,7 @@
      */
     protected function centerMatrix(array $matrix, int $n)
     {
-        $N = array_fill(0, $n, array_fill(0, $n, 1.0/$n));
+        $N = array_fill(0, $n, array_fill(0, $n, 1.0 / $n));
         $N = new Matrix($N, false);
         $K = new Matrix($matrix, false);
 
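For reference, the $N built in this hunk (an n×n matrix with every entry 1/n) is the ingredient of the standard kernel-centering identity K′ = K − N·K − K·N + N·K·N, which centers the data at the origin of the implicit feature space before the eigendecomposition. The spacing change from 1.0/$n to 1.0 / $n does not alter that value.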
@@ -153,19 +153,19 @@
         switch ($this->kernel) {
             case self::KERNEL_LINEAR:
                 // k(x,y) = xT.y
-                return function ($x, $y) {
+                return function($x, $y) {
                     return Matrix::dot($x, $y)[0];
                 };
             case self::KERNEL_RBF:
                 // k(x,y)=exp(-γ.|x-y|) where |..| is Euclidean distance
                 $dist = new Euclidean();
-                return function ($x, $y) use ($dist) {
+                return function($x, $y) use ($dist) {
                     return exp(-$this->gamma * $dist->sqDistance($x, $y));
                 };
 
             case self::KERNEL_SIGMOID:
                 // k(x,y)=tanh(γ.xT.y+c0) where c0=1
-                return function ($x, $y) {
+                return function($x, $y) {
                     $res = Matrix::dot($x, $y)[0] + 1.0;
                     return tanh($this->gamma * $res);
                 };
@@ -173,7 +173,7 @@
             case self::KERNEL_LAPLACIAN:
                 // k(x,y)=exp(-γ.|x-y|) where |..| is Manhattan distance
                 $dist = new Manhattan();
-                return function ($x, $y) use ($dist) {
+                return function($x, $y) use ($dist) {
                     return exp(-$this->gamma * $dist->distance($x, $y));
                 };
             }
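All four branches in the hunks above return a closure with the signature k(x, y), so callers can apply any kernel uniformly. A reduced sketch of the same "return a kernel closure" pattern with two kernels (constants, function name, and gamma are illustrative, not php-ml's API):

<?php

declare(strict_types = 1);

const KERNEL_LINEAR = 1;
const KERNEL_SIGMOID = 2;

// Sketch: pick a kernel closure by constant, as the hunks above do.
function getKernelSketch(int $kernel, float $gamma): callable
{
    switch ($kernel) {
        case KERNEL_LINEAR:
            // k(x,y) = x·y
            return function (array $x, array $y): float {
                $dot = 0.0;
                foreach ($x as $i => $xi) {
                    $dot += $xi * $y[$i];
                }

                return $dot;
            };
        case KERNEL_SIGMOID:
            // k(x,y) = tanh(γ·x·y + 1)
            return function (array $x, array $y) use ($gamma): float {
                $dot = 0.0;
                foreach ($x as $i => $xi) {
                    $dot += $xi * $y[$i];
                }

                return tanh($gamma * $dot + 1.0);
            };
    }

    throw new \Exception('Unknown kernel');
}

$k = getKernelSketch(KERNEL_SIGMOID, 0.5);
echo $k([1.0, 2.0], [2.0, 1.0]); // tanh(0.5 * 4 + 1) = tanh(3)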
@@ -204,7 +204,7 @@
     protected function projectSample(array $pairs)
     {
         // Normalize eigenvectors by eig = eigVectors / eigValues
-        $func = function ($eigVal, $eigVect) {
+        $func = function($eigVal, $eigVect) {
             $m = new Matrix($eigVect, false);
             $a = $m->divideByScalar($eigVal)->toArray();
 
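This last hunk belongs to projecting a new point: each eigenvector is scaled by the reciprocal of its eigenvalue before the kernel row is applied against it. A minimal sketch of just the scaling step, using array_map instead of php-ml's Matrix class:

<?php

declare(strict_types = 1);

// Sketch: scale an eigenvector by 1/eigenvalue, as in the comment
// "Normalize eigenvectors by eig = eigVectors / eigValues".
$eigVal = 2.0;
$eigVect = [0.6, 0.8];

$scaled = array_map(function (float $v) use ($eigVal): float {
    return $v / $eigVal;
}, $eigVect);

print_r($scaled); // [0.3, 0.4]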