@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Helper\Optimizer;
 
@@ -72,7 +72,7 @@
      *
      * @var array
      */
-    protected $costValues= [];
+    protected $costValues = [];
 
     /**
      * Initializes the SGD optimizer for the given number of dimensions
@@ -240,7 +240,7 @@
     {
         // Check for early stop: No change larger than threshold (default 1e-5)
         $diff = array_map(
-            function ($w1, $w2) {
+            function($w1, $w2) {
                 return abs($w1 - $w2) > $this->threshold ? 1 : 0;
             },
             $oldTheta,
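The hunk above is the optimizer's early-stopping test: training halts once no weight has moved by more than the threshold (default 1e-5) between two iterations. A minimal standalone sketch of that rule, using a hypothetical hasConverged() helper rather than the library's own code:

    <?php
    declare(strict_types=1);

    // Illustrative sketch of the early-stop rule from the hunk above; the 1e-5
    // default mirrors the comment in the diff, the helper name is invented.
    function hasConverged(array $oldTheta, array $newTheta, float $threshold = 1e-5): bool
    {
        foreach ($oldTheta as $i => $weight) {
            if (abs($weight - $newTheta[$i]) > $threshold) {
                return false; // at least one weight still changed noticeably
            }
        }

        return true; // no change larger than the threshold, so stop early
    }

    var_dump(hasConverged([0.5, 0.3], [0.500004, 0.299998])); // bool(true)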
@@ -100,10 +100,10 @@
     }
 
     /**
-     * @param array $samples
-     * @param array $targets
-     * @param array $labels
-     */
+     * @param array $samples
+     * @param array $targets
+     * @param array $labels
+     */
     public function trainBinary(array $samples, array $targets, array $labels)
     {
         if ($this->normalizer) {
@@ -1,6 +1,6 @@
 <?php
 
-declare(strict_types=1);
+declare(strict_types = 1);
 
 namespace Phpml\Classification\Linear;
 
@@ -76,13 +76,13 @@
     ) {
         $trainingTypes = range(self::BATCH_TRAINING, self::CONJUGATE_GRAD_TRAINING);
         if (!in_array($trainingType, $trainingTypes)) {
-            throw new \Exception("Logistic regression can only be trained with " .
-                "batch (gradient descent), online (stochastic gradient descent) " .
+            throw new \Exception("Logistic regression can only be trained with ".
+                "batch (gradient descent), online (stochastic gradient descent) ".
                 "or conjugate batch (conjugate gradients) algorithms");
         }
 
         if (!in_array($cost, ['log', 'sse'])) {
-            throw new \Exception("Logistic regression cost function can be one of the following: \n" .
+            throw new \Exception("Logistic regression cost function can be one of the following: \n".
                 "'log' for log-likelihood and 'sse' for sum of squared errors");
         }
 
@@ -193,7 +193,7 @@
             * The gradient of the cost function to be used with gradient descent:
             * ∇J(x) = -(y - h(x)) = (h(x) - y)
             */
-            $callback = function ($weights, $sample, $y) use ($penalty) {
+            $callback = function($weights, $sample, $y) use ($penalty) {
                 $this->weights = $weights;
                 $hX = $this->output($sample);
 
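The comment in the hunk above states the log-likelihood gradient ∇J(x) = -(y - h(x)) = (h(x) - y), which the closure evaluates per sample via $this->output(). A hedged sketch of that per-sample term, assuming a plain sigmoid hypothesis and hypothetical helper names (not the library's API, and ignoring the penalty handled in the real callback):

    <?php
    declare(strict_types=1);

    // Sketch only: per-sample gradient term of the log-likelihood cost with a
    // sigmoid hypothesis h(x). Helper names are invented for illustration.
    function sigmoid(float $z): float
    {
        return 1.0 / (1.0 + exp(-$z));
    }

    function logGradient(array $weights, array $sample, float $y): float
    {
        $z = 0.0;
        foreach ($sample as $i => $feature) {
            $z += $weights[$i] * $feature;
        }

        return sigmoid($z) - $y; // ∇J(x) = -(y - h(x)) = (h(x) - y)
    }

    echo logGradient([0.1, -0.2], [1.0, 2.0], 1.0), PHP_EOL; // ≈ -0.574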
@@ -224,7 +224,7 @@
             * The gradient of the cost function:
             * ∇J(x) = -(h(x) - y) . h(x) . (1 - h(x))
             */
-            $callback = function ($weights, $sample, $y) use ($penalty) {
+            $callback = function($weights, $sample, $y) use ($penalty) {
                 $this->weights = $weights;
                 $hX = $this->output($sample);
 
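Likewise, the comment in the last hunk gives the sum-of-squared-errors gradient ∇J(x) = -(h(x) - y) . h(x) . (1 - h(x)). A hedged sketch of that term under the same assumptions (sigmoid output, invented helper name, penalty omitted):

    <?php
    declare(strict_types=1);

    // Sketch only: per-sample gradient term of the 'sse' cost, following the
    // sign convention written in the hunk's comment. Helper name is invented.
    function sseGradient(array $weights, array $sample, float $y): float
    {
        $z = 0.0;
        foreach ($sample as $i => $feature) {
            $z += $weights[$i] * $feature;
        }
        $hX = 1.0 / (1.0 + exp(-$z)); // sigmoid output h(x)

        // ∇J(x) = -(h(x) - y) . h(x) . (1 - h(x))
        return -($hX - $y) * $hX * (1.0 - $hX);
    }

    echo sseGradient([0.1, -0.2], [1.0, 2.0], 1.0), PHP_EOL; // ≈ 0.140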