@@ 60-68 (lines=9) @@

    57    protected function runTraining()
    58    {
    59        // The cost function is the sum of squares
    60        $callback = function ($weights, $sample, $target) {
    61            $this->weights = $weights;
    62
    63            $output = $this->output($sample);
    64            $gradient = $output - $target;
    65            $error = $gradient ** 2;
    66
    67            return [$error, $gradient];
    68        };
    69
    70        $isBatch = $this->trainingType == self::BATCH_TRAINING;
    71
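
The closure above only computes the per-sample cost and its raw gradient (`output - target`); the `$isBatch` flag then decides whether those gradients are applied as one accumulated update per epoch or immediately after every sample. A minimal sketch of that distinction, assuming a standalone `trainEpoch()` helper, a `$learningRate` parameter, and gradient averaging in batch mode (none of which come from the code in this report):

```php
<?php

// Illustrative only: a standalone trainer that consumes a callback with the same
// contract as the closure above, ($weights, $sample, $target) => [$error, $gradient].
// It assumes $sample and $weights share the same numeric indices (bias handled
// elsewhere); trainEpoch(), $learningRate and the batch averaging are assumptions
// of this sketch, not code taken from the report.
function trainEpoch(
    array $weights,
    array $samples,
    array $targets,
    callable $costFn,
    float $learningRate,
    bool $isBatch
): array {
    if ($isBatch) {
        // Batch mode: accumulate the per-weight gradient over the whole epoch,
        // then apply a single averaged update at the end.
        $accumulated = array_fill(0, count($weights), 0.0);
        foreach ($samples as $i => $sample) {
            [, $gradient] = $costFn($weights, $sample, $targets[$i]);
            foreach ($sample as $j => $feature) {
                $accumulated[$j] += $gradient * $feature;
            }
        }
        foreach ($weights as $j => $weight) {
            $weights[$j] = $weight - $learningRate * $accumulated[$j] / count($samples);
        }
    } else {
        // Online (stochastic) mode: adjust the weights immediately after each sample,
        // so later samples in the epoch already see the updated weights.
        foreach ($samples as $i => $sample) {
            [, $gradient] = $costFn($weights, $sample, $targets[$i]);
            foreach ($sample as $j => $feature) {
                $weights[$j] -= $learningRate * $gradient * $feature;
            }
        }
    }

    return $weights;
}
```

Keeping the cost/gradient computation in a closure with this `($weights, $sample, $target)` signature is what lets a single optimizer loop serve both modes; only the update schedule changes.
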
@@ 140-154 (lines=15) @@

    137     * Trains the perceptron model with Stochastic Gradient Descent optimization
    138     * to get the correct set of weights
    139     */
    140    protected function runTraining()
    141    {
    142        // The cost function is the sum of squares
    143        $callback = function ($weights, $sample, $target) {
    144            $this->weights = $weights;
    145
    146            $prediction = $this->outputClass($sample);
    147            $gradient = $prediction - $target;
    148            $error = $gradient ** 2;
    149
    150            return [$error, $gradient];
    151        };
    152
    153        $this->runGradientDescent($callback);
    154    }
    155
    156    /**
    157     * Executes Stochastic Gradient Descent algorithm for