Total Complexity | 69 |
Total Lines | 1022 |
Duplicated Lines | 9.69 % |
Changes | 0 |
Duplicate code is one of the most pungent code smells. A rule that is often used is to re-structure code once it is duplicated in three or more places.
Common duplication problems and their corresponding solutions are listed below (the detailed list was elided from this report excerpt).
Complex classes like NiaPy.algorithms.basic.pso often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
1 | # encoding=utf8 |
||
2 | import logging |
||
3 | |||
4 | import numpy as np |
||
5 | |||
6 | from NiaPy.algorithms.algorithm import Algorithm |
||
7 | from NiaPy.util import fullArray |
||
8 | from NiaPy.util.utility import reflectRepair |
||
9 | |||
10 | logging.basicConfig() |
||
11 | logger = logging.getLogger('NiaPy.algorithms.basic') |
||
12 | logger.setLevel('INFO') |
||
13 | |||
14 | __all__ = ['ParticleSwarmAlgorithm', 'ParticleSwarmOptimization', 'OppositionVelocityClampingParticleSwarmOptimization', 'CenterParticleSwarmOptimization', 'MutatedParticleSwarmOptimization', 'MutatedCenterParticleSwarmOptimization', 'MutatedCenterUnifiedParticleSwarmOptimization', 'ComprehensiveLearningParticleSwarmOptimizer'] |
||
15 | |||
class ParticleSwarmAlgorithm(Algorithm):
    r"""Implementation of Particle Swarm Optimization algorithm.

    Algorithm:
        Particle Swarm Optimization algorithm

    Date:
        2018

    Authors:
        Lucija Brezočnik, Grega Vrbančič, Iztok Fister Jr. and Klemen Berkovič

    License:
        MIT

    Reference paper:
        Kennedy, J. and Eberhart, R. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995.

    Attributes:
        Name (List[str]): List of strings representing algorithm names.
        C1 (float): Cognitive component.
        C2 (float): Social component.
        w (Union[float, numpy.ndarray[float]]): Inertial weight.
        vMin (Union[float, numpy.ndarray[float]]): Minimal velocity.
        vMax (Union[float, numpy.ndarray[float]]): Maximal velocity.
        Repair (Callable[[numpy.ndarray, numpy.ndarray, numpy.ndarray, mtrnd.RandomState], numpy.ndarray]): Repair method for velocity.

    See Also:
        * :class:`NiaPy.algorithms.Algorithm`
    """
    Name = ['WeightedVelocityClampingParticleSwarmAlgorithm', 'WVCPSO']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""Kennedy, J. and Eberhart, R. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995."""

    @staticmethod
    def typeParameters():
        r"""Get dictionary with functions for checking values of parameters.

        Returns:
            Dict[str, Callable[[Union[int, float]], bool]]:
                * NP (Callable[[int], bool])
                * C1 (Callable[[Union[int, float]], bool])
                * C2 (Callable[[Union[int, float]], bool])
                * w (Callable[[float], bool])
                * vMin (Callable[[Union[int, float]], bool])
                * vMax (Callable[[Union[int, float]], bool])
        """
        d = Algorithm.typeParameters()
        # C1/C2 accept any non-negative number; w must be a non-negative float;
        # the velocity bounds only need to be numeric (sign is not constrained).
        d.update({
            'C1': lambda x: isinstance(x, (int, float)) and x >= 0,
            'C2': lambda x: isinstance(x, (int, float)) and x >= 0,
            'w': lambda x: isinstance(x, float) and x >= 0,
            'vMin': lambda x: isinstance(x, (int, float)),
            'vMax': lambda x: isinstance(x, (int, float))
        })
        return d

    def setParameters(self, NP=25, C1=2.0, C2=2.0, w=0.7, vMin=-1.5, vMax=1.5, Repair=reflectRepair, **ukwargs):
        r"""Set Particle Swarm Algorithm main parameters.

        Args:
            NP (int): Population size.
            C1 (float): Cognitive component.
            C2 (float): Social component.
            w (Union[float, numpy.ndarray]): Inertial weight.
            vMin (Union[float, numpy.ndarray]): Minimal velocity.
            vMax (Union[float, numpy.ndarray]): Maximal velocity.
            Repair (Callable[[np.ndarray, np.ndarray, np.ndarray, dict], np.ndarray]): Repair method for velocity.
            **ukwargs: Additional arguments.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.setParameters`
        """
        Algorithm.setParameters(self, NP=NP, **ukwargs)
        self.C1, self.C2, self.w, self.vMin, self.vMax, self.Repair = C1, C2, w, vMin, vMax, Repair

    def getParameters(self):
        r"""Get value of parameters for this instance of algorithm.

        Returns:
            Dict[str, Union[int, float, np.ndarray]]: Dictionary which has parameters mapped to values.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.getParameters`
        """
        d = Algorithm.getParameters(self)
        d.update({
            'C1': self.C1,
            'C2': self.C2,
            'w': self.w,
            'vMin': self.vMin,
            'vMax': self.vMax
        })
        return d

    def init(self, task):
        r"""Initialize dynamic arguments of Particle Swarm Optimization algorithm.

        Args:
            task (Task): Optimization task.

        Returns:
            Dict[str, Union[float, np.ndarray]]:
                * w (numpy.ndarray): Inertial weight.
                * vMin (numpy.ndarray): Minimal velocity.
                * vMax (numpy.ndarray): Maximal velocity.
                * V (numpy.ndarray): Initial velocity of particle.
        """
        # fullArray broadcasts scalar parameters to per-dimension arrays of
        # length task.D; velocities start at zero for every particle.
        return {
            'w': fullArray(self.w, task.D),
            'vMin': fullArray(self.vMin, task.D),
            'vMax': fullArray(self.vMax, task.D),
            'V': np.full([self.NP, task.D], 0.0)
        }

    def initPopulation(self, task):
        r"""Initialize population and dynamic arguments of the Particle Swarm Optimization algorithm.

        Args:
            task: Optimization task.

        Returns:
            Tuple[np.ndarray, np.ndarray, dict]:
                1. Initial population.
                2. Initial population fitness/function values.
                3. Additional arguments:
                    * popb (numpy.ndarray): particles best population.
                    * fpopb (numpy.ndarray[float]): particles best positions function/fitness value.
                    * w (numpy.ndarray): Inertial weight.
                    * vMin (numpy.ndarray): Minimal velocity.
                    * vMax (numpy.ndarray): Maximal velocity.
                    * V (numpy.ndarray): Initial velocity of particle.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.initPopulation`
        """
        pop, fpop, d = Algorithm.initPopulation(self, task)
        d.update(self.init(task))
        # Each particle's personal best starts as its initial position.
        d.update({'popb': pop.copy(), 'fpopb': fpop.copy()})
        return pop, fpop, d

    def updateVelocity(self, V, p, pb, gb, w, vMin, vMax, task, **kwargs):
        r"""Update particle velocity.

        Args:
            V (numpy.ndarray): Current velocity of particle.
            p (numpy.ndarray): Current position of particle.
            pb (numpy.ndarray): Personal best position of particle.
            gb (numpy.ndarray): Global best position of particle.
            w (numpy.ndarray): Weights for velocity adjustment.
            vMin (numpy.ndarray): Minimal velocity allowed.
            vMax (numpy.ndarray): Maximal velocity allowed.
            task (Task): Optimization task.
            kwargs: Additional arguments.

        Returns:
            numpy.ndarray: Updated velocity of particle.
        """
        # Canonical PSO velocity rule: inertia + cognitive pull toward the
        # personal best + social pull toward the global best, then clamped
        # into [vMin, vMax] by the configured Repair function.
        return self.Repair(w * V + self.C1 * self.rand(task.D) * (pb - p) + self.C2 * self.rand(task.D) * (gb - p), vMin, vMax)

    def runIteration(self, task, pop, fpop, xb, fxb, popb, fpopb, w, vMin, vMax, V, **dparams):
        r"""Core function of Particle Swarm Optimization algorithm.

        Args:
            task (Task): Optimization task.
            pop (numpy.ndarray): Current populations.
            fpop (numpy.ndarray): Current population fitness/function values.
            xb (numpy.ndarray): Current best particle.
            fxb (float): Current best particle fitness/function value.
            popb (numpy.ndarray): Particles best position.
            fpopb (numpy.ndarray): Particles best positions fitness/function values.
            w (numpy.ndarray): Inertial weights.
            vMin (numpy.ndarray): Minimal velocity.
            vMax (numpy.ndarray): Maximal velocity.
            V (numpy.ndarray): Velocity of particles.
            **dparams: Additional function arguments.

        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray, float, dict]:
                1. New population.
                2. New population fitness/function values.
                3. New global best position.
                4. New global best positions function/fitness value.
                5. Additional arguments:
                    * popb (numpy.ndarray): Particles best population.
                    * fpopb (numpy.ndarray[float]): Particles best positions function/fitness value.
                    * w (numpy.ndarray): Inertial weight.
                    * vMin (numpy.ndarray): Minimal velocity.
                    * vMax (numpy.ndarray): Maximal velocity.
                    * V (numpy.ndarray): Initial velocity of particle.

        See Also:
            * :class:`NiaPy.algorithms.algorithm.runIteration`
        """
        for i in range(len(pop)):
            # Move each particle, keep it inside the search space, then
            # refresh its personal best and (if improved) the global best.
            V[i] = self.updateVelocity(V[i], pop[i], popb[i], xb, w, vMin, vMax, task)
            pop[i] = task.repair(pop[i] + V[i], rnd=self.Rand)
            fpop[i] = task.eval(pop[i])
            if fpop[i] < fpopb[i]:
                popb[i], fpopb[i] = pop[i].copy(), fpop[i]
                if fpop[i] < fxb: xb, fxb = pop[i].copy(), fpop[i]
        return pop, fpop, xb, fxb, {'popb': popb, 'fpopb': fpopb, 'w': w, 'vMin': vMin, 'vMax': vMax, 'V': V}
||
228 | |||
class ParticleSwarmOptimization(ParticleSwarmAlgorithm):
    r"""Implementation of Particle Swarm Optimization algorithm.

    Canonical PSO: the weighted-velocity-clamping parent with inertia fixed
    to 1 and the velocity clamp disabled (infinite bounds).

    Algorithm:
        Particle Swarm Optimization algorithm

    Date:
        2018

    Authors:
        Lucija Brezočnik, Grega Vrbančič, Iztok Fister Jr. and Klemen Berkovič

    License:
        MIT

    Reference paper:
        Kennedy, J. and Eberhart, R. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995.

    Attributes:
        Name (List[str]): List of strings representing algorithm names.
        C1 (float): Cognitive component.
        C2 (float): Social component.
        Repair (Callable[[numpy.ndarray, numpy.ndarray, numpy.ndarray, mtrnd.RandomState], numpy.ndarray]): Repair method for velocity.

    See Also:
        * :class:`NiaPy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm`
    """
    Name = ['ParticleSwarmAlgorithm', 'PSO']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""Kennedy, J. and Eberhart, R. "Particle Swarm Optimization". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 1942--1948, 1995."""

    @staticmethod
    def typeParameters():
        r"""Get dictionary with functions for checking values of parameters.

        Returns:
            Dict[str, Callable[[Union[int, float]], bool]]:
                * NP: Population size.
                * C1: Cognitive component.
                * C2: Social component.
        """
        checks = ParticleSwarmAlgorithm.typeParameters()
        # The fixed parameters are not user-tunable here, so drop their checks.
        for key in ('w', 'vMin', 'vMax'):
            checks.pop(key, None)
        return checks

    def setParameters(self, **ukwargs):
        r"""Set core parameters of algorithm.

        Args:
            **ukwargs (Dict[str, Any]): Additional parameters.

        See Also:
            * :func:`NiaPy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm.setParameters`
        """
        # Silently ignore attempts to override the fixed parent parameters.
        for key in ('w', 'vMin', 'vMax'):
            ukwargs.pop(key, None)
        ParticleSwarmAlgorithm.setParameters(self, w=1, vMin=-np.inf, vMax=np.inf, **ukwargs)

    def getParameters(self):
        r"""Get value of parameters for this instance of algorithm.

        Returns:
            Dict[str, Union[int, float, np.ndarray]]: Dictionary which has parameters mapped to values.

        See Also:
            * :func:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm.getParameters`
        """
        params = ParticleSwarmAlgorithm.getParameters(self)
        for key in ('w', 'vMin', 'vMax'):
            params.pop(key, None)
        return params
||
308 | |||
class OppositionVelocityClampingParticleSwarmOptimization(ParticleSwarmAlgorithm):
    r"""Implementation of Opposition-Based Particle Swarm Optimization with Velocity Clamping.

    Algorithm:
        Opposition-Based Particle Swarm Optimization with Velocity Clamping

    Date:
        2019

    Authors:
        Klemen Berkovič

    License:
        MIT

    Reference paper:
        Shahzad, Farrukh, et al. "Opposition-based particle swarm optimization with velocity clamping (OVCPSO)." Advances in Computational Intelligence. Springer, Berlin, Heidelberg, 2009. 339-348

    Attributes:
        p0 (float): Probability of opposite learning phase.
        w_min (float): Minimum inertial weight.
        w_max (float): Maximum inertial weight.
        sigma (float): Velocity scaling factor.

    See Also:
        * :class:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm`
    """
    Name = ['OppositionVelocityClampingParticleSwarmOptimization', 'OVCPSO']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""Shahzad, Farrukh, et al. "Opposition-based particle swarm optimization with velocity clamping (OVCPSO)." Advances in Computational Intelligence. Springer, Berlin, Heidelberg, 2009. 339-348"""

    def setParameters(self, p0=.3, w_min=.4, w_max=.9, sigma=.1, C1=1.49612, C2=1.49612, **kwargs):
        r"""Set core algorithm parameters.

        Args:
            p0 (float): Probability of running Opposite learning.
            w_min (numpy.ndarray): Minimal value of weights.
            w_max (numpy.ndarray): Maximum value of weights.
            sigma (numpy.ndarray): Velocity range factor.
            C1 (float): Cognitive component.
            C2 (float): Social component.
            **kwargs: Additional arguments.

        See Also:
            * :func:`NiaPy.algorithm.basic.ParticleSwarmAlgorithm.setParameters`
        """
        # The inertia weight is managed dynamically (decays w_max -> w_min),
        # so a user-supplied `w` is ignored.
        kwargs.pop('w', None)
        ParticleSwarmAlgorithm.setParameters(self, w=w_max, C1=C1, C2=C2, **kwargs)
        self.p0, self.w_min, self.w_max, self.sigma = p0, w_min, w_max, sigma

    def getParameters(self):
        r"""Get value of parameters for this instance of algorithm.

        Returns:
            Dict[str, Union[int, float, np.ndarray]]: Dictionary which has parameters mapped to values.

        See Also:
            * :func:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm.getParameters`
        """
        d = ParticleSwarmAlgorithm.getParameters(self)
        # Velocity bounds are derived at runtime from sigma, not configured.
        d.pop('vMin', None), d.pop('vMax', None)
        d.update({
            'p0': self.p0, 'w_min': self.w_min, 'w_max': self.w_max, 'sigma': self.sigma
        })
        return d

    def oppositeLearning(self, S_l, S_h, pop, fpop, task):
        r"""Run opposite learning phase.

        Each particle x is mirrored to (S_l + S_h) - x; the best half of the
        combined (original + opposite) population is kept.

        Args:
            S_l (numpy.ndarray): Lower limit of opposite particles.
            S_h (numpy.ndarray): Upper limit of opposite particles.
            pop (numpy.ndarray): Current populations positions.
            fpop (numpy.ndarray): Current populations functions/fitness values.
            task (Task): Optimization task.

        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
                1. New particles position.
                2. New particles function/fitness values.
                3. New best position of opposite learning phase.
                4. New best function/fitness value of opposite learning phase.
        """
        S_r = S_l + S_h
        S = np.asarray([S_r - e for e in pop])
        S_f = np.asarray([task.eval(e) for e in S])
        S, S_f = np.concatenate([pop, S]), np.concatenate([fpop, S_f])
        sinds = np.argsort(S_f)
        # Keep the best len(pop) individuals; sinds[0] is the overall best.
        return S[sinds[:len(pop)]], S_f[sinds[:len(pop)]], S[sinds[0]], S_f[sinds[0]]

    def initPopulation(self, task):
        r"""Init starting population and dynamic parameters.

        Args:
            task (Task): Optimization task.

        Returns:
            Tuple[np.ndarray, np.ndarray, dict]:
                1. Initialized population.
                2. Initialized populations function/fitness values.
                3. Additional arguments:
                    * popb (numpy.ndarray): particles best population.
                    * fpopb (numpy.ndarray[float]): particles best positions function/fitness value.
                    * vMin (numpy.ndarray): Minimal velocity.
                    * vMax (numpy.ndarray): Maximal velocity.
                    * V (numpy.ndarray): Initial velocity of particle.
                    * S_h (numpy.ndarray): Upper bound for opposite learning.
                    * S_l (numpy.ndarray): Lower bound for opposite learning.
        """
        pop, fpop, d = ParticleSwarmAlgorithm.initPopulation(self, task)
        S_l, S_h = task.Lower, task.Upper
        pop, fpop, _, _ = self.oppositeLearning(S_l, S_h, pop, fpop, task)
        pb_inds = np.where(fpop < d['fpopb'])
        d['popb'][pb_inds], d['fpopb'][pb_inds] = pop[pb_inds], fpop[pb_inds]
        # BUGFIX: the original assigned the positive range to vMin and the
        # negative range to vMax (vMin > vMax), inverting the velocity clamp.
        # vMin must be the negative bound, vMax the positive one, matching
        # the vMin=min/vMax=max ordering used in runIteration.
        d['vMin'], d['vMax'] = self.sigma * (task.Lower - task.Upper), self.sigma * (task.Upper - task.Lower)
        d.update({'S_l': S_l, 'S_h': S_h})
        return pop, fpop, d

    def runIteration(self, task, pop, fpop, xb, fxb, popb, fpopb, vMin, vMax, V, S_l, S_h, **dparams):
        r"""Core function of Opposite-based Particle Swarm Optimization with velocity clamping algorithm.

        Args:
            task (Task): Optimization task.
            pop (numpy.ndarray): Current population.
            fpop (numpy.ndarray): Current populations function/fitness values.
            xb (numpy.ndarray): Current global best position.
            fxb (float): Current global best positions function/fitness value.
            popb (numpy.ndarray): Personal best position.
            fpopb (numpy.ndarray): Personal best positions function/fitness values.
            vMin (numpy.ndarray): Minimal allowed velocity.
            vMax (numpy.ndarray): Maximal allowed velocity.
            V (numpy.ndarray): Populations velocity.
            S_l (numpy.ndarray): Lower bound of opposite learning.
            S_h (numpy.ndarray): Upper bound of opposite learning.
            **dparams: Additional arguments.

        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray, float, dict]:
                1. New population.
                2. New populations function/fitness values.
                3. New global best position.
                4. New global best positions function/fitness value.
                5. Additional arguments:
                    * popb: particles best population.
                    * fpopb: particles best positions function/fitness value.
                    * vMin: Minimal velocity.
                    * vMax: Maximal velocity.
                    * V: Initial velocity of particle.
                    * S_h: Upper bound for opposite learning.
                    * S_l: Lower bound for opposite learning.
        """
        if self.rand() < self.p0:
            # Opposite learning phase: replace the swarm by the best half of
            # (swarm + mirrored swarm).
            pop, fpop, nb, fnb = self.oppositeLearning(S_l, S_h, pop, fpop, task)
            pb_inds = np.where(fpop < fpopb)
            popb[pb_inds], fpopb[pb_inds] = pop[pb_inds], fpop[pb_inds]
            if fnb < fxb: xb, fxb = nb.copy(), fnb
        else:
            # Standard PSO phase with inertia decaying linearly from w_max
            # to w_min over the allotted generations.
            w = self.w_max - ((self.w_max - self.w_min) / task.nGEN) * task.Iters
            for i in range(len(pop)):
                V[i] = self.updateVelocity(V[i], pop[i], popb[i], xb, w, vMin, vMax, task)
                pop[i] = task.repair(pop[i] + V[i], rnd=self.Rand)
                fpop[i] = task.eval(pop[i])
                if fpop[i] < fpopb[i]:
                    popb[i], fpopb[i] = pop[i].copy(), fpop[i]
                    if fpop[i] < fxb: xb, fxb = pop[i].copy(), fpop[i]
            # Re-clamp velocities relative to the swarm's current spread.
            vMin, vMax = self.sigma * np.min(pop, axis=0), self.sigma * np.max(pop, axis=0)
        return pop, fpop, xb, fxb, {'popb': popb, 'fpopb': fpopb, 'vMin': vMin, 'vMax': vMax, 'V': V, 'S_l': S_l, 'S_h': S_h}
||
484 | |||
class CenterParticleSwarmOptimization(ParticleSwarmAlgorithm):
    r"""Implementation of Center Particle Swarm Optimization.

    Standard PSO with no velocity clamp, extended so that after each
    iteration the swarm centroid is also evaluated as a candidate solution.

    Algorithm:
        Center Particle Swarm Optimization

    Date:
        2019

    Authors:
        Klemen Berkovič

    License:
        MIT

    Reference paper:
        H.-C. Tsai, Predicting strengths of concrete-type specimens using hybrid multilayer perceptrons with center-Unified particle swarm optimization, Adv. Eng. Softw. 37 (2010) 1104–1112.

    See Also:
        * :class:`NiaPy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm`
    """
    Name = ['CenterParticleSwarmOptimization', 'CPSO']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""H.-C. Tsai, Predicting strengths of concrete-type specimens using hybrid multilayer perceptrons with center-Unified particle swarm optimization, Adv. Eng. Softw. 37 (2010) 1104–1112."""

    def setParameters(self, **kwargs):
        r"""Set core algorithm parameters.

        Args:
            **kwargs: Additional arguments.

        See Also:
            :func:`NiaPy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.setParameters`
        """
        # Velocity clamping is disabled for this variant, so discard any
        # caller-supplied bounds before forwarding to the parent.
        kwargs.pop('vMin', None)
        kwargs.pop('vMax', None)
        ParticleSwarmAlgorithm.setParameters(self, vMin=-np.inf, vMax=np.inf, **kwargs)

    def getParameters(self):
        r"""Get value of parameters for this instance of algorithm.

        Returns:
            Dict[str, Union[int, float, np.ndarray]]: Dictionary which has parameters mapped to values.

        See Also:
            * :func:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm.getParameters`
        """
        params = ParticleSwarmAlgorithm.getParameters(self)
        params.pop('vMin', None)
        params.pop('vMax', None)
        return params

    def runIteration(self, task, pop, fpop, xb, fxb, **dparams):
        r"""Core function of algorithm.

        Args:
            task (Task): Optimization task.
            pop (numpy.ndarray): Current population of particles.
            fpop (numpy.ndarray): Current particles function/fitness values.
            xb (numpy.ndarray): Current global best particle.
            fxb (numpy.ndarray): Current global best particles function/fitness value.
            **dparams: Additional arguments.

        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray, float, dict]:
                1. New population of particles.
                2. New populations function/fitness values.
                3. New global best particle.
                4. New global best particle function/fitness value.
                5. Additional arguments.

        See Also:
            * :func:`NiaPy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.runIteration`
        """
        pop, fpop, xb, fxb, d = ParticleSwarmAlgorithm.runIteration(self, task, pop, fpop, xb, fxb, **dparams)
        # Evaluate the swarm centroid as an extra candidate; it replaces the
        # global best on ties or improvements.
        center = np.sum(pop, axis=0) / len(pop)
        fcenter = task.eval(center)
        if fcenter <= fxb:
            xb, fxb = center, fcenter
        return pop, fpop, xb, fxb, d
||
572 | |||
class MutatedParticleSwarmOptimization(ParticleSwarmAlgorithm):
    r"""Implementation of Mutated Particle Swarm Optimization.

    Standard PSO with no velocity clamp, extended with repeated mutation of
    the global best particle after each iteration.

    Algorithm:
        Mutated Particle Swarm Optimization

    Date:
        2019

    Authors:
        Klemen Berkovič

    License:
        MIT

    Reference paper:
        H. Wang, C. Li, Y. Liu, S. Zeng, A hybrid particle swarm algorithm with cauchy mutation, Proceedings of the 2007 IEEE Swarm Intelligence Symposium (2007) 356–360.

    Attributes:
        nmutt (int): Number of mutations of global best particle.

    See Also:
        * :class:`NiaPy.algorithms.basic.WeightedVelocityClampingParticleSwarmAlgorithm`
    """
    Name = ['MutatedParticleSwarmOptimization', 'MPSO']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""H. Wang, C. Li, Y. Liu, S. Zeng, A hybrid particle swarm algorithm with cauchy mutation, Proceedings of the 2007 IEEE Swarm Intelligence Symposium (2007) 356–360."""

    def setParameters(self, nmutt=10, **kwargs):
        r"""Set core algorithm parameters.

        Args:
            nmutt (int): Number of mutations of global best particle.
            **kwargs: Additional arguments.

        See Also:
            :func:`NiaPy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.setParameters`
        """
        # This variant runs without a velocity clamp.
        kwargs.pop('vMin', None)
        kwargs.pop('vMax', None)
        ParticleSwarmAlgorithm.setParameters(self, vMin=-np.inf, vMax=np.inf, **kwargs)
        self.nmutt = nmutt

    def getParameters(self):
        r"""Get value of parameters for this instance of algorithm.

        Returns:
            Dict[str, Union[int, float, np.ndarray]]: Dictionary which has parameters mapped to values.

        See Also:
            * :func:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm.getParameters`
        """
        params = ParticleSwarmAlgorithm.getParameters(self)
        params.pop('vMin', None)
        params.pop('vMax', None)
        params.update({'nmutt': self.nmutt})
        return params

    def runIteration(self, task, pop, fpop, xb, fxb, **dparams):
        r"""Core function of algorithm.

        Args:
            task (Task): Optimization task.
            pop (numpy.ndarray): Current population of particles.
            fpop (numpy.ndarray): Current particles function/fitness values.
            xb (numpy.ndarray): Current global best particle.
            fxb (float): Current global best particles function/fitness value.
            **dparams: Additional arguments.

        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray, float, dict]:
                1. New population of particles.
                2. New populations function/fitness values.
                3. New global best particle.
                4. New global best particle function/fitness value.
                5. Additional arguments.

        See Also:
            * :func:`NiaPy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.runIteration`
        """
        pop, fpop, xb, fxb, d = ParticleSwarmAlgorithm.runIteration(self, task, pop, fpop, xb, fxb, **dparams)
        # Mutation scale: mean swarm velocity, normalized by its largest
        # absolute component.
        vel = d['V']
        v_avg = np.sum(vel, axis=0) / len(vel)
        v_avg = v_avg / np.max(np.abs(v_avg))
        for _ in range(self.nmutt):
            candidate = task.repair(xb + v_avg * self.uniform(task.Lower, task.Upper), self.Rand)
            fcandidate = task.eval(candidate)
            if fcandidate <= fxb:
                xb, fxb = candidate, fcandidate
        return pop, fpop, xb, fxb, d
||
670 | |||
class MutatedCenterParticleSwarmOptimization(CenterParticleSwarmOptimization):
    r"""Implementation of Mutated Particle Swarm Optimization.

    Center PSO extended with repeated mutation of the global best particle
    after each iteration.

    Algorithm:
        Mutated Center Particle Swarm Optimization

    Date:
        2019

    Authors:
        Klemen Berkovič

    License:
        MIT

    Reference paper:
        TODO find one

    Attributes:
        nmutt (int): Number of mutations of global best particle.

    See Also:
        * :class:`NiaPy.algorithms.basic.CenterParticleSwarmOptimization`
    """
    Name = ['MutatedCenterParticleSwarmOptimization', 'MCPSO']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""TODO find one"""

    def setParameters(self, nmutt=10, **kwargs):
        r"""Set core algorithm parameters.

        Args:
            nmutt (int): Number of mutations of global best particle.
            **kwargs: Additional arguments.

        See Also:
            :func:`NiaPy.algorithm.basic.CenterParticleSwarmOptimization.setParameters`
        """
        # Delegate to the direct parent instead of duplicating its logic
        # (it already strips vMin/vMax and disables the velocity clamp).
        CenterParticleSwarmOptimization.setParameters(self, **kwargs)
        self.nmutt = nmutt

    def getParameters(self):
        r"""Get value of parameters for this instance of algorithm.

        Returns:
            Dict[str, Union[int, float, np.ndarray]]: Dictionary which has parameters mapped to values.

        See Also:
            * :func:`NiaPy.algorithms.basic.CenterParticleSwarmOptimization.getParameters`
        """
        d = CenterParticleSwarmOptimization.getParameters(self)
        d.update({'nmutt': self.nmutt})
        return d

    def runIteration(self, task, pop, fpop, xb, fxb, **dparams):
        r"""Core function of algorithm.

        Args:
            task (Task): Optimization task.
            pop (numpy.ndarray): Current population of particles.
            fpop (numpy.ndarray): Current particles function/fitness values.
            xb (numpy.ndarray): Current global best particle.
            fxb (float): Current global best particles function/fitness value.
            **dparams: Additional arguments.

        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray, float, dict]:
                1. New population of particles.
                2. New populations function/fitness values.
                3. New global best particle.
                4. New global best particle function/fitness value.
                5. Additional arguments.

        See Also:
            * :func:`NiaPy.algorithm.basic.WeightedVelocityClampingParticleSwarmAlgorithm.runIteration`
        """
        pop, fpop, xb, fxb, d = CenterParticleSwarmOptimization.runIteration(self, task, pop, fpop, xb, fxb, **dparams)
        # Mutation scale: mean swarm velocity, normalized by its largest
        # absolute component.
        v = d['V']
        v_a = (np.sum(v, axis=0) / len(v))
        v_a = v_a / np.max(np.abs(v_a))
        for _ in range(self.nmutt):
            g = task.repair(xb + v_a * self.uniform(task.Lower, task.Upper), self.Rand)
            fg = task.eval(g)
            if fg <= fxb: xb, fxb = g, fg
        return pop, fpop, xb, fxb, d
||
767 | |||
class MutatedCenterUnifiedParticleSwarmOptimization(MutatedCenterParticleSwarmOptimization):
    r"""Implementation of Mutated Center Unified Particle Swarm Optimization.

    Combines the mutated-center PSO with the unified velocity update, where a
    random mixing vector blends the cognitive and social components.

    Algorithm:
        Mutated Center Unified Particle Swarm Optimization

    Date:
        2019

    Authors:
        Klemen Berkovič

    License:
        MIT

    Reference paper:
        Tsai, Hsing-Chih. "Unified particle swarm delivers high efficiency to particle swarm optimization." Applied Soft Computing 55 (2017): 371-383.

    Attributes:
        nmutt (int): Number of mutations of global best particle.

    See Also:
        * :class:`NiaPy.algorithms.basic.CenterParticleSwarmOptimization`
    """
    # Full name and acronym used by NiaPy to look this algorithm up by name.
    Name = ['MutatedCenterUnifiedParticleSwarmOptimization', 'MCUPSO']

    @staticmethod
    def algorithmInfo():
        r"""Return the reference for this algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""Tsai, Hsing-Chih. "Unified particle swarm delivers high efficiency to particle swarm optimization." Applied Soft Computing 55 (2017): 371-383."""

    def setParameters(self, **kwargs):
        r"""Set core algorithm parameters.

        Args:
            **kwargs: Additional arguments.

        See Also:
            :func:`NiaPy.algorithm.basic.MutatedCenterParticleSwarmOptimization.setParameters`
        """
        # Velocity clamping is disabled for this variant: drop any
        # user-supplied bounds and force them to be unbounded.
        kwargs.pop('vMin', None)
        kwargs.pop('vMax', None)
        MutatedCenterParticleSwarmOptimization.setParameters(self, vMin=-np.inf, vMax=np.inf, **kwargs)

    def updateVelocity(self, V, p, pb, gb, w, vMin, vMax, task, **kwargs):
        r"""Update particle velocity using the unified PSO rule.

        A random per-dimension mixing vector weights the cognitive (personal
        best) term, while its complement weights the social (global best) term.

        Args:
            V (numpy.ndarray): Current velocity of particle.
            p (numpy.ndarray): Current position of particle.
            pb (numpy.ndarray): Personal best position of particle.
            gb (numpy.ndarray): Global best position of particle.
            w (numpy.ndarray): Weights for velocity adjustment.
            vMin (numpy.ndarray): Minimal velocity allowed.
            vMax (numpy.ndarray): Maxmimal velocity allowed.
            task (Task): Optimization task.
            kwargs: Additional arguments.

        Returns:
            numpy.ndarray: Updated velocity of particle.
        """
        # The three self.rand calls are made in this exact order to keep the
        # random stream identical to the reference implementation.
        mix = self.rand(task.D)
        cognitive = self.C1 * self.rand(task.D) * (pb - p) * mix
        social = self.C2 * self.rand(task.D) * (gb - p) * (1 - mix)
        return self.Repair(w * V + cognitive + social, vMin, vMax)
||
837 | |||
class ComprehensiveLearningParticleSwarmOptimizer(ParticleSwarmAlgorithm):
    r"""Implementation of Comprehensive Learning Particle Swarm Optimizer.

    Algorithm:
        Comprehensive Learning Particle Swarm Optimizer

    Date:
        2019

    Authors:
        Klemen Berkovič

    License:
        MIT

    Reference paper:
        J. J. Liang, A. K. Qin, P. N. Suganthan and S. Baskar, "Comprehensive learning particle swarm optimizer for global optimization of multimodal functions," in IEEE Transactions on Evolutionary Computation, vol. 10, no. 3, pp. 281-295, June 2006. doi: 10.1109/TEVC.2005.857610

    Reference URL:
        http://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1637688&isnumber=34326

    Attributes:
        w0 (float): Initial (maximal) inertia weight.
        w1 (float): Final (minimal) inertia weight.
        C (float): Velocity constant.
        m (int): Refresh gap (iterations without improvement before a particle
            is re-seeded toward the global best).

    See Also:
        * :class:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm`
    """
    # Full name and acronym used by NiaPy to look this algorithm up by name.
    Name = ['ComprehensiveLearningParticleSwarmOptimizer', 'CLPSO']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""J. J. Liang, A. K. Qin, P. N. Suganthan and S. Baskar, "Comprehensive learning particle swarm optimizer for global optimization of multimodal functions," in IEEE Transactions on Evolutionary Computation, vol. 10, no. 3, pp. 281-295, June 2006. doi: 10.1109/TEVC.2005.857610 """

    def setParameters(self, m=10, w0=.9, w1=.4, C=1.49445, **ukwargs):
        r"""Set Particle Swarm Algorithm main parameters.

        Args:
            m (int): Refresh gap.
            w0 (float): Initial (maximal) inertia weight.
            w1 (float): Final (minimal) inertia weight.
            C (float): Velocity constant.
            **ukwargs: Additional arguments

        See Also:
            * :func:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm.setParameters`
        """
        ParticleSwarmAlgorithm.setParameters(self, **ukwargs)
        self.m, self.w0, self.w1, self.C = m, w0, w1, C

    def getParameters(self):
        r"""Get values of parameters for this instance of the algorithm.

        Returns:
            Dict[str, Union[int, float, np.ndarray]]: Dictionary mapping parameter names to values.

        See Also:
            * :func:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm.getParameters`
        """
        d = ParticleSwarmAlgorithm.getParameters(self)
        d.update({
            'm': self.m, 'w0': self.w0, 'w1': self.w1, 'C': self.C
        })
        return d

    def init(self, task):
        r"""Initialize dynamic arguments of Particle Swarm Optimization algorithm.

        Args:
            task (Task): Optimization task.

        Returns:
            Dict[str, np.ndarray]:
                * vMin: Mininal velocity.
                * vMax: Maximal velocity.
                * V: Initial velocity of particle.
                * flag: Refresh gap counter.
                * Pc: Per-particle comprehensive-learning probability.
        """
        # Pc follows Liang et al. 2006, Eq. (--): Pc_i = 0.05 + 0.45 *
        # (exp(10*(i-1)/(ps-1)) - 1) / (exp(10) - 1) with 1-based i. Our loop
        # index is 0-based, so the paper's (i-1) is simply i here; the original
        # code used (i - 1) with the 0-based index, an off-by-one that gave the
        # first particle a negative exponent.
        return {'vMin': fullArray(self.vMin, task.D), 'vMax': fullArray(self.vMax, task.D), 'V': np.full([self.NP, task.D], 0.0), 'flag': np.full(self.NP, 0), 'Pc': np.asarray([.05 + .45 * (np.exp(10 * i / (self.NP - 1)) - 1) / (np.exp(10) - 1) for i in range(self.NP)])}

    def generatePbestCL(self, i, Pc, pbs, fpbs):
        r"""Generate new personal best position for learning.

        Each dimension either keeps particle ``i``'s own personal best or, with
        probability ``Pc``, copies that dimension from the fitter of two
        randomly chosen particles' personal bests.

        Args:
            i (int): Current particle.
            Pc (float): Learning probability.
            pbs (numpy.ndarray): Personal best positions for population.
            fpbs (numpy.ndarray): Personal best positions function/fitness values for personal best positions.

        Returns:
            numpy.ndarray: Personal best for learning.
        """
        pbest = []
        for j in range(len(pbs[i])):
            if self.rand() > Pc: pbest.append(pbs[i, j])
            else:
                # Tournament of size two over random exemplars.
                r1, r2 = int(self.rand() * len(pbs)), int(self.rand() * len(pbs))
                if fpbs[r1] < fpbs[r2]: pbest.append(pbs[r1, j])
                else: pbest.append(pbs[r2, j])
        return np.asarray(pbest)

    def updateVelocityCL(self, V, p, pb, w, vMin, vMax, task, **kwargs):
        r"""Update particle velocity.

        Args:
            V (numpy.ndarray): Current velocity of particle.
            p (numpy.ndarray): Current position of particle.
            pb (numpy.ndarray): Personal best position of particle.
            w (numpy.ndarray): Weights for velocity adjustment.
            vMin (numpy.ndarray): Minimal velocity allowed.
            vMax (numpy.ndarray): Maxmimal velocity allowed.
            task (Task): Optimization task.
            kwargs: Additional arguments.

        Returns:
            numpy.ndarray: Updated velocity of particle.
        """
        return self.Repair(w * V + self.C * self.rand(task.D) * (pb - p), vMin, vMax)

    def runIteration(self, task, pop, fpop, xb, fxb, popb, fpopb, vMin, vMax, V, flag, Pc, **dparams):
        r"""Core function of algorithm.

        Args:
            task (Task): Optimization task.
            pop (numpy.ndarray): Current populations.
            fpop (numpy.ndarray): Current population fitness/function values.
            xb (numpy.ndarray): Current best particle.
            fxb (float): Current best particle fitness/function value.
            popb (numpy.ndarray): Particles best position.
            fpopb (numpy.ndarray): Particles best positions fitness/function values.
            vMin (numpy.ndarray): Minimal velocity.
            vMax (numpy.ndarray): Maximal velocity.
            V (numpy.ndarray): Velocity of particles.
            flag (numpy.ndarray): Refresh rate counter.
            Pc (numpy.ndarray): Learning rate.
            **dparams (Dict[str, Any]): Additional function arguments.

        Returns:
            Tuple[np.ndarray, np.ndarray, np.ndarray, float, dict]:
                1. New population.
                2. New population fitness/function values.
                3. New global best position.
                4. New global best positions function/fitness value.
                5. Additional arguments:
                    * popb: Particles best population.
                    * fpopb: Particles best positions function/fitness value.
                    * vMin: Minimal velocity.
                    * vMax: Maximal velocity.
                    * V: Initial velocity of particle.
                    * flag: Refresh gap counter.
                    * Pc: Learning rate.

        See Also:
            * :class:`NiaPy.algorithms.basic.ParticleSwarmAlgorithm.runIteration`
        """
        # Inertia weight decreases linearly from w0 to w1 over the run
        # (Liang et al. 2006). The original code multiplied w0 by the decay
        # term, which made w start at 0 and *grow* toward 0.45 instead of
        # shrinking from 0.9 to 0.4.
        w = self.w0 - (self.w0 - self.w1) * task.Iters / task.nGEN
        for i in range(len(pop)):
            # Re-seed stagnated particles toward the global best.
            if flag[i] >= self.m:
                V[i] = self.updateVelocity(V[i], pop[i], popb[i], xb, 1, vMin, vMax, task)
                pop[i] = task.repair(pop[i] + V[i], rnd=self.Rand)
                fpop[i] = task.eval(pop[i])
                if fpop[i] < fpopb[i]:
                    popb[i], fpopb[i] = pop[i].copy(), fpop[i]
                    if fpop[i] < fxb: xb, fxb = pop[i].copy(), fpop[i]
                flag[i] = 0
            # Comprehensive-learning step: follow a per-dimension exemplar.
            pbest = self.generatePbestCL(i, Pc[i], popb, fpopb)
            V[i] = self.updateVelocityCL(V[i], pop[i], pbest, w, vMin, vMax, task)
            pop[i] = pop[i] + V[i]
            # Only evaluate particles that stayed inside the search bounds.
            if not ((pop[i] < task.Lower).any() or (pop[i] > task.Upper).any()):
                fpop[i] = task.eval(pop[i])
                if fpop[i] < fpopb[i]:
                    popb[i], fpopb[i] = pop[i].copy(), fpop[i]
                    if fpop[i] < fxb: xb, fxb = pop[i].copy(), fpop[i]
        return pop, fpop, xb, fxb, {'popb': popb, 'fpopb': fpopb, 'vMin': vMin, 'vMax': vMax, 'V': V, 'flag': flag, 'Pc': Pc}
||
1022 | |||
1024 |