| Total Complexity | 67 |
| Total Lines | 546 |
| Duplicated Lines | 4.03 % |
| Changes | 0 | ||
Duplicate code is one of the most pungent code smells. A rule of thumb that is often used is to restructure code once it is duplicated in three or more places.
Common duplication problems, and their corresponding solutions, are:
Complex classes like NiaPy.algorithms.other.mts often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | # encoding=utf8 |
||
| 2 | # pylint: disable=mixed-indentation, multiple-statements, consider-using-enumerate, unused-argument, bad-continuation, line-too-long, redefined-builtin, arguments-differ, superfluous-parens |
||
| 3 | import logging |
||
| 4 | import operator as oper |
||
| 5 | |||
| 6 | from numpy import random as rand, vectorize, argwhere, copy, apply_along_axis, argmin, argsort, fmin, fmax, full, asarray, abs, inf |
||
| 7 | |||
| 8 | from NiaPy.algorithms.algorithm import Algorithm |
||
| 9 | |||
| 10 | logging.basicConfig() |
||
| 11 | logger = logging.getLogger('NiaPy.algorithms.other') |
||
| 12 | logger.setLevel('INFO') |
||
| 13 | |||
| 14 | __all__ = ['MultipleTrajectorySearch', 'MultipleTrajectorySearchV1', 'MTS_LS1', 'MTS_LS1v1', 'MTS_LS2', 'MTS_LS3', 'MTS_LS3v1'] |
||
| 15 | |||
def MTS_LS1(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, BONUS1=10, BONUS2=1, sr_fix=0.4, rnd=rand, **ukwargs):
    r"""Multiple trajectory local search one.

    Walks through every dimension of ``Xk``, first probing a step of ``-SR[i]``
    and, if that worsens the solution, a step of ``+0.5 * SR[i]``. The search
    range is halved whenever the previous pass brought no improvement.

    Args:
        Xk (numpy.ndarray): Current solution.
        Xk_fit (float): Current solutions fitness/function value.
        Xb (numpy.ndarray): Global best solution.
        Xb_fit (float): Global best solutions fitness/function value.
        improve (bool): Has the solution been improved.
        SR (numpy.ndarray): Search range.
        task (Task): Optimization task.
        BONUS1 (int): Bonus reward for improving global best solution.
        BONUS2 (int): Bonus reward for improving solution.
        sr_fix (numpy.ndarray): Fix when search range is too small.
        rnd (mtrand.RandomState): Random number generator.
        **ukwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
            1. New solution.
            2. New solutions fitness/function value.
            3. Global best if found else old global best.
            4. Global bests function/fitness value.
            5. If solution has improved.
            6. Grade obtained by this run.
            7. Search range.
    """
    if not improve:
        # No improvement last time: shrink the range, resetting collapsed dimensions.
        SR /= 2
        too_small = argwhere(SR < 1e-15)
        SR[too_small] = task.bRange[too_small] * sr_fix
    grade = 0.0
    improve = False
    for i in range(len(Xk)):
        saved = Xk[i]
        # First probe: step down along dimension i.
        Xk[i] = saved - SR[i]
        Xk = task.repair(Xk, rnd)
        trial_fit = task.eval(Xk)
        if trial_fit < Xb_fit:
            grade += BONUS1
            Xb, Xb_fit = Xk.copy(), trial_fit
        if trial_fit == Xk_fit:
            Xk[i] = saved
        elif trial_fit > Xk_fit:
            # Second probe: half a step up along dimension i.
            Xk[i] = saved + 0.5 * SR[i]
            Xk = task.repair(Xk, rnd)
            trial_fit = task.eval(Xk)
            if trial_fit < Xb_fit:
                grade += BONUS1
                Xb, Xb_fit = Xk.copy(), trial_fit
            if trial_fit >= Xk_fit:
                Xk[i] = saved
            else:
                grade += BONUS2
                improve, Xk_fit = True, trial_fit
        else:
            grade += BONUS2
            improve, Xk_fit = True, trial_fit
    return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR
def MTS_LS1v1(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, BONUS1=10, BONUS2=1, sr_fix=0.4, rnd=rand, **ukwargs):
    r"""Multiple trajectory local search one version two.

    Variant of :func:`MTS_LS1` where the per-dimension step is additionally
    scaled by a random direction vector drawn once per call.

    Args:
        Xk (numpy.ndarray): Current solution.
        Xk_fit (float): Current solutions fitness/function value.
        Xb (numpy.ndarray): Global best solution.
        Xb_fit (float): Global best solutions fitness/function value.
        improve (bool): Has the solution been improved.
        SR (numpy.ndarray): Search range.
        task (Task): Optimization task.
        BONUS1 (int): Bonus reward for improving global best solution.
        BONUS2 (int): Bonus reward for improving solution.
        sr_fix (numpy.ndarray): Fix when search range is too small.
        rnd (mtrand.RandomState): Random number generator.
        **ukwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
            1. New solution.
            2. New solutions fitness/function value.
            3. Global best if found else old global best.
            4. Global bests function/fitness value.
            5. If solution has improved.
            6. Grade obtained by this run.
            7. Search range.
    """
    if not improve:
        # Shrink the search range; restore dimensions that collapsed to ~0.
        SR /= 2
        too_small = argwhere(SR < 1e-15)
        SR[too_small] = task.bRange[too_small] * sr_fix
    grade = 0.0
    improve = False
    # One random direction vector for the whole sweep.
    D = rnd.uniform(-1, 1, task.D)
    for j in range(len(Xk)):
        saved = Xk[j]
        Xk[j] = saved - SR[j] * D[j]
        Xk = task.repair(Xk, rnd)
        trial_fit = task.eval(Xk)
        if trial_fit < Xb_fit:
            grade += BONUS1
            Xb, Xb_fit = Xk.copy(), trial_fit
        elif trial_fit == Xk_fit:
            Xk[j] = saved
        elif trial_fit > Xk_fit:
            # Worsened: probe half a step in the opposite direction.
            Xk[j] = saved + 0.5 * SR[j]
            Xk = task.repair(Xk, rnd)
            trial_fit = task.eval(Xk)
            if trial_fit < Xb_fit:
                grade += BONUS1
                Xb, Xb_fit = Xk.copy(), trial_fit
            elif trial_fit >= Xk_fit:
                Xk[j] = saved
            else:
                grade += BONUS2
                improve, Xk_fit = True, trial_fit
        else:
            grade += BONUS2
            improve, Xk_fit = True, trial_fit
    return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR
def genNewX(x, r, d, SR, op):
    r"""Move solution to other position based on operator.

    Args:
        x (numpy.ndarray): Solution to move.
        r (int): Random number; the move is applied only when it is zero.
        d (float): Scale factor.
        SR (numpy.ndarray): Search range.
        op (operator): Operator to use.

    Returns:
        numpy.ndarray: Moved solution based on operator.
    """
    if r == 0:
        return op(x, SR * d)
    return x
def MTS_LS2(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, BONUS1=10, BONUS2=1, sr_fix=0.4, rnd=rand, **ukwargs):
    r"""Multiple trajectory local search two.

    Perturbs a randomly chosen subset of dimensions at once (each dimension is
    moved with probability 1/4), first subtracting the scaled step and, if that
    worsens the solution, adding it instead.

    Args:
        Xk (numpy.ndarray): Current solution.
        Xk_fit (float): Current solutions fitness/function value.
        Xb (numpy.ndarray): Global best solution.
        Xb_fit (float): Global best solutions fitness/function value.
        improve (bool): Has the solution been improved.
        SR (numpy.ndarray): Search range.
        task (Task): Optimization task.
        BONUS1 (int): Bonus reward for improving global best solution.
        BONUS2 (int): Bonus reward for improving solution.
        sr_fix (numpy.ndarray): Fix when search range is too small.
        rnd (mtrand.RandomState): Random number generator.
        **ukwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
            1. New solution.
            2. New solutions fitness/function value.
            3. Global best if found else old global best.
            4. Global bests function/fitness value.
            5. If solution has improved.
            6. Grade obtained by this run.
            7. Search range.

    See Also:
        * :func:`NiaPy.algorithms.other.genNewX`
    """
    if not improve:
        # Shrink the search range; restore dimensions that collapsed to ~0.
        SR /= 2
        too_small = argwhere(SR < 1e-15)
        SR[too_small] = task.bRange[too_small] * sr_fix
    grade = 0.0
    improve = False
    for _ in range(len(Xk)):
        scale = -1 + rnd.rand(len(Xk)) * 2
        pick = rnd.choice([0, 1, 2, 3], len(Xk))
        trial = task.repair(vectorize(genNewX)(Xk, pick, scale, SR, oper.sub), rnd)
        trial_fit = task.eval(trial)
        if trial_fit < Xb_fit:
            grade += BONUS1
            Xb, Xb_fit = trial.copy(), trial_fit
        elif trial_fit != Xk_fit:
            if trial_fit > Xk_fit:
                # Subtracting made it worse: try adding the same step instead.
                trial = task.repair(vectorize(genNewX)(Xk, pick, scale, SR, oper.add), rnd)
                trial_fit = task.eval(trial)
                if trial_fit < Xb_fit:
                    grade += BONUS1
                    Xb, Xb_fit = trial.copy(), trial_fit
                elif trial_fit < Xk_fit:
                    grade += BONUS2
                    Xk, Xk_fit, improve = trial.copy(), trial_fit, True
            else:
                grade += BONUS2
                Xk, Xk_fit, improve = trial.copy(), trial_fit, True
    return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR
def MTS_LS3(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, BONUS1=10, BONUS2=1, rnd=rand, **ukwargs):
    r"""Multiple trajectory local search three.

    For every dimension, evaluates three small fixed offsets (+0.1, -0.1, +0.2),
    then moves the working point by a randomized combination of the observed
    fitness differences.

    Args:
        Xk (numpy.ndarray): Current solution.
        Xk_fit (float): Current solutions fitness/function value.
        Xb (numpy.ndarray): Global best solution.
        Xb_fit (float): Global best solutions fitness/function value.
        improve (bool): Has the solution been improved.
        SR (numpy.ndarray): Search range.
        task (Task): Optimization task.
        BONUS1 (int): Bonus reward for improving global best solution.
        BONUS2 (int): Bonus reward for improving solution.
        rnd (mtrand.RandomState): Random number generator.
        **ukwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
            1. New solution.
            2. New solutions fitness/function value.
            3. Global best if found else old global best.
            4. Global bests function/fitness value.
            5. If solution has improved.
            6. Grade obtained by this run.
            7. Search range.
    """
    work, grade = copy(Xk), 0.0
    for j in range(len(Xk)):
        probe1, probe2, probe3 = copy(work), copy(work), copy(work)
        probe1[j] += 0.1
        probe2[j] -= 0.1
        probe3[j] += 0.2
        probe1, probe2, probe3 = task.repair(probe1, rnd), task.repair(probe2, rnd), task.repair(probe3, rnd)
        fit1, fit2, fit3 = task.eval(probe1), task.eval(probe2), task.eval(probe3)
        if fit1 < Xb_fit:
            grade += BONUS1
            Xb, Xb_fit, improve = probe1.copy(), fit1, True
        if fit2 < Xb_fit:
            grade += BONUS1
            Xb, Xb_fit, improve = probe2.copy(), fit2, True
        if fit3 < Xb_fit:
            grade += BONUS1
            Xb, Xb_fit, improve = probe3.copy(), fit3, True
        # Fitness deltas; infinite probe values contribute nothing.
        D1 = Xk_fit - fit1 if abs(fit1) != inf else 0
        D2 = Xk_fit - fit2 if abs(fit2) != inf else 0
        D3 = Xk_fit - fit3 if abs(fit3) != inf else 0
        if D1 > 0:
            grade, improve = grade + BONUS2, True
        if D2 > 0:
            grade, improve = grade + BONUS2, True
        if D3 > 0:
            grade, improve = grade + BONUS2, True
        # Randomized step weights; keep the rnd.rand() draw order stable.
        a = 0.4 + rnd.rand() * 0.1
        b = 0.1 + rnd.rand() * 0.2
        c = rnd.rand()
        work[j] += a * (D1 - D2) + b * (D3 - 2 * D1) + c
        work = task.repair(work, rnd)
        trial_fit = task.eval(work)
        if trial_fit < Xk_fit:
            if trial_fit < Xb_fit:
                Xb, Xb_fit = work.copy(), trial_fit
                grade += BONUS1
            else:
                grade += BONUS2
            # Note: Xk aliases the working array here, as in the reference code.
            Xk, Xk_fit, improve = work, trial_fit, True
    return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR
def MTS_LS3v1(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, phi=3, BONUS1=10, BONUS2=1, rnd=rand, **ukwargs):
    r"""Multiple trajectory local search three version one.

    Repeatedly samples ``phi`` perturbed permutations of the current solution,
    shrinking the dispersion around the working point until it drops below
    ``1e-3`` in every dimension.

    Args:
        Xk (numpy.ndarray): Current solution.
        Xk_fit (float): Current solutions fitness/function value.
        Xb (numpy.ndarray): Global best solution.
        Xb_fit (float): Global best solutions fitness/function value.
        improve (bool): Has the solution been improved.
        SR (numpy.ndarray): Search range.
        task (Task): Optimization task.
        phi (int): Number of new generated positions.
        BONUS1 (int): Bonus reward for improving global best solution.
        BONUS2 (int): Bonus reward for improving solution.
        rnd (mtrand.RandomState): Random number generator.
        **ukwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
            1. New solution.
            2. New solutions fitness/function value.
            3. Global best if found else old global best.
            4. Global bests function/fitness value.
            5. If solution has improved.
            6. Grade obtained by this run.
            7. Search range.
    """
    grade, Disp = 0.0, task.bRange / 10
    # Loop while any dimension still has significant dispersion.
    # (.any() replaces the fragile/unidiomatic `True in (Disp > 1e-3)` membership test.)
    while (Disp > 1e-3).any():
        # phi candidates: random permutations of Xk plus uniform noise scaled by Disp.
        Xn = apply_along_axis(task.repair, 1, asarray([rnd.permutation(Xk) + Disp * rnd.uniform(-1, 1, len(Xk)) for _ in range(phi)]), rnd)
        Xn_f = apply_along_axis(task.eval, 1, Xn)
        iBetter, iBetterBest = argwhere(Xn_f < Xk_fit), argwhere(Xn_f < Xb_fit)
        grade += len(iBetterBest) * BONUS1 + (len(iBetter) - len(iBetterBest)) * BONUS2
        if len(iBetterBest) > 0:
            ib, improve = argmin(Xn_f[iBetterBest]), True
            # Hoist the repeated fancy-indexing expression; Xb and Xk get independent copies.
            xn, xn_f = Xn[iBetterBest][ib][0], Xn_f[iBetterBest][ib][0]
            Xb, Xb_fit, Xk, Xk_fit = xn.copy(), xn_f, xn.copy(), xn_f
        elif len(iBetter) > 0:
            ib, improve = argmin(Xn_f[iBetter]), True
            Xk, Xk_fit = Xn[iBetter][ib][0].copy(), Xn_f[iBetter][ib][0]
        # Contract the sampling window around the (possibly updated) solution.
        Su, Sl = fmin(task.Upper, Xk + 2 * Disp), fmax(task.Lower, Xk - 2 * Disp)
        Disp = (Su - Sl) / 10
    return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR
class MultipleTrajectorySearch(Algorithm):
    r"""Implementation of Multiple trajectory search.

    Algorithm:
        Multiple trajectory search

    Date:
        2018

    Authors:
        Klemen Berkovic

    License:
        MIT

    Reference URL:
        https://ieeexplore.ieee.org/document/4631210/

    Reference paper:
        Lin-Yu Tseng and Chun Chen, "Multiple trajectory search for Large Scale Global Optimization," 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence), Hong Kong, 2008, pp. 3052-3059. doi: 10.1109/CEC.2008.4631210

    Attributes:
        Name (List[Str]): List of strings representing algorithm name.
        LSs (Iterable[Callable]): Local searches to use.
        BONUS1 (int): Bonus for improving global best solution.
        BONUS2 (int): Bonus for improving solution.
        NoLsTests (int): Number of test runs on local search algorithms.
        NoLs (int): Number of local search algorithm runs.
        NoLsBest (int): Number of locals search algorithm runs on best solution.
        NoEnabled (int): Number of best solution for testing.

    See Also:
        * :class:`NiaPy.algorithms.Algorithm`
    """
    Name = ['MultipleTrajectorySearch', 'MTS']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""Lin-Yu Tseng and Chun Chen, "Multiple trajectory search for Large Scale Global Optimization," 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence), Hong Kong, 2008, pp. 3052-3059. doi: 10.1109/CEC.2008.4631210"""

    @staticmethod
    def typeParameters():
        r"""Get dictionary with functions for checking values of parameters.

        Returns:
            Dict[str, Callable]:
                * M (Callable[[int], bool])
                * NoLsTests (Callable[[int], bool])
                * NoLs (Callable[[int], bool])
                * NoLsBest (Callable[[int], bool])
                * NoEnabled (Callable[[int], bool])
                * BONUS1 (Callable[[Union[int, float]], bool])
                * BONUS2 (Callable[[Union[int, float]], bool])
        """
        return {
            'M': lambda x: isinstance(x, int) and x > 0,
            'NoLsTests': lambda x: isinstance(x, int) and x >= 0,
            'NoLs': lambda x: isinstance(x, int) and x >= 0,
            'NoLsBest': lambda x: isinstance(x, int) and x >= 0,
            'NoEnabled': lambda x: isinstance(x, int) and x > 0,
            'BONUS1': lambda x: isinstance(x, (int, float)) and x > 0,
            'BONUS2': lambda x: isinstance(x, (int, float)) and x > 0,
        }

    def setParameters(self, M=40, NoLsTests=5, NoLs=5, NoLsBest=5, NoEnabled=17, BONUS1=10, BONUS2=1, LSs=(MTS_LS1, MTS_LS2, MTS_LS3), **ukwargs):
        r"""Set the arguments of the algorithm.

        Arguments:
            M (int): Number of individuals in population.
            NoLsTests (int): Number of test runs on local search algorithms.
            NoLs (int): Number of local search algorithm runs.
            NoLsBest (int): Number of locals search algorithm runs on best solution.
            NoEnabled (int): Number of best solution for testing.
            BONUS1 (int): Bonus for improving global best solution.
            BONUS2 (int): Bonus for improving self.
            LSs (Iterable[Callable]): Local searches to use.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.setParameters`
        """
        Algorithm.setParameters(self, NP=ukwargs.pop('NP', M), **ukwargs)
        self.NoLsTests, self.NoLs, self.NoLsBest, self.NoEnabled, self.BONUS1, self.BONUS2 = NoLsTests, NoLs, NoLsBest, NoEnabled, BONUS1, BONUS2
        self.LSs = LSs

    def getParameters(self):
        r"""Get parameters values for the algorithm.

        Returns:
            Dict[str, Any]: Algorithm parameters.
        """
        d = Algorithm.getParameters(self)
        d.update({
            'M': d.pop('NP', self.NP),
            'NoLsTests': self.NoLsTests,
            'NoLs': self.NoLs,
            'NoLsBest': self.NoLsBest,
            'BONUS1': self.BONUS1,
            'BONUS2': self.BONUS2,
            'NoEnabled': self.NoEnabled,
            'LSs': self.LSs
        })
        return d

    def GradingRun(self, x, x_f, xb, fxb, improve, SR, task):
        r"""Run local search for getting scores of local searches.

        Each local search in ``self.LSs`` is tested ``NoLsTests`` times on its
        own copy of the solution; grades are accumulated per local search and
        the index of the best-graded one is returned for the subsequent
        :func:`LsRun`.

        Args:
            x (numpy.ndarray): Solution for grading.
            x_f (float): Solutions fitness/function value.
            xb (numpy.ndarray): Global best solution.
            fxb (float): Global best solutions function/fitness value.
            improve (bool): Info if solution has improved.
            SR (numpy.ndarray): Search range.
            task (Task): Optimization task.

        Returns:
            Tuple[numpy.ndarray, float, numpy.ndarray, float, int]:
                1. Best solution found by the tested local searches.
                2. Its function/fitness value.
                3. Global best solution.
                4. Global best solutions fitness/function value.
                5. Index of the best graded local search.
        """
        # Size the grade array by the actual number of local searches (was a
        # hard-coded 3, which breaks for user-supplied LSs of other lengths),
        # and give each local search an INDEPENDENT copy of the solution (the
        # previous `[[x, x_f]] * len(...)` aliased one shared list).
        ls_grades = full(len(self.LSs), 0.0)
        Xn = [[x.copy(), x_f] for _ in range(len(self.LSs))]
        for k in range(len(self.LSs)):
            for _ in range(self.NoLsTests):
                Xn[k][0], Xn[k][1], xb, fxb, improve, g, SR = self.LSs[k](Xn[k][0], Xn[k][1], xb, fxb, improve, SR, task, BONUS1=self.BONUS1, BONUS2=self.BONUS2, rnd=self.Rand)
                ls_grades[k] += g
        xn, xn_f = min(Xn, key=lambda e: e[1])
        # Select the local search with the highest accumulated grade, as the
        # MTS paper prescribes (previously the leaked loop variable `k` --
        # always the last local search -- was returned).
        k_best = int(argsort(ls_grades)[-1])
        return xn, xn_f, xb, fxb, k_best

    def LsRun(self, k, x, x_f, xb, fxb, improve, SR, g, task):
        r"""Run a selected local search.

        Args:
            k (int): Index of local search.
            x (numpy.ndarray): Current solution.
            x_f (float): Current solutions function/fitness value.
            xb (numpy.ndarray): Global best solution.
            fxb (float): Global best solutions fitness/function value.
            improve (bool): If the solution has improved.
            SR (numpy.ndarray): Search range.
            g (int): Grade.
            task (Task): Optimization task.

        Returns:
            Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, numpy.ndarray, int]:
                1. New best solution found.
                2. New best solutions found function/fitness value.
                3. Global best solution.
                4. Global best solutions function/fitness value.
                5. If the solution has improved.
                6. Search range.
                7. Grade of local search run.
        """
        for _j in range(self.NoLs):
            x, x_f, xb, fxb, improve, grade, SR = self.LSs[k](x, x_f, xb, fxb, improve, SR, task, BONUS1=self.BONUS1, BONUS2=self.BONUS2, rnd=self.Rand)
            g += grade
        return x, x_f, xb, fxb, improve, SR, g

    def initPopulation(self, task):
        r"""Initialize starting population.

        Args:
            task (Task): Optimization task.

        Returns:
            Tuple[numpy.ndarray, numpy.ndarray, Dict[str, Any]]:
                1. Initialized population.
                2. Initialized populations function/fitness value.
                3. Additional arguments:
                    * enable (numpy.ndarray): If solution/individual is enabled.
                    * improve (numpy.ndarray): If solution/individual is improved.
                    * SR (numpy.ndarray): Search range.
                    * grades (numpy.ndarray): Grade of solution/individual.
        """
        X, X_f, d = Algorithm.initPopulation(self, task)
        # Every individual starts enabled, marked improved, with half-range SR.
        enable, improve, SR, grades = full(self.NP, True), full(self.NP, True), full([self.NP, task.D], task.bRange / 2), full(self.NP, 0.0)
        d.update({
            'enable': enable,
            'improve': improve,
            'SR': SR,
            'grades': grades
        })
        return X, X_f, d

    def runIteration(self, task, X, X_f, xb, xb_f, enable, improve, SR, grades, **dparams):
        r"""Core function of MultipleTrajectorySearch algorithm.

        Args:
            task (Task): Optimization task.
            X (numpy.ndarray): Current population of individuals.
            X_f (numpy.ndarray): Current individuals function/fitness values.
            xb (numpy.ndarray): Global best individual.
            xb_f (float): Global best individual function/fitness value.
            enable (numpy.ndarray): Enabled status of individuals.
            improve (numpy.ndarray): Improved status of individuals.
            SR (numpy.ndarray): Search ranges of individuals.
            grades (numpy.ndarray): Grades of individuals.
            **dparams (Dict[str, Any]): Additional arguments.

        Returns:
            Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
                1. New population.
                2. New populations function/fitness values.
                3. New global best solution.
                4. New global best solutions fitness/objective value.
                5. Additional arguments:
                    * enable (numpy.ndarray): If solution/individual is enabled.
                    * improve (numpy.ndarray): If solution/individual is improved.
                    * SR (numpy.ndarray): Search range.
                    * grades (numpy.ndarray): Grade of solution/individual.
        """
        for i in range(len(X)):
            if not enable[i]: continue
            enable[i], grades[i] = False, 0
            # Grade the local searches, then run the best-graded one NoLs times.
            X[i], X_f[i], xb, xb_f, k = self.GradingRun(X[i], X_f[i], xb, xb_f, improve[i], SR[i], task)
            X[i], X_f[i], xb, xb_f, improve[i], SR[i], grades[i] = self.LsRun(k, X[i], X_f[i], xb, xb_f, improve[i], SR[i], grades[i], task)
        # Intensify around the global best with local search one.
        for _ in range(self.NoLsBest): _, _, xb, xb_f, _, _, _ = MTS_LS1(xb, xb_f, xb, xb_f, False, task.bRange.copy() / 10, task, rnd=self.Rand)
        # Re-enable the NoEnabled best-graded individuals for the next iteration.
        enable[argsort(grades)[:self.NoEnabled]] = True
        return X, X_f, xb, xb_f, {'enable': enable, 'improve': improve, 'SR': SR, 'grades': grades}
class MultipleTrajectorySearchV1(MultipleTrajectorySearch):
    r"""Implementation of Multiple trajectory search.

    A variant of :class:`MultipleTrajectorySearch` that disables the
    best-solution refinement phase and uses local searches one (version one)
    and two only.

    Algorithm:
        Multiple trajectory search

    Date:
        2018

    Authors:
        Klemen Berkovic

    License:
        MIT

    Reference URL:
        https://ieeexplore.ieee.org/document/4983179/

    Reference paper:
        Tseng, Lin-Yu, and Chun Chen. "Multiple trajectory search for unconstrained/constrained multi-objective optimization." Evolutionary Computation, 2009. CEC'09. IEEE Congress on. IEEE, 2009.

    Attributes:
        Name (List[str]): List of strings representing algorithm name.

    See Also:
        * :class:`NiaPy.algorithms.other.MultipleTrajectorySearch`
    """
    Name = ['MultipleTrajectorySearchV1', 'MTSv1']

    @staticmethod
    def algorithmInfo():
        r"""Get basic information of algorithm.

        Returns:
            str: Basic information of algorithm.

        See Also:
            * :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
        """
        return r"""Tseng, Lin-Yu, and Chun Chen. "Multiple trajectory search for unconstrained/constrained multi-objective optimization." Evolutionary Computation, 2009. CEC'09. IEEE Congress on. IEEE, 2009."""

    def setParameters(self, **kwargs):
        r"""Set core parameters of MultipleTrajectorySearchV1 algorithm.

        Args:
            **kwargs (Dict[str, Any]): Additional arguments.

        See Also:
            * :func:`NiaPy.algorithms.other.MultipleTrajectorySearch.setParameters`
        """
        # NoLsBest is forced to 0 below, so drop any caller-supplied value.
        kwargs.pop('NoLsBest', None)
        MultipleTrajectorySearch.setParameters(self, NoLsBest=0, LSs=(MTS_LS1v1, MTS_LS2), **kwargs)