# encoding=utf8
import logging
import operator as oper

from numpy import random as rand, vectorize, argwhere, copy, apply_along_axis, argmin, argsort, fmin, fmax, full, asarray, abs, inf

from NiaPy.algorithms.algorithm import Algorithm

logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.other')
logger.setLevel('INFO')

__all__ = ['MultipleTrajectorySearch', 'MultipleTrajectorySearchV1', 'MTS_LS1', 'MTS_LS1v1', 'MTS_LS2', 'MTS_LS3', 'MTS_LS3v1']

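# The local searches below implement the building blocks of MTS (Tseng & Chen,
# 2008). Each returns the (possibly improved) solution and its fitness, the
# global best and its fitness, an improvement flag, a grade built from the
# BONUS1/BONUS2 rewards, and the updated search range SR.
# * MTS_LS1 / MTS_LS1v1 probe one dimension at a time with x_i - SR_i and, if
#   that is worse, x_i + 0.5 * SR_i (v1 scales the first step by a random
#   factor from U(-1, 1)).
# * MTS_LS2 perturbs a random subset of dimensions simultaneously (see genNewX).
# * MTS_LS3 probes each dimension with +0.1, -0.1 and +0.2 and combines the
#   resulting differences; MTS_LS3v1 samples phi permuted candidates with a
#   shrinking displacement.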
def MTS_LS1(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, BONUS1=10, BONUS2=1, sr_fix=0.4, rnd=rand, **ukwargs):
	r"""Multiple trajectory local search one.

	Args:
		Xk (numpy.ndarray): Current solution.
		Xk_fit (float): Current solution's fitness/function value.
		Xb (numpy.ndarray): Global best solution.
		Xb_fit (float): Global best solution's fitness/function value.
		improve (bool): Has the solution been improved.
		SR (numpy.ndarray): Search range.
		task (Task): Optimization task.
		BONUS1 (int): Bonus reward for improving the global best solution.
		BONUS2 (int): Bonus reward for improving the current solution.
		sr_fix (float): Scaling factor used to reset the search range when it becomes too small.
		rnd (mtrand.RandomState): Random number generator.
		**ukwargs (Dict[str, Any]): Additional arguments.

	Returns:
		Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
			1. New solution.
			2. New solution's fitness/function value.
			3. Global best if found, else old global best.
			4. Global best's function/fitness value.
			5. If the solution has improved.
			6. Grade of this local search run.
			7. Search range.
	"""
	if not improve:
		SR /= 2
		ifix = argwhere(SR < 1e-15)
		SR[ifix] = task.bRange[ifix] * sr_fix
	improve, grade = False, 0.0
	for i in range(len(Xk)):
		Xk_i_old = Xk[i]
		Xk[i] = Xk_i_old - SR[i]
		Xk = task.repair(Xk, rnd)
		Xk_fit_new = task.eval(Xk)
		if Xk_fit_new < Xb_fit: grade, Xb, Xb_fit = grade + BONUS1, Xk.copy(), Xk_fit_new
		if Xk_fit_new == Xk_fit: Xk[i] = Xk_i_old
		elif Xk_fit_new > Xk_fit:
			Xk[i] = Xk_i_old + 0.5 * SR[i]
			Xk = task.repair(Xk, rnd)
			Xk_fit_new = task.eval(Xk)
			if Xk_fit_new < Xb_fit: grade, Xb, Xb_fit = grade + BONUS1, Xk.copy(), Xk_fit_new
			if Xk_fit_new >= Xk_fit: Xk[i] = Xk_i_old
			else: grade, improve, Xk_fit = grade + BONUS2, True, Xk_fit_new
		else: grade, improve, Xk_fit = grade + BONUS2, True, Xk_fit_new
	return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR

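# The main difference from MTS_LS1 is the first probe: the step along each
# dimension is scaled by a random factor D[i] drawn from U(-1, 1), so it can
# move in either direction by a varying amount; the 0.5 * SR backtracking step
# is unchanged.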
def MTS_LS1v1(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, BONUS1=10, BONUS2=1, sr_fix=0.4, rnd=rand, **ukwargs):
	r"""Multiple trajectory local search one, version one.

	Args:
		Xk (numpy.ndarray): Current solution.
		Xk_fit (float): Current solution's fitness/function value.
		Xb (numpy.ndarray): Global best solution.
		Xb_fit (float): Global best solution's fitness/function value.
		improve (bool): Has the solution been improved.
		SR (numpy.ndarray): Search range.
		task (Task): Optimization task.
		BONUS1 (int): Bonus reward for improving the global best solution.
		BONUS2 (int): Bonus reward for improving the current solution.
		sr_fix (float): Scaling factor used to reset the search range when it becomes too small.
		rnd (mtrand.RandomState): Random number generator.
		**ukwargs (Dict[str, Any]): Additional arguments.

	Returns:
		Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
			1. New solution.
			2. New solution's fitness/function value.
			3. Global best if found, else old global best.
			4. Global best's function/fitness value.
			5. If the solution has improved.
			6. Grade of this local search run.
			7. Search range.
	"""
	if not improve:
		SR /= 2
		ifix = argwhere(SR < 1e-15)
		SR[ifix] = task.bRange[ifix] * sr_fix
	improve, D, grade = False, rnd.uniform(-1, 1, task.D), 0.0
	for i in range(len(Xk)):
		Xk_i_old = Xk[i]
		Xk[i] = Xk_i_old - SR[i] * D[i]
		Xk = task.repair(Xk, rnd)
		Xk_fit_new = task.eval(Xk)
		if Xk_fit_new < Xb_fit: grade, Xb, Xb_fit = grade + BONUS1, Xk.copy(), Xk_fit_new
		elif Xk_fit_new == Xk_fit: Xk[i] = Xk_i_old
		elif Xk_fit_new > Xk_fit:
			Xk[i] = Xk_i_old + 0.5 * SR[i]
			Xk = task.repair(Xk, rnd)
			Xk_fit_new = task.eval(Xk)
			if Xk_fit_new < Xb_fit: grade, Xb, Xb_fit = grade + BONUS1, Xk.copy(), Xk_fit_new
			elif Xk_fit_new >= Xk_fit: Xk[i] = Xk_i_old
			else: grade, improve, Xk_fit = grade + BONUS2, True, Xk_fit_new
		else: grade, improve, Xk_fit = grade + BONUS2, True, Xk_fit_new
	return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR

def genNewX(x, r, d, SR, op):
	r"""Move a solution to another position based on the given operator.

	Args:
		x (numpy.ndarray): Solution to move.
		r (int): Random decision value; the move is applied only when it equals 0.
		d (float): Scale factor.
		SR (numpy.ndarray): Search range.
		op (operator): Operator to use.

	Returns:
		numpy.ndarray: Moved solution based on operator.
	"""
	return op(x, SR * d) if r == 0 else x

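# Example with hypothetical values: genNewX(1.0, 0, 0.5, 0.2, oper.sub) applies
# oper.sub(1.0, 0.2 * 0.5) and returns 0.9, while any non-zero r leaves x
# unchanged. MTS_LS2 below calls genNewX through numpy's vectorize, so the
# decision is made element-wise with per-dimension values of r and d.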
def MTS_LS2(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, BONUS1=10, BONUS2=1, sr_fix=0.4, rnd=rand, **ukwargs):
	r"""Multiple trajectory local search two.

	Args:
		Xk (numpy.ndarray): Current solution.
		Xk_fit (float): Current solution's fitness/function value.
		Xb (numpy.ndarray): Global best solution.
		Xb_fit (float): Global best solution's fitness/function value.
		improve (bool): Has the solution been improved.
		SR (numpy.ndarray): Search range.
		task (Task): Optimization task.
		BONUS1 (int): Bonus reward for improving the global best solution.
		BONUS2 (int): Bonus reward for improving the current solution.
		sr_fix (float): Scaling factor used to reset the search range when it becomes too small.
		rnd (mtrand.RandomState): Random number generator.
		**ukwargs (Dict[str, Any]): Additional arguments.

	Returns:
		Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
			1. New solution.
			2. New solution's fitness/function value.
			3. Global best if found, else old global best.
			4. Global best's function/fitness value.
			5. If the solution has improved.
			6. Grade of this local search run.
			7. Search range.

	See Also:
		* :func:`NiaPy.algorithms.other.genNewX`
	"""
	if not improve:
		SR /= 2
		ifix = argwhere(SR < 1e-15)
		SR[ifix] = task.bRange[ifix] * sr_fix
	improve, grade = False, 0.0
	for _ in range(len(Xk)):
		D = -1 + rnd.rand(len(Xk)) * 2
		R = rnd.choice([0, 1, 2, 3], len(Xk))
		Xk_new = task.repair(vectorize(genNewX)(Xk, R, D, SR, oper.sub), rnd)
		Xk_fit_new = task.eval(Xk_new)
		if Xk_fit_new < Xb_fit: grade, Xb, Xb_fit = grade + BONUS1, Xk_new.copy(), Xk_fit_new
		elif Xk_fit_new != Xk_fit:
			if Xk_fit_new > Xk_fit:
				Xk_new = task.repair(vectorize(genNewX)(Xk, R, D, SR, oper.add), rnd)
				Xk_fit_new = task.eval(Xk_new)
				if Xk_fit_new < Xb_fit: grade, Xb, Xb_fit = grade + BONUS1, Xk_new.copy(), Xk_fit_new
				elif Xk_fit_new < Xk_fit: grade, Xk, Xk_fit, improve = grade + BONUS2, Xk_new.copy(), Xk_fit_new, True
			else: grade, Xk, Xk_fit, improve = grade + BONUS2, Xk_new.copy(), Xk_fit_new, True
	return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR

def MTS_LS3(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, BONUS1=10, BONUS2=1, rnd=rand, **ukwargs):
	r"""Multiple trajectory local search three.

	Args:
		Xk (numpy.ndarray): Current solution.
		Xk_fit (float): Current solution's fitness/function value.
		Xb (numpy.ndarray): Global best solution.
		Xb_fit (float): Global best solution's fitness/function value.
		improve (bool): Has the solution been improved.
		SR (numpy.ndarray): Search range.
		task (Task): Optimization task.
		BONUS1 (int): Bonus reward for improving the global best solution.
		BONUS2 (int): Bonus reward for improving the current solution.
		rnd (mtrand.RandomState): Random number generator.
		**ukwargs (Dict[str, Any]): Additional arguments.

	Returns:
		Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
			1. New solution.
			2. New solution's fitness/function value.
			3. Global best if found, else old global best.
			4. Global best's function/fitness value.
			5. If the solution has improved.
			6. Grade of this local search run.
			7. Search range.
	"""
	Xk_new, grade = copy(Xk), 0.0
	for i in range(len(Xk)):
		Xk1, Xk2, Xk3 = copy(Xk_new), copy(Xk_new), copy(Xk_new)
		Xk1[i], Xk2[i], Xk3[i] = Xk1[i] + 0.1, Xk2[i] - 0.1, Xk3[i] + 0.2
		Xk1, Xk2, Xk3 = task.repair(Xk1, rnd), task.repair(Xk2, rnd), task.repair(Xk3, rnd)
		Xk1_fit, Xk2_fit, Xk3_fit = task.eval(Xk1), task.eval(Xk2), task.eval(Xk3)
		if Xk1_fit < Xb_fit: grade, Xb, Xb_fit, improve = grade + BONUS1, Xk1.copy(), Xk1_fit, True
		if Xk2_fit < Xb_fit: grade, Xb, Xb_fit, improve = grade + BONUS1, Xk2.copy(), Xk2_fit, True
		if Xk3_fit < Xb_fit: grade, Xb, Xb_fit, improve = grade + BONUS1, Xk3.copy(), Xk3_fit, True
		D1, D2, D3 = Xk_fit - Xk1_fit if abs(Xk1_fit) != inf else 0, Xk_fit - Xk2_fit if abs(Xk2_fit) != inf else 0, Xk_fit - Xk3_fit if abs(Xk3_fit) != inf else 0
		if D1 > 0: grade, improve = grade + BONUS2, True
		if D2 > 0: grade, improve = grade + BONUS2, True
		if D3 > 0: grade, improve = grade + BONUS2, True
		a, b, c = 0.4 + rnd.rand() * 0.1, 0.1 + rnd.rand() * 0.2, rnd.rand()
		Xk_new[i] += a * (D1 - D2) + b * (D3 - 2 * D1) + c
		Xk_new = task.repair(Xk_new, rnd)
		Xk_fit_new = task.eval(Xk_new)
		if Xk_fit_new < Xk_fit:
			if Xk_fit_new < Xb_fit: Xb, Xb_fit, grade = Xk_new.copy(), Xk_fit_new, grade + BONUS1
			else: grade += BONUS2
			Xk, Xk_fit, improve = Xk_new, Xk_fit_new, True
	return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR

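# MTS_LS3v1 works on a pool of candidates instead of single-dimension probes:
# each step evaluates phi random permutations of the current solution perturbed
# by a displacement Disp, adopts the best one when it improves on the current
# or global best, and then shrinks Disp around the current solution until every
# component drops to 1e-3 or below.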
def MTS_LS3v1(Xk, Xk_fit, Xb, Xb_fit, improve, SR, task, phi=3, BONUS1=10, BONUS2=1, rnd=rand, **ukwargs):
	r"""Multiple trajectory local search three, version one.

	Args:
		Xk (numpy.ndarray): Current solution.
		Xk_fit (float): Current solution's fitness/function value.
		Xb (numpy.ndarray): Global best solution.
		Xb_fit (float): Global best solution's fitness/function value.
		improve (bool): Has the solution been improved.
		SR (numpy.ndarray): Search range.
		task (Task): Optimization task.
		phi (int): Number of new positions generated per step.
		BONUS1 (int): Bonus reward for improving the global best solution.
		BONUS2 (int): Bonus reward for improving the current solution.
		rnd (mtrand.RandomState): Random number generator.
		**ukwargs (Dict[str, Any]): Additional arguments.

	Returns:
		Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, float, numpy.ndarray]:
			1. New solution.
			2. New solution's fitness/function value.
			3. Global best if found, else old global best.
			4. Global best's function/fitness value.
			5. If the solution has improved.
			6. Grade of this local search run.
			7. Search range.
	"""
	grade, Disp = 0.0, task.bRange / 10
	while (Disp > 1e-3).any():
		Xn = apply_along_axis(task.repair, 1, asarray([rnd.permutation(Xk) + Disp * rnd.uniform(-1, 1, len(Xk)) for _ in range(phi)]), rnd)
		Xn_f = apply_along_axis(task.eval, 1, Xn)
		iBetter, iBetterBest = argwhere(Xn_f < Xk_fit), argwhere(Xn_f < Xb_fit)
		grade += len(iBetterBest) * BONUS1 + (len(iBetter) - len(iBetterBest)) * BONUS2
		if len(Xn_f[iBetterBest]) > 0:
			ib, improve = argmin(Xn_f[iBetterBest]), True
			Xb, Xb_fit, Xk, Xk_fit = Xn[iBetterBest][ib][0].copy(), Xn_f[iBetterBest][ib][0], Xn[iBetterBest][ib][0].copy(), Xn_f[iBetterBest][ib][0]
		elif len(Xn_f[iBetter]) > 0:
			ib, improve = argmin(Xn_f[iBetter]), True
			Xk, Xk_fit = Xn[iBetter][ib][0].copy(), Xn_f[iBetter][ib][0]
		Su, Sl = fmin(task.Upper, Xk + 2 * Disp), fmax(task.Lower, Xk - 2 * Disp)
		Disp = (Su - Sl) / 10
	return Xk, Xk_fit, Xb, Xb_fit, improve, grade, SR

class MultipleTrajectorySearch(Algorithm):
	r"""Implementation of Multiple trajectory search.

	Algorithm:
		Multiple trajectory search

	Date:
		2018

	Authors:
		Klemen Berkovic

	License:
		MIT

	Reference URL:
		https://ieeexplore.ieee.org/document/4631210/

	Reference paper:
		Lin-Yu Tseng and Chun Chen, "Multiple trajectory search for Large Scale Global Optimization," 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence), Hong Kong, 2008, pp. 3052-3059. doi: 10.1109/CEC.2008.4631210

	Attributes:
		Name (List[str]): List of strings representing algorithm name.
		LSs (Iterable[Callable[[numpy.ndarray, float, numpy.ndarray, float, bool, numpy.ndarray, Task, Dict[str, Any]], Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, int, numpy.ndarray]]]): Local searches to use.
		BONUS1 (int): Bonus for improving the global best solution.
		BONUS2 (int): Bonus for improving the current solution.
		NoLsTests (int): Number of test runs of each local search.
		NoLs (int): Number of runs of the selected local search.
		NoLsBest (int): Number of local search runs on the best solution.
		NoEnabled (int): Number of best solutions enabled for the next iteration.

	See Also:
		* :class:`NiaPy.algorithms.Algorithm`
	"""
	Name = ['MultipleTrajectorySearch', 'MTS']

	@staticmethod
	def algorithmInfo():
		r"""Get basic information about the algorithm.

		Returns:
			str: Basic information about the algorithm.

		See Also:
			* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
		"""
		return r"""Lin-Yu Tseng and Chun Chen, "Multiple trajectory search for Large Scale Global Optimization," 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence), Hong Kong, 2008, pp. 3052-3059. doi: 10.1109/CEC.2008.4631210"""

	@staticmethod
	def typeParameters():
		r"""Get dictionary with functions for checking values of parameters.

		Returns:
			Dict[str, Callable]:
				* M (Callable[[int], bool])
				* NoLsTests (Callable[[int], bool])
				* NoLs (Callable[[int], bool])
				* NoLsBest (Callable[[int], bool])
				* NoEnabled (Callable[[int], bool])
				* BONUS1 (Callable[[Union[int, float]], bool])
				* BONUS2 (Callable[[Union[int, float]], bool])
		"""
		return {
			'M': lambda x: isinstance(x, int) and x > 0,
			'NoLsTests': lambda x: isinstance(x, int) and x >= 0,
			'NoLs': lambda x: isinstance(x, int) and x >= 0,
			'NoLsBest': lambda x: isinstance(x, int) and x >= 0,
			'NoEnabled': lambda x: isinstance(x, int) and x > 0,
			'BONUS1': lambda x: isinstance(x, (int, float)) and x > 0,
			'BONUS2': lambda x: isinstance(x, (int, float)) and x > 0,
		}

	def setParameters(self, M=40, NoLsTests=5, NoLs=5, NoLsBest=5, NoEnabled=17, BONUS1=10, BONUS2=1, LSs=(MTS_LS1, MTS_LS2, MTS_LS3), **ukwargs):
		r"""Set the arguments of the algorithm.

		Arguments:
			M (int): Number of individuals in the population.
			NoLsTests (int): Number of test runs of each local search.
			NoLs (int): Number of runs of the selected local search.
			NoLsBest (int): Number of local search runs on the best solution.
			NoEnabled (int): Number of best solutions enabled for the next iteration.
			BONUS1 (int): Bonus for improving the global best solution.
			BONUS2 (int): Bonus for improving the current solution.
			LSs (Iterable[Callable[[numpy.ndarray, float, numpy.ndarray, float, bool, numpy.ndarray, Task, Dict[str, Any]], Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, int, numpy.ndarray]]]): Local searches to use.

		See Also:
			* :func:`NiaPy.algorithms.Algorithm.setParameters`
		"""
		Algorithm.setParameters(self, NP=ukwargs.pop('NP', M), **ukwargs)
		self.NoLsTests, self.NoLs, self.NoLsBest, self.NoEnabled, self.BONUS1, self.BONUS2 = NoLsTests, NoLs, NoLsBest, NoEnabled, BONUS1, BONUS2
		self.LSs = LSs

	def getParameters(self):
		r"""Get parameter values of the algorithm.

		Returns:
			Dict[str, Any]: Algorithm parameters.
		"""
		d = Algorithm.getParameters(self)
		d.update({
			'M': d.pop('NP', self.NP),
			'NoLsTests': self.NoLsTests,
			'NoLs': self.NoLs,
			'NoLsBest': self.NoLsBest,
			'BONUS1': self.BONUS1,
			'BONUS2': self.BONUS2,
			'NoEnabled': self.NoEnabled,
			'LSs': self.LSs
		})
		return d

	def GradingRun(self, x, x_f, xb, fxb, improve, SR, task):
		r"""Run all local searches on a solution to grade them.

		Args:
			x (numpy.ndarray): Solution for grading.
			x_f (float): Solution's fitness/function value.
			xb (numpy.ndarray): Global best solution.
			fxb (float): Global best solution's function/fitness value.
			improve (bool): Whether the solution has improved.
			SR (numpy.ndarray): Search range.
			task (Task): Optimization task.

		Returns:
			Tuple[numpy.ndarray, float, numpy.ndarray, float, int]:
				1. New solution.
				2. New solution's function/fitness value.
				3. Global best solution.
				4. Global best solution's fitness/function value.
				5. Index of the selected local search.
		"""
		ls_grades, Xn = full(len(self.LSs), 0.0), [[x.copy(), x_f] for _ in range(len(self.LSs))]
		for k in range(len(self.LSs)):
			for _ in range(self.NoLsTests):
				Xn[k][0], Xn[k][1], xb, fxb, improve, g, SR = self.LSs[k](Xn[k][0], Xn[k][1], xb, fxb, improve, SR, task, BONUS1=self.BONUS1, BONUS2=self.BONUS2, rnd=self.Rand)
				ls_grades[k] += g
		xn, xn_f = min(Xn, key=lambda x: x[1])
		return xn, xn_f, xb, fxb, argsort(ls_grades)[-1]

	def LsRun(self, k, x, x_f, xb, fxb, improve, SR, g, task):
		r"""Run the selected local search.

		Args:
			k (int): Index of the local search to run.
			x (numpy.ndarray): Current solution.
			x_f (float): Current solution's function/fitness value.
			xb (numpy.ndarray): Global best solution.
			fxb (float): Global best solution's fitness/function value.
			improve (bool): If the solution has improved.
			SR (numpy.ndarray): Search range.
			g (int): Grade.
			task (Task): Optimization task.

		Returns:
			Tuple[numpy.ndarray, float, numpy.ndarray, float, bool, numpy.ndarray, int]:
				1. New best solution found.
				2. New best solution's function/fitness value.
				3. Global best solution.
				4. Global best solution's function/fitness value.
				5. If the solution has improved.
				6. Search range.
				7. Grade of the local search runs.
		"""
		for _j in range(self.NoLs):
			x, x_f, xb, fxb, improve, grade, SR = self.LSs[k](x, x_f, xb, fxb, improve, SR, task, BONUS1=self.BONUS1, BONUS2=self.BONUS2, rnd=self.Rand)
			g += grade
		return x, x_f, xb, fxb, improve, SR, g

	def initPopulation(self, task):
		r"""Initialize starting population.

		Args:
			task (Task): Optimization task.

		Returns:
			Tuple[numpy.ndarray, numpy.ndarray, Dict[str, Any]]:
				1. Initialized population.
				2. Initialized population's function/fitness values.
				3. Additional arguments:
					* enable (numpy.ndarray): If solution/individual is enabled.
					* improve (numpy.ndarray): If solution/individual is improved.
					* SR (numpy.ndarray): Search range.
					* grades (numpy.ndarray): Grade of solution/individual.
		"""
		X, X_f, d = Algorithm.initPopulation(self, task)
		enable, improve, SR, grades = full(self.NP, True), full(self.NP, True), full([self.NP, task.D], task.bRange / 2), full(self.NP, 0.0)
		d.update({
			'enable': enable,
			'improve': improve,
			'SR': SR,
			'grades': grades
		})
		return X, X_f, d

	def runIteration(self, task, X, X_f, xb, xb_f, enable, improve, SR, grades, **dparams):
		r"""Core function of MultipleTrajectorySearch algorithm.

		Args:
			task (Task): Optimization task.
			X (numpy.ndarray): Current population of individuals.
			X_f (numpy.ndarray): Current individuals' function/fitness values.
			xb (numpy.ndarray): Global best individual.
			xb_f (float): Global best individual's function/fitness value.
			enable (numpy.ndarray): Enabled status of individuals.
			improve (numpy.ndarray): Improved status of individuals.
			SR (numpy.ndarray): Search ranges of individuals.
			grades (numpy.ndarray): Grades of individuals.
			**dparams (Dict[str, Any]): Additional arguments.

		Returns:
			Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
				1. New population.
				2. New population's function/fitness values.
				3. New global best solution.
				4. New global best solution's fitness/objective value.
				5. Additional arguments:
					* enable (numpy.ndarray): If solution/individual is enabled.
					* improve (numpy.ndarray): If solution/individual is improved.
					* SR (numpy.ndarray): Search range.
					* grades (numpy.ndarray): Grade of solution/individual.
		"""
		for i in range(len(X)):
			if not enable[i]: continue
			enable[i], grades[i] = False, 0
			X[i], X_f[i], xb, xb_f, k = self.GradingRun(X[i], X_f[i], xb, xb_f, improve[i], SR[i], task)
			X[i], X_f[i], xb, xb_f, improve[i], SR[i], grades[i] = self.LsRun(k, X[i], X_f[i], xb, xb_f, improve[i], SR[i], grades[i], task)
		for _ in range(self.NoLsBest): _, _, xb, xb_f, _, _, _ = MTS_LS1(xb, xb_f, xb, xb_f, False, task.bRange.copy() / 10, task, rnd=self.Rand)
		enable[argsort(grades)[::-1][:self.NoEnabled]] = True
		return X, X_f, xb, xb_f, {'enable': enable, 'improve': improve, 'SR': SR, 'grades': grades}

class MultipleTrajectorySearchV1(MultipleTrajectorySearch):
	r"""Implementation of Multiple trajectory search V1.

	Algorithm:
		Multiple trajectory search V1

	Date:
		2018

	Authors:
		Klemen Berkovic

	License:
		MIT

	Reference URL:
		https://ieeexplore.ieee.org/document/4983179/

	Reference paper:
		Tseng, Lin-Yu, and Chun Chen. "Multiple trajectory search for unconstrained/constrained multi-objective optimization." Evolutionary Computation, 2009. CEC'09. IEEE Congress on. IEEE, 2009.

	Attributes:
		Name (List[str]): List of strings representing algorithm name.

	See Also:
		* :class:`NiaPy.algorithms.other.MultipleTrajectorySearch`
	"""
	Name = ['MultipleTrajectorySearchV1', 'MTSv1']

	@staticmethod
	def algorithmInfo():
		r"""Get basic information about the algorithm.

		Returns:
			str: Basic information about the algorithm.

		See Also:
			* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
		"""
		return r"""Tseng, Lin-Yu, and Chun Chen. "Multiple trajectory search for unconstrained/constrained multi-objective optimization." Evolutionary Computation, 2009. CEC'09. IEEE Congress on. IEEE, 2009."""

	def setParameters(self, **kwargs):
		r"""Set core parameters of MultipleTrajectorySearchV1 algorithm.

		Args:
			**kwargs (Dict[str, Any]): Additional arguments.

		See Also:
			* :func:`NiaPy.algorithms.other.MultipleTrajectorySearch.setParameters`
		"""
		kwargs.pop('NoLsBest', None)
		MultipleTrajectorySearch.setParameters(self, NoLsBest=0, LSs=(MTS_LS1v1, MTS_LS2), **kwargs)

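# Minimal usage sketch (not part of the library). It assumes NiaPy's
# StoppingTask and Sphere benchmark with the import paths used in the 2.0.0rc
# series; adjust the imports and task arguments to your installed version.
if __name__ == '__main__':
	from NiaPy.task import StoppingTask  # assumed import path
	from NiaPy.benchmarks import Sphere  # assumed import path

	# 10-dimensional Sphere problem with a budget of 10000 function evaluations.
	task = StoppingTask(D=10, nFEs=10000, benchmark=Sphere())
	algo = MultipleTrajectorySearch(M=40, NoLsTests=2, NoLs=2, NoLsBest=2, NoEnabled=17)
	best_x, best_fit = algo.run(task)
	logger.info('Best fitness found: %s', best_fit)
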
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3