|
1
|
|
|
"""!
|
|
2
|
|
|
|
|
3
|
|
|
@brief Chaotic Neural Network
|
|
4
|
|
|
@details Based on article description:
|
|
5
|
|
|
- E.N.Benderskaya, S.V.Zhukova. Large-dimension image clustering by means of fragmentary synchronization in chaotic systems. 2007.
|
|
6
|
|
|
- E.N.Benderskaya, S.V.Zhukova. Clustering by Chaotic Neural Networks with Mean Field Calculated Via Delaunay Triangulation. 2008.
|
|
7
|
|
|
|
|
8
|
|
|
@authors Andrei Novikov ([email protected])
|
|
9
|
|
|
@date 2014-2016
|
|
10
|
|
|
@copyright GNU Public License
|
|
11
|
|
|
|
|
12
|
|
|
@cond GNU_PUBLIC_LICENSE
|
|
13
|
|
|
PyClustering is free software: you can redistribute it and/or modify
|
|
14
|
|
|
it under the terms of the GNU General Public License as published by
|
|
15
|
|
|
the Free Software Foundation, either version 3 of the License, or
|
|
16
|
|
|
(at your option) any later version.
|
|
17
|
|
|
|
|
18
|
|
|
PyClustering is distributed in the hope that it will be useful,
|
|
19
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
20
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
21
|
|
|
GNU General Public License for more details.
|
|
22
|
|
|
|
|
23
|
|
|
You should have received a copy of the GNU General Public License
|
|
24
|
|
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
25
|
|
|
@endcond
|
|
26
|
|
|
|
|
27
|
|
|
"""
|
|
28
|
|
|
|
|
29
|
|
|
import matplotlib.pyplot as plt;
|
|
|
|
|
|
|
30
|
|
|
import matplotlib.animation as animation;
|
|
|
|
|
|
|
31
|
|
|
|
|
32
|
|
|
from matplotlib import rcParams;
|
|
|
|
|
|
|
33
|
|
|
from matplotlib.font_manager import FontProperties;
|
|
|
|
|
|
|
34
|
|
|
|
|
35
|
|
|
import math;
|
|
36
|
|
|
import numpy;
|
|
|
|
|
|
|
37
|
|
|
import random;
|
|
38
|
|
|
|
|
39
|
|
|
from enum import IntEnum;
|
|
40
|
|
|
|
|
41
|
|
|
from scipy.spatial import Delaunay;
|
|
|
|
|
|
|
42
|
|
|
|
|
43
|
|
|
from pyclustering.utils import euclidean_distance_sqrt, average_neighbor_distance, heaviside, draw_dynamics;
|
|
44
|
|
|
|
|
45
|
|
|
|
|
46
|
|
|
class type_conn(IntEnum):
    """!
    @brief Enumeration of connection types for Chaotic Neural Network.
    
    @see cnn_network
    
    """
    
    ## All oscillators have connection with each other.
    # NOTE: original code had trailing commas ('= 0,') which make the raw member
    # value a tuple; IntEnum unpacks it, but plain Enum would not — removed to
    # avoid the latent hazard.
    ALL_TO_ALL = 0
    
    ## Connections between oscillators are created in line with Delaunay triangulation.
    TRIANGULATION_DELAUNAY = 1
|
|
59
|
|
|
|
|
60
|
|
|
|
|
61
|
|
|
class cnn_dynamic:
    """!
    @brief Container of output dynamic of the chaotic neural network where states of each neuron during simulation are stored.
    
    @see cnn_network
    
    """
    
    def __init__(self, output = None, time = None):
        """!
        @brief Constructor of the chaotic neural network output dynamic.
        
        @param[in] output (list): Dynamic of oscillators on each step of simulation.
        @param[in] time (list): Simulation time.
        
        """
        
        ## Output value of each neuron on each iteration.
        # BUG FIX: the original used mutable default arguments ('output = []'),
        # so every instance created without arguments shared the same lists.
        self.output = output if output is not None else [];
        
        ## Sequence of simulation steps of the network.
        self.time = time if time is not None else [];
    
    
    def __len__(self):
        """!
        @brief (uint) Returns amount of simulation steps that are stored.
        
        """
        return len(self.output);
    
    
    def allocate_observation_matrix(self):
        """!
        @brief Allocates observation matrix in line with output dynamic of the network.
        @details Matrix where state of each neuron is denoted by zero/one in line with Heaviside function on each iteration.
        
        @return (list) Observation matrix of the network dynamic.
        
        """
        number_neurons = len(self.output[0]);
        observation_matrix = [];
        
        for iteration in range(len(self.output)):
            observation_column = [];
            for index_neuron in range(number_neurons):
                observation_column.append(heaviside(self.output[iteration][index_neuron]));
            
            observation_matrix.append(observation_column);
        
        return observation_matrix;
|
|
112
|
|
|
|
|
113
|
|
|
|
|
114
|
|
|
class cnn_visualizer:
    """!
    @brief Visualizer of output dynamic of chaotic neural network (CNN).
    
    """
    
    @staticmethod
    def show_output_dynamic(cnn_output_dynamic):
        """!
        @brief Shows output dynamic (output of each neuron) during simulation.
        
        @param[in] cnn_output_dynamic (cnn_dynamic): Output dynamic of the chaotic neural network.
        
        @see show_dynamic_matrix
        @see show_observation_matrix
        
        """
        
        # Delegate to the shared dynamic-drawing helper: time on X axis, neuron outputs on Y axis.
        draw_dynamics(cnn_output_dynamic.time, cnn_output_dynamic.output, x_title = "t", y_title = "x");
    
    
    @staticmethod
    def show_dynamic_matrix(cnn_output_dynamic):
        """!
        @brief Shows output dynamic as matrix in grey colors.
        @details This type of visualization is convenient for observing allocated clusters.
        
        @param[in] cnn_output_dynamic (cnn_dynamic): Output dynamic of the chaotic neural network.
        
        @see show_output_dynamic
        @see show_observation_matrix
        
        """
        
        network_dynamic = cnn_output_dynamic.output;
        
        # Render raw neuron outputs as a greyscale image clipped to [0, 1].
        plt.imshow(network_dynamic, cmap = plt.get_cmap('gray'), interpolation='None', vmin = 0.0, vmax = 1.0);
        plt.show();
    
    
    @staticmethod
    def show_observation_matrix(cnn_output_dynamic):
        """!
        @brief Shows observation matrix as black/white blocks.
        @details This type of visualization is convenient for observing allocated clusters.
        
        @param[in] cnn_output_dynamic (cnn_dynamic): Output dynamic of the chaotic neural network.
        
        @see show_output_dynamic
        @see show_dynamic_matrix
        
        """
        
        # The dynamic container binarizes its own output (Heaviside); render the result.
        binary_matrix = cnn_output_dynamic.allocate_observation_matrix();
        
        plt.imshow(binary_matrix, cmap = plt.get_cmap('gray'), interpolation='None', vmin = 0.0, vmax = 1.0);
        plt.show();
|
|
168
|
|
|
|
|
169
|
|
|
|
|
170
|
|
|
class cnn_network:
    """!
    @brief Chaotic neural network based on system of logistic map where clustering phenomenon can be observed.
    
    Example:
    @code
        # load stimulus from file
        stimulus = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1);
        
        # create chaotic neural network, amount of neurons should be equal to amount of stimulus
        network_instance = cnn_network(len(stimulus));
        
        # simulate it during 100 steps
        steps = 100;
        output_dynamic = network_instance.simulate(steps, stimulus);
        
        # display output dynamic of the network
        cnn_visualizer.show_output_dynamic(output_dynamic);
        
        # display dynamic matrix and observation matrix to show clustering phenomenon.
        cnn_visualizer.show_dynamic_matrix(output_dynamic);
        cnn_visualizer.show_observation_matrix(output_dynamic);
    @endcode
    
    """
    
    def __init__(self, num_osc, conn_type = type_conn.ALL_TO_ALL, amount_neighbors = 3):
        """!
        @brief Constructor of chaotic neural network.
        
        @param[in] num_osc (uint): Amount of neurons in the chaotic neural network.
        @param[in] conn_type (type_conn): CNN type connection for the network.
        @param[in] amount_neighbors (uint): k-nearest neighbors for calculation scaling constant of weights.
        
        """
        
        self.__num_osc = num_osc;
        self.__conn_type = conn_type;
        self.__amount_neighbors = amount_neighbors;
        
        # Scaling constant and weight matrix are initialized lazily in simulate(),
        # because they depend on the stimulus.
        self.__average_distance = 0.0;
        self.__weights = None;
        self.__weights_summary = None;
        
        self.__location = None;     # just for network visualization
        
        random.seed();
        self.__output = [ random.random() for _ in range(num_osc) ];
    
    
    def __len__(self):
        """!
        @brief Returns size of the chaotic neural network that is defined by amount of neurons.
        
        """
        return self.__num_osc;
    
    
    def simulate(self, steps, stimulus):
        """!
        @brief Simulates chaotic neural network with external stimulus during specified steps.
        @details Stimulus are considered as a coordinates of neurons and in line with that weights
                 are initialized.
        
        @param[in] steps (uint): Amount of steps for simulation.
        @param[in] stimulus (list): Stimulus that are used for simulation.
        
        @return (cnn_dynamic) Output dynamic of the chaotic neural network.
        
        """
        
        self.__create_weights(stimulus);
        self.__location = stimulus;
        
        dynamic = cnn_dynamic([], []);
        dynamic.output.append(self.__output);
        dynamic.time.append(0);
        
        for step in range(1, steps, 1):
            # __calculate_states returns a fresh list, so previously stored
            # iterations in 'dynamic' are never mutated.
            self.__output = self.__calculate_states();
            
            dynamic.output.append(self.__output);
            dynamic.time.append(step);
        
        return dynamic;
    
    
    def __calculate_states(self):
        """!
        @brief Calculates new state of each neuron.
        @details There is not any assignment.
        
        @return (list) Returns new states (output).
        
        """
        
        output = [ 0.0 for _ in range(self.__num_osc) ];
        
        for i in range(self.__num_osc):
            output[i] = self.__neuron_evolution(i);
        
        return output;
    
    
    def __neuron_evolution(self, index):
        """!
        @brief Calculates state of the neuron with specified index.
        
        @param[in] index (uint): Index of neuron in the network.
        
        @return (double) New output of the specified neuron.
        
        """
        value = 0.0;
        
        # Weighted mean field over all neighbors using the logistic-map term (1 - 2*x^2).
        for index_neighbor in range(self.__num_osc):
            value += self.__weights[index][index_neighbor] * (1.0 - 2.0 * (self.__output[index_neighbor] ** 2));
        
        return value / self.__weights_summary[index];
    
    
    def __create_weights(self, stimulus):
        """!
        @brief Create weights between neurons in line with stimulus.
        
        @param[in] stimulus (list): External stimulus for the chaotic neural network.
        
        """
        
        self.__average_distance = average_neighbor_distance(stimulus, self.__amount_neighbors);
        
        self.__weights = [ [ 0.0 for _ in range(len(stimulus)) ] for _ in range(len(stimulus)) ];
        self.__weights_summary = [ 0.0 for _ in range(self.__num_osc) ];
        
        if (self.__conn_type == type_conn.ALL_TO_ALL):
            self.__create_weights_all_to_all(stimulus);
        
        elif (self.__conn_type == type_conn.TRIANGULATION_DELAUNAY):
            self.__create_weights_delaunay_triangulation(stimulus);
    
    
    def __create_weights_all_to_all(self, stimulus):
        """!
        @brief Create weight all-to-all structure between neurons in line with stimulus.
        
        @param[in] stimulus (list): External stimulus for the chaotic neural network.
        
        """
        
        for i in range(len(stimulus)):
            for j in range(i + 1, len(stimulus)):
                weight = self.__calculate_weight(stimulus[i], stimulus[j]);
                
                # Weight matrix is symmetric; diagonal stays 0.0.
                self.__weights[i][j] = weight;
                self.__weights[j][i] = weight;
                
                self.__weights_summary[i] += weight;
                self.__weights_summary[j] += weight;
    
    
    def __create_weights_delaunay_triangulation(self, stimulus):
        """!
        @brief Create weight Delaunay triangulation structure between neurons in line with stimulus.
        
        @param[in] stimulus (list): External stimulus for the chaotic neural network.
        
        """
        
        points = numpy.array(stimulus);
        triangulation = Delaunay(points);
        
        # Connect every pair of points that share a simplex; shared edges may be
        # visited more than once, accumulating into the summary (same as original).
        for triangle in triangulation.simplices:
            for index_tri_point1 in range(len(triangle)):
                for index_tri_point2 in range(index_tri_point1 + 1, len(triangle)):
                    index_point1 = triangle[index_tri_point1];
                    index_point2 = triangle[index_tri_point2];
                    
                    weight = self.__calculate_weight(stimulus[index_point1], stimulus[index_point2]);
                    
                    self.__weights[index_point1][index_point2] = weight;
                    self.__weights[index_point2][index_point1] = weight;
                    
                    self.__weights_summary[index_point1] += weight;
                    self.__weights_summary[index_point2] += weight;
    
    
    def __calculate_weight(self, stimulus1, stimulus2):
        """!
        @brief Calculate weight between neurons that have external stimulus1 and stimulus2.
        
        @param[in] stimulus1 (list): External stimulus of the first neuron.
        @param[in] stimulus2 (list): External stimulus of the second neuron.
        
        @return (double) Weight between neurons that are under specified stimulus.
        
        """
        
        # Gaussian kernel over the squared euclidean distance, scaled by the
        # average k-nearest-neighbor distance of the stimulus.
        distance = euclidean_distance_sqrt(stimulus1, stimulus2);
        return math.exp(-distance / (2.0 * self.__average_distance));
    
    
    def show_network(self):
        """!
        @brief Shows structure of the network: neurons and connections between them.
        
        """
        
        dimension = len(self.__location[0]);
        if ( (dimension != 3) and (dimension != 2) ):
            raise NameError('Network that is located in different from 2-d and 3-d dimensions can not be represented');
        
        rcParams['font.sans-serif'] = ['Arial'];
        rcParams['font.size'] = 12;
        
        fig = plt.figure();
        axes = None;
        if (dimension == 2):
            axes = fig.add_subplot(111);
        elif (dimension == 3):
            # 'fig.gca(projection=...)' is deprecated/removed in modern matplotlib;
            # add_subplot with a projection is the equivalent, compatible call.
            axes = fig.add_subplot(111, projection='3d');
        
        surface_font = FontProperties();
        surface_font.set_name('Arial');
        surface_font.set_size('12');
        
        for i in range(0, self.__num_osc, 1):
            if (dimension == 2):
                axes.plot(self.__location[i][0], self.__location[i][1], 'bo');
                for j in range(i, self.__num_osc, 1):   # draw connection between two points only one time
                    if (self.__weights[i][j] > 0.0):
                        axes.plot([self.__location[i][0], self.__location[j][0]], [self.__location[i][1], self.__location[j][1]], 'b-', linewidth = 0.5);
            
            elif (dimension == 3):
                axes.scatter(self.__location[i][0], self.__location[i][1], self.__location[i][2], c = 'b', marker = 'o');
                
                # BUG FIX: original read 'self._num_osc' (single underscore) here,
                # which raises AttributeError because the attribute is name-mangled
                # as '_cnn_network__num_osc'; the 3-d branch could never run.
                for j in range(i, self.__num_osc, 1):   # draw connection between two points only one time
                    if (self.__weights[i][j] > 0.0):
                        axes.plot([self.__location[i][0], self.__location[j][0]], [self.__location[i][1], self.__location[j][1]], [self.__location[i][2], self.__location[j][2]], 'b-', linewidth = 0.5);
        
        plt.grid();
        plt.show();
This can be caused by one of the following:
1. Missing Dependencies
This error could indicate a configuration issue of Pylint. Make sure that your libraries are available by adding the necessary commands.
2. Missing __init__.py files
This error could also result from missing
__init__.pyfiles in your module folders. Make sure that you place one file in each sub-folder.