Total Complexity: 183
Total Lines: 805
Duplicated Lines: 1.24 %
Duplicate code is one of the most pungent code smells. A rule that is often used is to restructure code once it is duplicated in three or more places.
In this file the duplicated block is flagged ("View Code Duplication") at two points inside the _adaptation method; a sketch of one way to factor it out follows the listing.
Complex classes like som often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined the fields and methods that belong together, you can apply the Extract Class refactoring (a minimal sketch follows this paragraph). If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
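For illustration only, here is a minimal, hypothetical sketch of Extract Class applied to the som class listed below: the show_* plotting methods share a prefix and depend mainly on the trained state (weights, awards, neighbors), so they could move into a separate view class. The name som_visualizer and the constructor shape are assumptions, not part of the analyzed file.

import matplotlib.pyplot as plt

class som_visualizer:
    """Hypothetical view class extracted from som; holds no training logic."""

    def __init__(self, network):
        # any trained som instance that exposes get_distance_matrix()
        self._network = network

    def show_distance_matrix(self):
        # same drawing code as som.show_distance_matrix(), now separated from the model
        distance_matrix = self._network.get_distance_matrix()
        plt.imshow(distance_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
        plt.title("U-Matrix")
        plt.colorbar()
        plt.show()

# usage: som_visualizer(trained_network).show_distance_matrix()

The listing of the analyzed class follows.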
1 | """! |
||
110 | class som: |
||
111 | """! |
||
112 | @brief Represents self-organized feature map (SOM). |
||
113 | |||
114 | Example: |
||
115 | @code |
||
116 | # sample for training |
||
117 | sample_train = read_sample(file_train_sample); |
||
118 | |||
119 | # create self-organized feature map with size 5x5 |
||
120 | network = som(5, 5, sample_train, 100); |
||
121 | |||
122 | # train network |
||
123 | network.train(); |
||
124 | |||
125 | # simulate using another sample |
||
126 | sample = read_sample(file_sample); |
||
127 | index_winner = network.simulate(sample); |
||
128 | |||
129 | # check what it is (what it looks like?) |
||
130 | index_similar_objects = network.capture_objects[index_winner]; |
||
131 | |||
132 | @endcode |
||
133 | |||
134 | """ |
||
135 | |||
    # describe network
    _rows = 0;
    _cols = 0;
    _size = 0;
    _weights = None;            # Weights of each neuron (coordinates in data dimension in other words).
    _award = None;              # Lists of indexes of won points for each neuron.
    _data = None;               # Analyzed data.
    _conn_type = None;          # Type of connections between neurons.

    # just for convenience (avoid excess calculation during learning)
    _location = None;           # Location in grid.
    _sqrt_distances = None;
    _capture_objects = None;    # Stores indexes of input points that were captured by each neuron individually at the end.
    _neighbors = None;          # Indexes of neighbours for each neuron.

    # describe learning process and internal state
    _epochs = 0;                # Iterations for learning.
    _params = None;

    # dynamically changed learning parameters
    _local_radius = 0.0;
    _learn_rate = 0.0;

    __ccore_som_pointer = None;

    @property
    def size(self):
        """!
        @return (uint) Size of self-organized map (number of neurons).

        """

        if (self.__ccore_som_pointer is not None):
            self._size = wrapper.som_get_size(self.__ccore_som_pointer);

        return self._size;

    @property
    def weights(self):
        """!
        @return (list) Weights of each neuron.

        """

        if (self.__ccore_som_pointer is not None):
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer);

        return self._weights;

    @property
    def awards(self):
        """!
        @return (list) Numbers of captured objects by each neuron.

        """

        if (self.__ccore_som_pointer is not None):
            self._award = wrapper.som_get_awards(self.__ccore_som_pointer);

        return self._award;

    @property
    def capture_objects(self):
        """!
        @return (list) Indexes of captured objects by each neuron.

        """

        if (self.__ccore_som_pointer is not None):
            self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer);

        return self._capture_objects;

    def __init__(self, rows, cols, conn_type = type_conn.grid_eight, parameters = None, ccore = False):
        """!
        @brief Constructor of self-organized map.

        @param[in] rows (uint): Number of neurons in the column (number of rows).
        @param[in] cols (uint): Number of neurons in the row (number of columns).
        @param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
        @param[in] parameters (som_parameters): Other specific parameters.
        @param[in] ccore (bool): If True, simulation is performed by the CCORE library (C++ implementation of pyclustering).

        """

        # some of these parameters are required regardless of the core implementation, for example, for network demonstration.
        self._cols = cols;
        self._rows = rows;
        self._size = cols * rows;
        self._conn_type = conn_type;

        if (parameters is not None):
            self._params = parameters;
        else:
            self._params = som_parameters();

        if (self._params.init_radius is None):
            self._params.init_radius = self.__initialize_initial_radius(rows, cols);

        if (ccore is True):
            self.__ccore_som_pointer = wrapper.som_create(rows, cols, conn_type, self._params);
        else:
            # location
            self._location = self.__initialize_locations(rows, cols);

            # awards
            self._award = [0] * self._size;

            # captured objects
            self._capture_objects = [ [] for i in range(self._size) ];

            # distances
            self._sqrt_distances = self.__initialize_distances(self._size, self._location);

            # connections
            if (conn_type != type_conn.func_neighbor):
                self._create_connections(conn_type);

    def __del__(self):
        """!
        @brief Destructor of the self-organized feature map.

        """

        if (self.__ccore_som_pointer is not None):
            wrapper.som_destroy(self.__ccore_som_pointer);

    def __len__(self):
        """!
        @return (uint) Size of self-organized map (number of neurons).

        """

        return self.size;

    def __initialize_initial_radius(self, rows, cols):
        """!
        @brief Initialize initial radius using map sizes.

        @param[in] rows (uint): Number of neurons in the column (number of rows).
        @param[in] cols (uint): Number of neurons in the row (number of columns).

        @return (double) Value of initial radius.

        """

        if ((cols + rows) / 4.0 > 1.0):
            return 2.0;
        elif ( (cols > 1) and (rows > 1) ):
            return 1.5;
        else:
            return 1.0;

    def __initialize_locations(self, rows, cols):
        """!
        @brief Initialize locations (coordinates in SOM grid) of each neuron in the map.

        @param[in] rows (uint): Number of neurons in the column (number of rows).
        @param[in] cols (uint): Number of neurons in the row (number of columns).

        @return (list) List of coordinates of each neuron in the map.

        """

        location = list();
        for i in range(rows):
            for j in range(cols):
                location.append([float(i), float(j)]);

        return location;

    def __initialize_distances(self, size, location):
        """!
        @brief Initialize distance matrix in SOM grid.

        @param[in] size (uint): Amount of neurons in the network.
        @param[in] location (list): List of coordinates of each neuron in the network.

        @return (list) Distance matrix between neurons in the network.

        """

        sqrt_distances = [ [ [] for i in range(size) ] for j in range(size) ];
        for i in range(size):
            for j in range(i, size, 1):
                dist = euclidean_distance_sqrt(location[i], location[j]);
                sqrt_distances[i][j] = dist;
                sqrt_distances[j][i] = dist;

        return sqrt_distances;

    def _create_initial_weights(self, init_type):
        """!
        @brief Creates initial weights for neurons in line with the specified initialization.

        @param[in] init_type (type_init): Type of initialization of initial neuron weights (random, random in center of the input data, random distributed in data, distributed in line with uniform grid).

        """

        dimension = len(self._data[0]);

        maximum_dimension = [self._data[0][i] for i in range(dimension)];
        minimum_dimension = [self._data[0][i] for i in range(dimension)];
        for i in range(len(self._data)):
            for dim in range(dimension):
                if (maximum_dimension[dim] < self._data[i][dim]):
                    maximum_dimension[dim] = self._data[i][dim];
                elif (minimum_dimension[dim] > self._data[i][dim]):
                    minimum_dimension[dim] = self._data[i][dim];

        # Increase border
        width_dimension = [0] * dimension;
        center_dimension = [0] * dimension;
        for dim in range(dimension):
            width_dimension[dim] = maximum_dimension[dim] - minimum_dimension[dim];
            center_dimension[dim] = (maximum_dimension[dim] + minimum_dimension[dim]) / 2;

        step_x = center_dimension[0];
        if (self._rows > 1): step_x = width_dimension[0] / (self._rows - 1);

        step_y = 0.0;
        if (dimension > 1):
            step_y = center_dimension[1];
            if (self._cols > 1): step_y = width_dimension[1] / (self._cols - 1);

        # generate weights (topological coordinates)
        random.seed();

        # Feature SOM 0002: Uniform grid.
        if (init_type == type_init.uniform_grid):
            # Predefined weights in line with input data.
            self._weights = [ [ [] for i in range(dimension) ] for j in range(self._size) ];
            for i in range(self._size):
                location = self._location[i];
                for dim in range(dimension):
                    if (dim == 0):
                        if (self._rows > 1):
                            self._weights[i][dim] = minimum_dimension[dim] + step_x * location[dim];
                        else:
                            self._weights[i][dim] = center_dimension[dim];
                    elif (dim == 1):
                        if (self._cols > 1):
                            self._weights[i][dim] = minimum_dimension[dim] + step_y * location[dim];
                        else:
                            self._weights[i][dim] = center_dimension[dim];
                    else:
                        self._weights[i][dim] = center_dimension[dim];

        elif (init_type == type_init.random_surface):
            # Random weights at the full surface.
            self._weights = [ [random.uniform(minimum_dimension[i], maximum_dimension[i]) for i in range(dimension)] for j in range(self._size) ];

        elif (init_type == type_init.random_centroid):
            # Random weights at the center of input data.
            self._weights = [ [(random.random() + center_dimension[i]) for i in range(dimension)] for j in range(self._size) ];

        else:
            # Random weights of input data.
            self._weights = [ [random.random() for i in range(dimension)] for j in range(self._size) ];

    def _create_connections(self, conn_type):
        """!
        @brief Create connections in line with input rule (grid four, grid eight, honeycomb, function neighbour).

        @param[in] conn_type (type_conn): Type of connection between oscillators in the network.

        """

        self._neighbors = [[] for index in range(self._size)];

        for index in range(0, self._size, 1):
            upper_index = index - self._cols;
            upper_left_index = index - self._cols - 1;
            upper_right_index = index - self._cols + 1;

            lower_index = index + self._cols;
            lower_left_index = index + self._cols - 1;
            lower_right_index = index + self._cols + 1;

            left_index = index - 1;
            right_index = index + 1;

            node_row_index = math.floor(index / self._cols);
            upper_row_index = node_row_index - 1;
            lower_row_index = node_row_index + 1;

            if ( (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) ):
                if (upper_index >= 0):
                    self._neighbors[index].append(upper_index);

                if (lower_index < self._size):
                    self._neighbors[index].append(lower_index);

            if ( (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) or (conn_type == type_conn.honeycomb) ):
                if ( (left_index >= 0) and (math.floor(left_index / self._cols) == node_row_index) ):
                    self._neighbors[index].append(left_index);

                if ( (right_index < self._size) and (math.floor(right_index / self._cols) == node_row_index) ):
                    self._neighbors[index].append(right_index);

            if (conn_type == type_conn.grid_eight):
                if ( (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index) ):
                    self._neighbors[index].append(upper_left_index);

                if ( (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index) ):
                    self._neighbors[index].append(upper_right_index);

                if ( (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index) ):
                    self._neighbors[index].append(lower_left_index);

                if ( (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index) ):
                    self._neighbors[index].append(lower_right_index);

            if (conn_type == type_conn.honeycomb):
                if ( (node_row_index % 2) == 0):
                    upper_left_index = index - self._cols;
                    upper_right_index = index - self._cols + 1;

                    lower_left_index = index + self._cols;
                    lower_right_index = index + self._cols + 1;
                else:
                    upper_left_index = index - self._cols - 1;
                    upper_right_index = index - self._cols;

                    lower_left_index = index + self._cols - 1;
                    lower_right_index = index + self._cols;

                if ( (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index) ):
                    self._neighbors[index].append(upper_left_index);

                if ( (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index) ):
                    self._neighbors[index].append(upper_right_index);

                if ( (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index) ):
                    self._neighbors[index].append(lower_left_index);

                if ( (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index) ):
                    self._neighbors[index].append(lower_right_index);

    def _competition(self, x):
        """!
        @brief Calculates neuron winner (distance, neuron index).

        @param[in] x (list): Input pattern from the input data set, for example it can be coordinates of a point.

        @return (uint) Returns index of neuron that is winner.

        """

        index = 0;
        minimum = euclidean_distance_sqrt(self._weights[0], x);

        for i in range(1, self._size, 1):
            candidate = euclidean_distance_sqrt(self._weights[i], x);
            if (candidate < minimum):
                index = i;
                minimum = candidate;

        return index;

    def _adaptation(self, index, x):
        """!
        @brief Changes weights of neurons in line with the winner neuron.

        @param[in] index (uint): Index of neuron-winner.
        @param[in] x (list): Input pattern from the input data set.

        """

        dimension = len(self._weights[0]);

        if (self._conn_type == type_conn.func_neighbor):
            for neuron_index in range(self._size):
                distance = self._sqrt_distances[index][neuron_index];

                if (distance < self._local_radius):    # flagged by the report: "View Code Duplication"
                    influence = math.exp( -( distance / (2.0 * self._local_radius) ) );

                    for i in range(dimension):
                        self._weights[neuron_index][i] = self._weights[neuron_index][i] + self._learn_rate * influence * (x[i] - self._weights[neuron_index][i]);

        else:
            for i in range(dimension):
                self._weights[index][i] = self._weights[index][i] + self._learn_rate * (x[i] - self._weights[index][i]);

            for neighbor_index in self._neighbors[index]:
                distance = self._sqrt_distances[index][neighbor_index]

                if (distance < self._local_radius):    # flagged by the report: "View Code Duplication"
                    influence = math.exp( -( distance / (2.0 * self._local_radius) ) );

                    for i in range(dimension):
                        self._weights[neighbor_index][i] = self._weights[neighbor_index][i] + self._learn_rate * influence * (x[i] - self._weights[neighbor_index][i]);

    def train(self, data, epochs, autostop = False):
        """!
        @brief Trains self-organized feature map (SOM).

        @param[in] data (list): Input data - list of points where each point is represented by a list of features, for example coordinates.
        @param[in] epochs (uint): Number of epochs for training.
        @param[in] autostop (bool): Automatic termination of the learning process when adaptation does not occur.

        @return (uint) Number of learning iterations.

        """

        if (self.__ccore_som_pointer is not None):
            return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop);

        for i in range(self._size):
            self._award[i] = 0;
            self._capture_objects[i].clear();

        self._epochs = epochs;
        self._data = data;

        # weights
        self._create_initial_weights(self._params.init_type);

        previous_weights = None;

        for epoch in range(1, self._epochs + 1):
            # Depression term of coupling
            self._local_radius = ( self._params.init_radius * math.exp(-(epoch / self._epochs)) ) ** 2;
            self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / self._epochs));

            #random.shuffle(self._data);    # Random order

            # Feature SOM 0003: Clear statistics
            if (autostop == True):
                for i in range(self._size):
                    self._award[i] = 0;
                    self._capture_objects[i].clear();

            for i in range(len(self._data)):
                # Step 1: Competition:
                index = self._competition(self._data[i]);

                # Step 2: Adaptation:
                self._adaptation(index, self._data[i]);

                # Update statistics
                if ( (autostop == True) or (epoch == self._epochs) ):
                    self._award[index] += 1;
                    self._capture_objects[index].append(i);

            # Feature SOM 0003: Check requirement of stopping
            if (autostop == True):
                if (previous_weights is not None):
                    maximal_adaptation = self._get_maximal_adaptation(previous_weights);
                    if (maximal_adaptation < self._params.adaptation_threshold):
                        return epoch;

                previous_weights = [item[:] for item in self._weights];

        return self._epochs;

    def simulate(self, input_pattern):
        """!
        @brief Processes input pattern (no learning) and returns index of neuron-winner.
               Using the index of the neuron-winner, the captured objects can be obtained via the property capture_objects.

        @param[in] input_pattern (list): Input pattern.

        @return (uint) Returns index of neuron-winner.

        @see capture_objects

        """

        if (self.__ccore_som_pointer is not None):
            return wrapper.som_simulate(self.__ccore_som_pointer, [ input_pattern ]);

        return self._competition(input_pattern);

    def _get_maximal_adaptation(self, previous_weights):
        """!
        @brief Calculates maximum change of weights by comparison between previous weights and current weights.

        @param[in] previous_weights (list): Weights from the previous step of the learning process.

        @return (double) Value that represents maximum change of weights after the adaptation process.

        """

        dimension = len(self._data[0]);
        maximal_adaptation = 0.0;

        for neuron_index in range(self._size):
            for dim in range(dimension):
                current_adaptation = previous_weights[neuron_index][dim] - self._weights[neuron_index][dim];

                if (current_adaptation < 0): current_adaptation = -current_adaptation;

                if (maximal_adaptation < current_adaptation):
                    maximal_adaptation = current_adaptation;

        return maximal_adaptation;

    def get_winner_number(self):
        """!
        @brief Calculates number of winners at the last step of the learning process.

        @return (uint) Number of winners.

        """

        if (self.__ccore_som_pointer is not None):
            self._award = wrapper.som_get_awards(self.__ccore_som_pointer);

        winner_number = 0;
        for i in range(self._size):
            if (self._award[i] > 0):
                winner_number += 1;

        return winner_number;

    def show_distance_matrix(self):
        """!
        @brief Shows gray visualization of U-matrix (distance matrix).

        @see get_distance_matrix()

        """

        distance_matrix = self.get_distance_matrix();

        plt.imshow(distance_matrix, cmap = plt.get_cmap('hot'), interpolation='kaiser');
        plt.title("U-Matrix");
        plt.colorbar();
        plt.show();

    def get_distance_matrix(self):
        """!
        @brief Calculates distance matrix (U-matrix).
        @details The U-Matrix is visualized on the basis of distances in the input space between each weight vector and its neighbors on the map.

        @return (list) Distance matrix (U-matrix).

        @see show_distance_matrix()
        @see get_density_matrix()

        """

        if (self.__ccore_som_pointer is not None):
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer);

            if (self._conn_type != type_conn.func_neighbor):
                self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer);

        distance_matrix = [ [0.0] * self._cols for i in range(self._rows) ];

        for i in range(self._rows):
            for j in range(self._cols):
                neuron_index = i * self._cols + j;

                if (self._conn_type == type_conn.func_neighbor):
                    self._create_connections(type_conn.grid_eight);

                for neighbor_index in self._neighbors[neuron_index]:
                    distance_matrix[i][j] += euclidean_distance_sqrt(self._weights[neuron_index], self._weights[neighbor_index]);

                distance_matrix[i][j] /= len(self._neighbors[neuron_index]);

        return distance_matrix;

    def show_density_matrix(self, surface_divider = 20.0):
        """!
        @brief Shows density matrix (P-matrix) using kernel density estimation.

        @param[in] surface_divider (double): Divider in each dimension that affects the radius for density measurement.

        @see show_distance_matrix()

        """

        density_matrix = self.get_density_matrix(surface_divider);

        plt.imshow(density_matrix, cmap = plt.get_cmap('hot'), interpolation='kaiser');
        plt.title("P-Matrix");
        plt.colorbar();
        plt.show();

    def get_density_matrix(self, surface_divider = 20.0):
        """!
        @brief Calculates density matrix (P-Matrix).

        @param[in] surface_divider (double): Divider in each dimension that affects the radius for density measurement.

        @return (list) Density matrix (P-Matrix).

        @see get_distance_matrix()

        """

        if (self.__ccore_som_pointer is not None):
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer);

        density_matrix = [ [0] * self._cols for i in range(self._rows) ];
        dimension = len(self._weights[0]);

        dim_max = [ float('-Inf') ] * dimension;
        dim_min = [ float('Inf') ] * dimension;

        for weight in self._weights:
            for index_dim in range(dimension):
                if (weight[index_dim] > dim_max[index_dim]):
                    dim_max[index_dim] = weight[index_dim];

                if (weight[index_dim] < dim_min[index_dim]):
                    dim_min[index_dim] = weight[index_dim];

        radius = [0.0] * len(self._weights[0]);
        for index_dim in range(dimension):
            radius[index_dim] = ( dim_max[index_dim] - dim_min[index_dim] ) / surface_divider;

        for point in self._data:
            for index_neuron in range(len(self)):
                point_covered = True;

                for index_dim in range(dimension):
                    if (abs(point[index_dim] - self._weights[index_neuron][index_dim]) > radius[index_dim]):
                        point_covered = False;
                        break;

                row = math.floor(index_neuron / self._cols);
                col = index_neuron - row * self._cols;

                if (point_covered is True):
                    density_matrix[row][col] += 1;

        return density_matrix;

    def show_winner_matrix(self):
        """!
        @brief Shows winner matrix where each element corresponds to a neuron and its value represents
               the number of won objects from the input data space at the last training iteration.

        @see show_distance_matrix()

        """

        if (self.__ccore_som_pointer is not None):
            self._award = wrapper.som_get_awards(self.__ccore_som_pointer);

        (fig, ax) = plt.subplots();
        winner_matrix = [ [0] * self._cols for i in range(self._rows) ];

        for i in range(self._rows):
            for j in range(self._cols):
                neuron_index = i * self._cols + j;

                winner_matrix[i][j] = self._award[neuron_index];
                ax.text(i, j, str(winner_matrix[i][j]), va='center', ha='center')

        ax.imshow(winner_matrix, cmap = plt.get_cmap('cool'), interpolation='none');
        ax.grid(True);

        plt.title("Winner Matrix");
        plt.show();

    def show_network(self, awards = False, belongs = False, coupling = True, dataset = True, marker_type = 'o'):
        """!
        @brief Shows neurons in the dimension of data.

        @param[in] awards (bool): If True - displays how many objects won each neuron.
        @param[in] belongs (bool): If True - marks each won object by the index of its neuron-winner (only when the dataset is displayed too).
        @param[in] coupling (bool): If True - displays connections between neurons (except the case when function neighbor is used).
        @param[in] dataset (bool): If True - displays the input data set.
        @param[in] marker_type (string): Defines marker that is used for displaying neurons in the network.

        """

        if (self.__ccore_som_pointer is not None):
            self._size = wrapper.som_get_size(self.__ccore_som_pointer);
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer);
            self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer);
            self._award = wrapper.som_get_awards(self.__ccore_som_pointer);

        dimension = len(self._weights[0]);

        fig = plt.figure();
        axes = None;

        # Check for dimensions
        if ( (dimension == 1) or (dimension == 2) ):
            axes = fig.add_subplot(111);
        elif (dimension == 3):
            axes = fig.gca(projection='3d');
        else:
            raise NameError('Drawer supports only 1D, 2D and 3D data representation');

        # Show data
        if (dataset == True):
            for x in self._data:
                if (dimension == 1):
                    axes.plot(x[0], 0.0, 'b|', ms = 30);

                elif (dimension == 2):
                    axes.plot(x[0], x[1], 'b.');

                elif (dimension == 3):
                    axes.scatter(x[0], x[1], x[2], c = 'b', marker = '.');

        # Show neurons
        for index in range(self._size):
            color = 'g';
            if (self._award[index] == 0): color = 'y';

            if (dimension == 1):
                axes.plot(self._weights[index][0], 0.0, color + marker_type);

                if (awards == True):
                    location = '{0}'.format(self._award[index]);
                    axes.text(self._weights[index][0], 0.0, location, color='black', fontsize = 10);

                if (belongs == True):
                    location = '{0}'.format(index);
                    axes.text(self._weights[index][0], 0.0, location, color='black', fontsize = 12);
                    for k in range(len(self._capture_objects[index])):
                        point = self._data[self._capture_objects[index][k]];
                        axes.text(point[0], 0.0, location, color='blue', fontsize = 10);

            if (dimension == 2):
                axes.plot(self._weights[index][0], self._weights[index][1], color + marker_type);

                if (awards == True):
                    location = '{0}'.format(self._award[index]);
                    axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize = 10);

                if (belongs == True):
                    location = '{0}'.format(index);
                    axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize = 12);
                    for k in range(len(self._capture_objects[index])):
                        point = self._data[self._capture_objects[index][k]];
                        axes.text(point[0], point[1], location, color='blue', fontsize = 10);

                if ( (self._conn_type != type_conn.func_neighbor) and (coupling != False) ):
                    for neighbor in self._neighbors[index]:
                        if (neighbor > index):
                            axes.plot([self._weights[index][0], self._weights[neighbor][0]], [self._weights[index][1], self._weights[neighbor][1]], 'g', linewidth = 0.5);

            elif (dimension == 3):
                axes.scatter(self._weights[index][0], self._weights[index][1], self._weights[index][2], c = color, marker = marker_type);

                if ( (self._conn_type != type_conn.func_neighbor) and (coupling != False) ):
                    for neighbor in self._neighbors[index]:
                        if (neighbor > index):
                            axes.plot([self._weights[index][0], self._weights[neighbor][0]], [self._weights[index][1], self._weights[neighbor][1]], [self._weights[index][2], self._weights[neighbor][2]], 'g-', linewidth = 0.5);

        plt.title("Network Structure");
        plt.grid();
        plt.show();
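The two branches flagged "View Code Duplication" in _adaptation perform the same radius check and weight update. Below is a minimal, hypothetical sketch of factoring that shared body into one helper; it is written as a standalone function with explicit parameters for readability, and inside the class it would become a private method taking self. The name update_in_radius is an assumption, not part of the analyzed file.

import math

def update_in_radius(weights, sqrt_distances, winner, neuron, x, learn_rate, local_radius):
    # shared body of both duplicated branches in som._adaptation:
    # move the neuron towards the input pattern x when it lies inside the local radius
    distance = sqrt_distances[winner][neuron]
    if distance < local_radius:
        influence = math.exp(-(distance / (2.0 * local_radius)))
        for i in range(len(weights[neuron])):
            weights[neuron][i] += learn_rate * influence * (x[i] - weights[neuron][i])

_adaptation would then call this helper once per candidate neuron, whether the candidates are all neurons (func_neighbor mode) or the winner plus self._neighbors[index], removing the duplicated block that the report counts towards the 1.24 % of duplicated lines.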