| Metric | Value |
| --- | --- |
| Total Complexity | 180 |
| Total Lines | 752 |
| Duplicated Lines | 0 % |
Complex classes like pyclustering.nnet.som often do a lot of different things. To break such a class down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields and methods belong together, you can apply the Extract Class refactoring (sketched below). If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster to apply.
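For example, the `show_*` methods of the class below share a prefix and all deal with plotting. A minimal sketch of what Extract Class could look like is given here; `som_visualizer` and its methods are hypothetical names used only for illustration and are not part of pyclustering:

```python
import matplotlib.pyplot as plt


class som_visualizer:
    """Hypothetical result of Extract Class: the plotting concerns of 'som'."""

    def __init__(self, network):
        self._network = network  # a trained som instance

    def show_distance_matrix(self):
        self._show_matrix(self._network.get_distance_matrix(), "U-Matrix")

    def show_density_matrix(self, surface_divider=20.0):
        self._show_matrix(self._network.get_density_matrix(surface_divider), "P-Matrix")

    def _show_matrix(self, matrix, title):
        # Shared plotting code that is duplicated across the original show_* methods.
        plt.imshow(matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
        plt.title(title)
        plt.colorbar()
        plt.show()
```

The `som` class would then keep only the map logic (training, competition, adaptation, matrix calculation) and delegate display to the extracted class.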
The file under review is `pyclustering/nnet/som.py`. The report shows file line 1 (the opening of the module docstring) and file lines 103–854, which contain the `som` class; file lines 2–102 are not included in the report.

```python
class som:
    """!
    @brief Represents self-organized feature map (SOM).

    Example:
    @code
        # sample for training
        sample_train = read_sample(file_train_sample);

        # create self-organized feature map with size 5x5
        network = som(5, 5);

        # train network on the sample during 100 epochs
        network.train(sample_train, 100);

        # simulate using another sample
        sample = read_sample(file_sample);
        index_winner = network.simulate(sample);

        # check what it is (what it looks like)
        index_similar_objects = network.capture_objects[index_winner];

    @endcode

    """

    # describe network
    _rows = 0;
    _cols = 0;
    _size = 0;
    _weights = None;            # Weights of each neuron (coordinates in data dimension in other words).
    _award = None;              # Lists of indexes of won points for each neuron.
    _data = None;               # Analyzed data.
    _conn_type = None;          # Type of connections between neuron.

    # just for convenience (avoid excess calculation during learning)
    _location = None;           # Location in grid.
    _sqrt_distances = None;
    _capture_objects = None;    # Store indexes of input points that were captured by each neurons individually at the end.
    _neighbors = None;          # Indexes of neighbours for each neuron.

    # describe learning process and internal state
    _epochs = 0;                # Iteration for learning.
    _params = None;

    # dynamic changes learning parameters
    _local_radius = 0.0;
    _learn_rate = 0.0;

    __ccore_som_pointer = None;


    @property
    def size(self):
        """!
        @return (uint) Size of self-organized map (number of neurons).

        """

        if (self.__ccore_som_pointer is not None):
            self._size = wrapper.som_get_size(self.__ccore_som_pointer);

        return self._size;

    @property
    def weights(self):
        """!
        @return (list) Weights of each neuron.

        """

        if (self.__ccore_som_pointer is not None):
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer);

        return self._weights;

    @property
    def awards(self):
        """!
        @return (list) Numbers of captured objects by each neuron.

        """

        if (self.__ccore_som_pointer is not None):
            self._award = wrapper.som_get_awards(self.__ccore_som_pointer);

        return self._award;

    @property
    def capture_objects(self):
        """!
        @return (list) Indexes of captured objects by each neuron.

        """

        if (self.__ccore_som_pointer is not None):
            self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer);

        return self._capture_objects;


    def __init__(self, rows, cols, conn_type = type_conn.grid_eight, parameters = None, ccore = False):
        """!
        @brief Constructor of self-organized map.

        @param[in] rows (uint): Number of neurons in the column (number of rows).
        @param[in] cols (uint): Number of neurons in the row (number of columns).
        @param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
        @param[in] parameters (som_parameters): Other specific parameters.
        @param[in] ccore (bool): If True, simulation is performed by the CCORE library (C++ implementation of pyclustering).

        """

        # some of these parameters are required despite core implementation, for example, for network demonstration.
        self._cols = cols;
        self._rows = rows;
        self._size = cols * rows;
        self._conn_type = conn_type;

        if (parameters is not None):
            self._params = parameters;
        else:
            self._params = som_parameters();

        if (self._params.init_radius is None):
            if ((cols + rows) / 4.0 > 1.0):
                self._params.init_radius = 2.0;
            elif ( (cols > 1) and (rows > 1) ):
                self._params.init_radius = 1.5;
            else:
                self._params.init_radius = 1.0;

        if (ccore is True):
            self.__ccore_som_pointer = wrapper.som_create(rows, cols, conn_type, self._params);

        else:
            # location
            self._location = list();
            for i in range(self._rows):
                for j in range(self._cols):
                    self._location.append([float(i), float(j)]);

            # awards
            self._award = [0] * self._size;
            self._capture_objects = [ [] for i in range(self._size) ];

            # distances
            self._sqrt_distances = [ [ [] for i in range(self._size) ] for j in range(self._size) ];
            for i in range(self._size):
                for j in range(i, self._size, 1):
                    dist = euclidean_distance_sqrt(self._location[i], self._location[j]);
                    self._sqrt_distances[i][j] = dist;
                    self._sqrt_distances[j][i] = dist;

            # connections
            if (conn_type != type_conn.func_neighbor):
                self._create_connections(conn_type);


    def __del__(self):
        """!
        @brief Destructor of the self-organized feature map.

        """

        if (self.__ccore_som_pointer is not None):
            wrapper.som_destroy(self.__ccore_som_pointer);


    def __len__(self):
        """!
        @return (uint) Size of self-organized map (number of neurons).

        """

        return self.size;


    def _create_initial_weights(self, init_type):
        """!
        @brief Creates initial weights for neurons in line with the specified initialization.

        @param[in] init_type (type_init): Type of initialization of initial neuron weights (random, random in center of the input data, random distributed in data, distributed in line with uniform grid).

        """

        dimension = len(self._data[0]);

        maximum_dimension = [self._data[0][i] for i in range(dimension)];
        minimum_dimension = [self._data[0][i] for i in range(dimension)];
        for i in range(len(self._data)):
            for dim in range(dimension):
                if (maximum_dimension[dim] < self._data[i][dim]):
                    maximum_dimension[dim] = self._data[i][dim];
                elif (minimum_dimension[dim] > self._data[i][dim]):
                    minimum_dimension[dim] = self._data[i][dim];

        # Increase border
        width_dimension = [0] * dimension;
        center_dimension = [0] * dimension;
        for dim in range(dimension):
            width_dimension[dim] = maximum_dimension[dim] - minimum_dimension[dim];
            center_dimension[dim] = (maximum_dimension[dim] + minimum_dimension[dim]) / 2;

        step_x = center_dimension[0];
        if (self._rows > 1): step_x = width_dimension[0] / (self._rows - 1);

        step_y = 0.0;
        if (dimension > 1):
            step_y = center_dimension[1];
            if (self._cols > 1): step_y = width_dimension[1] / (self._cols - 1);

        # generate weights (topological coordinates)
        random.seed();

        # Feature SOM 0002: Uniform grid.
        if (init_type == type_init.uniform_grid):
            # Predefined weights in line with input data.
            self._weights = [ [ [] for i in range(dimension) ] for j in range(self._size)];
            for i in range(self._size):
                location = self._location[i];
                for dim in range(dimension):
                    if (dim == 0):
                        if (self._rows > 1):
                            self._weights[i][dim] = minimum_dimension[dim] + step_x * location[dim];
                        else:
                            self._weights[i][dim] = center_dimension[dim];

                    elif (dim == 1):
                        if (self._cols > 1):
                            self._weights[i][dim] = minimum_dimension[dim] + step_y * location[dim];
                        else:
                            self._weights[i][dim] = center_dimension[dim];
                    else:
                        self._weights[i][dim] = center_dimension[dim];

        elif (init_type == type_init.random_surface):
            # Random weights at the full surface.
            self._weights = [ [random.uniform(minimum_dimension[i], maximum_dimension[i]) for i in range(dimension)] for j in range(self._size) ];

        elif (init_type == type_init.random_centroid):
            # Random weights at the center of input data.
            self._weights = [ [(random.random() + center_dimension[i]) for i in range(dimension)] for j in range(self._size) ];

        else:
            # Random weights of input data.
            self._weights = [ [random.random() for i in range(dimension)] for j in range(self._size) ];

    def _create_connections(self, conn_type):
        """!
        @brief Create connections in line with input rule (grid four, grid eight, honeycomb, function neighbour).

        @param[in] conn_type (type_conn): Type of connection between oscillators in the network.

        """

        self._neighbors = [[] for index in range(self._size)];

        for index in range(0, self._size, 1):
            upper_index = index - self._cols;
            upper_left_index = index - self._cols - 1;
            upper_right_index = index - self._cols + 1;

            lower_index = index + self._cols;
            lower_left_index = index + self._cols - 1;
            lower_right_index = index + self._cols + 1;

            left_index = index - 1;
            right_index = index + 1;

            node_row_index = math.floor(index / self._cols);
            upper_row_index = node_row_index - 1;
            lower_row_index = node_row_index + 1;

            if ( (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) ):
                if (upper_index >= 0):
                    self._neighbors[index].append(upper_index);

                if (lower_index < self._size):
                    self._neighbors[index].append(lower_index);

            if ( (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) or (conn_type == type_conn.honeycomb) ):
                if ( (left_index >= 0) and (math.floor(left_index / self._cols) == node_row_index) ):
                    self._neighbors[index].append(left_index);

                if ( (right_index < self._size) and (math.floor(right_index / self._cols) == node_row_index) ):
                    self._neighbors[index].append(right_index);


            if (conn_type == type_conn.grid_eight):
                if ( (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index) ):
                    self._neighbors[index].append(upper_left_index);

                if ( (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index) ):
                    self._neighbors[index].append(upper_right_index);

                if ( (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index) ):
                    self._neighbors[index].append(lower_left_index);

                if ( (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index) ):
                    self._neighbors[index].append(lower_right_index);


            if (conn_type == type_conn.honeycomb):
                if ( (node_row_index % 2) == 0):
                    upper_left_index = index - self._cols;
                    upper_right_index = index - self._cols + 1;

                    lower_left_index = index + self._cols;
                    lower_right_index = index + self._cols + 1;
                else:
                    upper_left_index = index - self._cols - 1;
                    upper_right_index = index - self._cols;

                    lower_left_index = index + self._cols - 1;
                    lower_right_index = index + self._cols;

                if ( (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index) ):
                    self._neighbors[index].append(upper_left_index);

                if ( (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index) ):
                    self._neighbors[index].append(upper_right_index);

                if ( (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index) ):
                    self._neighbors[index].append(lower_left_index);

                if ( (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index) ):
                    self._neighbors[index].append(lower_right_index);


    def _competition(self, x):
        """!
        @brief Calculates neuron winner (distance, neuron index).

        @param[in] x (list): Input pattern from the input data set, for example it can be coordinates of a point.

        @return (uint) Returns index of neuron that is the winner.

        """

        index = 0;
        minimum = euclidean_distance_sqrt(self._weights[0], x);

        for i in range(1, self._size, 1):
            candidate = euclidean_distance_sqrt(self._weights[i], x);
            if (candidate < minimum):
                index = i;
                minimum = candidate;

        return index;


    def _adaptation(self, index, x):
        """!
        @brief Change weights of neurons in line with the won neuron.

        @param[in] index (uint): Index of neuron-winner.
        @param[in] x (list): Input pattern from the input data set.

        """

        dimension = len(self._weights[0]);

        if (self._conn_type == type_conn.func_neighbor):
            for neuron_index in range(self._size):
                distance = self._sqrt_distances[index][neuron_index];

                if (distance < self._local_radius):
                    influence = math.exp( -( distance / (2.0 * self._local_radius) ) );

                    for i in range(dimension):
                        self._weights[neuron_index][i] = self._weights[neuron_index][i] + self._learn_rate * influence * (x[i] - self._weights[neuron_index][i]);

        else:
            for i in range(dimension):
                self._weights[index][i] = self._weights[index][i] + self._learn_rate * (x[i] - self._weights[index][i]);

            for neighbor_index in self._neighbors[index]:
                distance = self._sqrt_distances[index][neighbor_index]
                if (distance < self._local_radius):
                    influence = math.exp( -( distance / (2.0 * self._local_radius) ) );

                    for i in range(dimension):
                        self._weights[neighbor_index][i] = self._weights[neighbor_index][i] + self._learn_rate * influence * (x[i] - self._weights[neighbor_index][i]);


    def train(self, data, epochs, autostop = False):
        """!
        @brief Trains self-organized feature map (SOM).

        @param[in] data (list): Input data - list of points where each point is represented by a list of features, for example coordinates.
        @param[in] epochs (uint): Number of epochs for training.
        @param[in] autostop (bool): Automatic termination of the learning process when adaptation does not occur.

        @return (uint) Number of learning iterations.

        """

        if (self.__ccore_som_pointer is not None):
            return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop);

        for i in range(self._size):
            self._award[i] = 0;
            self._capture_objects[i].clear();

        self._epochs = epochs;
        self._data = data;

        # weights
        self._create_initial_weights(self._params.init_type);

        previous_weights = None;

        for epoch in range(1, self._epochs + 1):
            # Depression term of coupling
            self._local_radius = ( self._params.init_radius * math.exp(-(epoch / self._epochs)) ) ** 2;
            self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / self._epochs));

            #random.shuffle(self._data); # Random order

            # Feature SOM 0003: Clear statistics
            if (autostop == True):
                for i in range(self._size):
                    self._award[i] = 0;
                    self._capture_objects[i].clear();

            for i in range(len(self._data)):
                # Step 1: Competition:
                index = self._competition(self._data[i]);

                # Step 2: Adaptation:
                self._adaptation(index, self._data[i]);

                # Update statistics
                if ( (autostop == True) or (epoch == self._epochs) ):
                    self._award[index] += 1;
                    self._capture_objects[index].append(i);

            # Feature SOM 0003: Check requirement of stopping
            if (autostop == True):
                if (previous_weights is not None):
                    maximal_adaptation = self._get_maximal_adaptation(previous_weights);
                    if (maximal_adaptation < self._params.adaptation_threshold):
                        return epoch;

                previous_weights = [item[:] for item in self._weights];

        return self._epochs;

    def simulate(self, input_pattern):
        """!
        @brief Processes input pattern (no learning) and returns index of neuron-winner.
               Using the index of the neuron-winner, the captured objects can be obtained via the property capture_objects.

        @param[in] input_pattern (list): Input pattern.

        @return (uint) Returns index of neuron-winner.

        @see capture_objects

        """

        if (self.__ccore_som_pointer is not None):
            return wrapper.som_simulate(self.__ccore_som_pointer, [ input_pattern ]);

        return self._competition(input_pattern);


    def _get_maximal_adaptation(self, previous_weights):
        """!
        @brief Calculates maximum change of weight in line with comparison between previous weights and current weights.

        @param[in] previous_weights (list): Weights from the previous step of the learning process.

        @return (double) Value that represents maximum change of weight after the adaptation process.

        """

        dimension = len(self._data[0]);
        maximal_adaptation = 0.0;

        for neuron_index in range(self._size):
            for dim in range(dimension):
                current_adaptation = previous_weights[neuron_index][dim] - self._weights[neuron_index][dim];

                if (current_adaptation < 0): current_adaptation = -current_adaptation;

                if (maximal_adaptation < current_adaptation):
                    maximal_adaptation = current_adaptation;

        return maximal_adaptation;


    def get_winner_number(self):
        """!
        @brief Calculates the number of winners at the last step of the learning process.

        @return (uint) Number of winners.

        """

        if (self.__ccore_som_pointer is not None):
            self._award = wrapper.som_get_awards(self.__ccore_som_pointer);

        winner_number = 0;
        for i in range(self._size):
            if (self._award[i] > 0):
                winner_number += 1;

        return winner_number;


    def show_distance_matrix(self):
        """!
        @brief Shows gray visualization of U-matrix (distance matrix).

        @see get_distance_matrix()

        """
        distance_matrix = self.get_distance_matrix();

        plt.imshow(distance_matrix, cmap = plt.get_cmap('hot'), interpolation='kaiser');
        plt.title("U-Matrix");
        plt.colorbar();
        plt.show();


    def get_distance_matrix(self):
        """!
        @brief Calculates distance matrix (U-matrix).
        @details The U-Matrix is based on the distance in input space between a weight vector and its neighbors on the map.

        @return (list) Distance matrix (U-matrix).

        @see show_distance_matrix()
        @see get_density_matrix()

        """
        if (self.__ccore_som_pointer is not None):
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer);

            if (self._conn_type != type_conn.func_neighbor):
                self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer);

        distance_matrix = [ [0.0] * self._cols for i in range(self._rows) ];

        for i in range(self._rows):
            for j in range(self._cols):
                neuron_index = i * self._cols + j;

                if (self._conn_type == type_conn.func_neighbor):
                    self._create_connections(type_conn.grid_eight);

                for neighbor_index in self._neighbors[neuron_index]:
                    distance_matrix[i][j] += euclidean_distance_sqrt(self._weights[neuron_index], self._weights[neighbor_index]);

                distance_matrix[i][j] /= len(self._neighbors[neuron_index]);

        return distance_matrix;


    def show_density_matrix(self, surface_divider = 20.0):
        """!
        @brief Shows density matrix (P-matrix) using kernel density estimation.

        @param[in] surface_divider (double): Divider in each dimension that affects the radius for density measurement.

        @see show_distance_matrix()

        """
        density_matrix = self.get_density_matrix(surface_divider);

        plt.imshow(density_matrix, cmap = plt.get_cmap('hot'), interpolation='kaiser');
        plt.title("P-Matrix");
        plt.colorbar();
        plt.show();


    def get_density_matrix(self, surface_divider = 20.0):
        """!
        @brief Calculates density matrix (P-Matrix).

        @param[in] surface_divider (double): Divider in each dimension that affects the radius for density measurement.

        @return (list) Density matrix (P-Matrix).

        @see get_distance_matrix()

        """

        if (self.__ccore_som_pointer is not None):
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer);

        density_matrix = [ [0] * self._cols for i in range(self._rows) ];
        dimension = len(self._weights[0]);

        dim_max = [ float('-Inf') ] * dimension;
        dim_min = [ float('Inf') ] * dimension;

        for weight in self._weights:
            for index_dim in range(dimension):
                if (weight[index_dim] > dim_max[index_dim]):
                    dim_max[index_dim] = weight[index_dim];

                if (weight[index_dim] < dim_min[index_dim]):
                    dim_min[index_dim] = weight[index_dim];

        radius = [0.0] * len(self._weights[0]);
        for index_dim in range(dimension):
            radius[index_dim] = ( dim_max[index_dim] - dim_min[index_dim] ) / surface_divider;

        for point in self._data:
            for index_neuron in range(len(self)):
                point_covered = True;

                for index_dim in range(dimension):
                    if (abs(point[index_dim] - self._weights[index_neuron][index_dim]) > radius[index_dim]):
                        point_covered = False;
                        break;

                row = math.floor(index_neuron / self._cols);
                col = index_neuron - row * self._cols;

                if (point_covered is True):
                    density_matrix[row][col] += 1;

        return density_matrix;


    def show_winner_matrix(self):
        """!
        @brief Shows winner matrix where each element corresponds to a neuron and the value represents
               the amount of won objects from the input dataspace at the last training iteration.

        @see show_distance_matrix()

        """

        if (self.__ccore_som_pointer is not None):
            self._award = wrapper.som_get_awards(self.__ccore_som_pointer);

        (fig, ax) = plt.subplots();
        winner_matrix = [ [0] * self._cols for i in range(self._rows) ];

        for i in range(self._rows):
            for j in range(self._cols):
                neuron_index = i * self._cols + j;

                winner_matrix[i][j] = self._award[neuron_index];
                ax.text(i, j, str(winner_matrix[i][j]), va='center', ha='center')

        ax.imshow(winner_matrix, cmap = plt.get_cmap('cool'), interpolation='none');
        ax.grid(True);

        plt.title("Winner Matrix");
        plt.show();


    def show_network(self, awards = False, belongs = False, coupling = True, dataset = True, marker_type = 'o'):
        """!
        @brief Shows neurons in the dimension of the data.

        @param[in] awards (bool): If True - displays how many objects each neuron won.
        @param[in] belongs (bool): If True - marks each won object with the index of its neuron-winner (only when the dataset is displayed too).
        @param[in] coupling (bool): If True - displays connections between neurons (except the case when function neighbor is used).
        @param[in] dataset (bool): If True - displays the input data set.
        @param[in] marker_type (string): Defines the marker that is used for displaying neurons in the network.

        """

        if (self.__ccore_som_pointer is not None):
            self._size = wrapper.som_get_size(self.__ccore_som_pointer);
            self._weights = wrapper.som_get_weights(self.__ccore_som_pointer);
            self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer);
            self._award = wrapper.som_get_awards(self.__ccore_som_pointer);

        dimension = len(self._weights[0]);

        fig = plt.figure();
        axes = None;

        # Check for dimensions
        if ( (dimension == 1) or (dimension == 2) ):
            axes = fig.add_subplot(111);
        elif (dimension == 3):
            axes = fig.gca(projection='3d');
        else:
            raise NameError('Drawer supports only 1D, 2D and 3D data representation');


        # Show data
        if (dataset == True):
            for x in self._data:
                if (dimension == 1):
                    axes.plot(x[0], 0.0, 'b|', ms = 30);

                elif (dimension == 2):
                    axes.plot(x[0], x[1], 'b.');

                elif (dimension == 3):
                    axes.scatter(x[0], x[1], x[2], c = 'b', marker = '.');

        # Show neurons
        for index in range(self._size):
            color = 'g';
            if (self._award[index] == 0): color = 'y';

            if (dimension == 1):
                axes.plot(self._weights[index][0], 0.0, color + marker_type);

                if (awards == True):
                    location = '{0}'.format(self._award[index]);
                    axes.text(self._weights[index][0], 0.0, location, color='black', fontsize = 10);

                if (belongs == True):
                    location = '{0}'.format(index);
                    axes.text(self._weights[index][0], 0.0, location, color='black', fontsize = 12);
                    for k in range(len(self._capture_objects[index])):
                        point = self._data[self._capture_objects[index][k]];
                        axes.text(point[0], 0.0, location, color='blue', fontsize = 10);

            if (dimension == 2):
                axes.plot(self._weights[index][0], self._weights[index][1], color + marker_type);

                if (awards == True):
                    location = '{0}'.format(self._award[index]);
                    axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize = 10);

                if (belongs == True):
                    location = '{0}'.format(index);
                    axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize = 12);
                    for k in range(len(self._capture_objects[index])):
                        point = self._data[self._capture_objects[index][k]];
                        axes.text(point[0], point[1], location, color='blue', fontsize = 10);

                if ( (self._conn_type != type_conn.func_neighbor) and (coupling != False) ):
                    for neighbor in self._neighbors[index]:
                        if (neighbor > index):
                            axes.plot([self._weights[index][0], self._weights[neighbor][0]], [self._weights[index][1], self._weights[neighbor][1]], 'g', linewidth = 0.5);

            elif (dimension == 3):
                axes.scatter(self._weights[index][0], self._weights[index][1], self._weights[index][2], c = color, marker = marker_type);

                if ( (self._conn_type != type_conn.func_neighbor) and (coupling != False) ):
                    for neighbor in self._neighbors[index]:
                        if (neighbor > index):
                            axes.plot([self._weights[index][0], self._weights[neighbor][0]], [self._weights[index][1], self._weights[neighbor][1]], [self._weights[index][2], self._weights[neighbor][2]], 'g-', linewidth = 0.5);


        plt.title("Network Structure");
        plt.grid();
        plt.show();
```
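For orientation, here is a minimal usage sketch of the pure-Python path of this class (leaving `ccore` at its default of `False`), matching the constructor and `train()` signatures shown above. The sample coordinates are made up for illustration and assume a pyclustering installation that matches this version of the module:

```python
from pyclustering.nnet.som import som, type_conn

# A tiny, made-up 2D data set: two loose groups of points.
sample = [[1.0, 1.2], [1.1, 0.9], [0.9, 1.1],
          [5.0, 5.1], [5.2, 4.9], [4.8, 5.0]]

# 2x2 map with four-neighbour grid connections, pure Python implementation.
network = som(2, 2, type_conn.grid_four)
iterations = network.train(sample, 100)         # returns the number of epochs actually run

print("epochs run:", iterations)
print("winners:", network.get_winner_number())  # neurons that captured at least one point
print("awards:", network.awards)                # how many points each neuron captured

# Find the best-matching neuron for a new point and the points it captured.
winner = network.simulate([1.0, 1.0])
print("captured by winner:", network.capture_objects[winner])
```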