"""!

@brief Cluster analysis algorithm: X-Means
@details Based on article description:
         - D.Pelleg, A.Moore. X-means: Extending K-means with Efficient Estimation of the Number of Clusters. 2000.

@authors Andrei Novikov ([email protected])
@date 2014-2016
@copyright GNU Public License

@cond GNU_PUBLIC_LICENSE
    PyClustering is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.
    
    PyClustering is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
    
    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
@endcond

"""

import numpy;
import math;
import random;

from enum import IntEnum;

import pyclustering.core.wrapper as wrapper;

from pyclustering.utils import euclidean_distance, euclidean_distance_sqrt;
from pyclustering.utils import list_math_addition_number, list_math_addition, list_math_multiplication, list_math_division_number, list_math_subtraction;


class splitting_type(IntEnum):
    """!
    @brief Enumeration of splitting types that can be used for cluster splitting in the X-Means algorithm.
    
    """
    
    ## Bayesian information criterion to approximate the correct number of clusters.
    BAYESIAN_INFORMATION_CRITERION = 0;
    
    ## Minimum noiseless description length to approximate the correct number of clusters.
    MINIMUM_NOISELESS_DESCRIPTION_LENGTH = 1;


class xmeans:
    """!
    @brief Class represents clustering algorithm X-Means.
    
    Example:
    @code
        # sample for cluster analysis (represented by list)
        sample = read_sample(path_to_sample);
        
        # create object of X-Means algorithm that uses CCORE for processing
        # initial centers - optional parameter, if it is None, then random center will be used by the algorithm
        initial_centers = [ [0.0, 0.5] ];
        xmeans_instance = xmeans(sample, initial_centers, ccore = True);
        
        # run cluster analysis
        xmeans_instance.process();
        
        # obtain results of clustering
        clusters = xmeans_instance.get_clusters();
        
        # display allocated clusters
        draw_clusters(sample, clusters);
    @endcode
    
    """
    
    def __init__(self, data, initial_centers = None, kmax = 20, tolerance = 0.025, criterion = splitting_type.BAYESIAN_INFORMATION_CRITERION, ccore = False):
        """!
        @brief Constructor of clustering algorithm X-Means.
        
        @param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.
        @param[in] initial_centers (list): Initial coordinates of centers of clusters that are represented by list: [center1, center2, ...],
                    if it is not specified then X-Means starts from the random center.
        @param[in] kmax (uint): Maximum number of clusters that can be allocated.
        @param[in] tolerance (double): Stop condition for each iteration: if the maximum change of cluster centers is less than tolerance, then the algorithm stops processing.
        @param[in] criterion (splitting_type): Type of splitting creation.
        @param[in] ccore (bool): Defines whether CCORE (C++ pyclustering library) should be used instead of Python code.
        
        """
        
        self.__pointer_data = data;
        self.__clusters = [];
        
        if (initial_centers is not None):
            self.__centers = initial_centers[:];
        else:
            self.__centers = [ [random.random() for _ in range(len(data[0])) ] ];
        
        self.__kmax = kmax;
        self.__tolerance = tolerance;
        self.__criterion = criterion;
        
        self.__ccore = ccore;
        
    def process(self):
        """!
        @brief Performs cluster analysis in line with rules of X-Means algorithm.
        
        @remark Results of clustering can be obtained using corresponding get methods.
        
        @see get_clusters()
        @see get_centers()
        
        """
        
        if (self.__ccore is True):
            self.__clusters = wrapper.xmeans(self.__pointer_data, self.__centers, self.__kmax, self.__tolerance);
            self.__clusters = [ cluster for cluster in self.__clusters if len(cluster) > 0 ];
            
            self.__centers = self.__update_centers(self.__clusters);
        else:
            self.__clusters = [];
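            # Pure Python processing: alternate between two phases until the amount of
            # centers stops growing or kmax is reached:
            #   1. __improve_parameters - refine the current centers with k-means;
            #   2. __improve_structure  - try to split every cluster in two and keep the
            #      split when the configured splitting criterion prefers the children.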
            while ( len(self.__centers) < self.__kmax ):
                current_cluster_number = len(self.__centers);
                
                (self.__clusters, self.__centers) = self.__improve_parameters(self.__centers);
                allocated_centers = self.__improve_structure(self.__clusters, self.__centers);
                
                if ( (current_cluster_number == len(allocated_centers)) ):
                    break;
                else:
                    self.__centers = allocated_centers;
    
    
    def get_clusters(self):
        """!
        @brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.
        
        @return (list) List of allocated clusters.
        
        @see process()
        @see get_centers()
        
        """
        
        return self.__clusters;
    
    
    def get_centers(self):
        """!
        @brief Returns list of centers for allocated clusters.
        
        @return (list) List of centers for allocated clusters.
        
        @see process()
        @see get_clusters()
        
        """
        
        return self.__centers;
    
    
    def __improve_parameters(self, centers, available_indexes = None):
        """!
        @brief Performs k-means clustering in the specified region.
        
        @param[in] centers (list): Centers of clusters.
        @param[in] available_indexes (list): Indexes that define which points can be used for k-means clustering, if None - then all points are used.
        
        @return (tuple) Tuple of allocated clusters and updated centers; each cluster contains indexes of objects in list of data.
        
        """
        
        changes = numpy.Inf;
        
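        # Center movement is measured with squared Euclidean distance, so the tolerance
        # is squared once here instead of taking a square root on every iteration.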
        stop_condition = self.__tolerance * self.__tolerance; # Fast solution
        
        clusters = [];
        
        while (changes > stop_condition):
            clusters = self.__update_clusters(centers, available_indexes);
            clusters = [ cluster for cluster in clusters if len(cluster) > 0 ];
            
            updated_centers = self.__update_centers(clusters);
            
            changes = max([euclidean_distance_sqrt(centers[index], updated_centers[index]) for index in range(len(updated_centers))]);    # Fast solution
            
            centers = updated_centers;
        
        return (clusters, centers);
    
    
    def __improve_structure(self, clusters, centers):
        """!
        @brief Checks for the best structure: divides each cluster into two and checks for better results using the splitting criterion.
        
        @param[in] clusters (list): Clusters that have been allocated (each cluster contains indexes of points from data).
        @param[in] centers (list): Centers of clusters.
        
        @return (list) Allocated centers for clustering.
        
        """
        
        difference = 0.001;
        
        allocated_centers = [];
        
        for index_cluster in range(len(clusters)):
            # split cluster into two child clusters
            parent_child_centers = [];
            parent_child_centers.append(list_math_addition_number(centers[index_cluster], -difference));
            parent_child_centers.append(list_math_addition_number(centers[index_cluster], difference));
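            # The child centers are the parent center shifted by -difference and +difference
            # in each dimension, which gives the local k-means two distinct starting points.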
          
            # solve k-means problem for children where data of parent are used.
            (parent_child_clusters, parent_child_centers) = self.__improve_parameters(parent_child_centers, clusters[index_cluster]);
            
            # If it's possible to split current data
            if (len(parent_child_clusters) > 1):
                # Calculate splitting criterion
                parent_scores = self.__splitting_criterion([ clusters[index_cluster] ], [ centers[index_cluster] ]);
                child_scores = self.__splitting_criterion([ parent_child_clusters[0], parent_child_clusters[1] ], parent_child_centers);
                
                split_require = False;
                
                # Reallocate number of centers (clusters) in line with scores
                if (self.__criterion == splitting_type.BAYESIAN_INFORMATION_CRITERION):
                    if (parent_scores < child_scores): split_require = True;
                    
                elif (self.__criterion == splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH):
                    if (parent_scores > child_scores): split_require = True;
                    
                if (split_require is True):
                    allocated_centers.append(parent_child_centers[0]);
                    allocated_centers.append(parent_child_centers[1]);
                else:
                    allocated_centers.append(centers[index_cluster]);
                    
            else:
                allocated_centers.append(centers[index_cluster]);
        
        return allocated_centers;
    
    
    def __splitting_criterion(self, clusters, centers):
        """!
        @brief Calculates splitting criterion for input clusters.
        
        @param[in] clusters (list): Clusters for which splitting criterion should be calculated.
        @param[in] centers (list): Centers of the clusters.
        
        @return (double) Returns splitting criterion. For BAYESIAN_INFORMATION_CRITERION a higher value means a better structure,
                for MINIMUM_NOISELESS_DESCRIPTION_LENGTH a lower value means a better structure.
        
        @see __bayesian_information_criterion(clusters, centers)
        @see __minimum_noiseless_description_length(clusters, centers)
        
        """
        
        if (self.__criterion == splitting_type.BAYESIAN_INFORMATION_CRITERION):
            return self.__bayesian_information_criterion(clusters, centers);
        
        elif (self.__criterion == splitting_type.MINIMUM_NOISELESS_DESCRIPTION_LENGTH):
            return self.__minimum_noiseless_description_length(clusters, centers);
        
        else:
            assert 0;
    
    
    def __minimum_noiseless_description_length(self, clusters, centers):
        """!
        @brief Calculates splitting criterion for input clusters using minimum noiseless description length criterion.
        
        @param[in] clusters (list): Clusters for which splitting criterion should be calculated.
        @param[in] centers (list): Centers of the clusters.
        
        @return (double) Returns splitting criterion in line with minimum noiseless description length criterion.
                Low value of splitting criterion means that current structure is much better.
        
        @see __bayesian_information_criterion(clusters, centers)
        
        """
        
        scores = [0.0] * len(clusters);
        
        W = 0.0;
        K = len(clusters);
        N = 0.0;
        
        sigma_sqrt = 0.0;
        
        alpha = 0.9;
        betta = 0.9;
        
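        # W accumulates the within-cluster scatter (normalized by N below), sigma_sqrt is the
        # pooled variance estimate normalized by (N - K); alpha and betta weight the terms of
        # the noiseless description length estimate computed in the loop below.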
        for index_cluster in range(0, len(clusters), 1):
            for index_object in clusters[index_cluster]:
                delta_vector = list_math_subtraction(self.__pointer_data[index_object], centers[index_cluster]);
                delta_sqrt = sum(list_math_multiplication(delta_vector, delta_vector));
                
                W += delta_sqrt;
                sigma_sqrt += delta_sqrt;
            
            N += len(clusters[index_cluster]);
        
        if (N - K != 0):
            W /= N;
            
            sigma_sqrt /= (N - K);
            sigma = sigma_sqrt ** 0.5;
            
            for index_cluster in range(0, len(clusters), 1):
                Kw = (1.0 - K / N) * sigma_sqrt;
                Ks = ( 2.0 * alpha * sigma / (N ** 0.5) ) + ( (alpha ** 2.0) * sigma_sqrt / N + W - Kw / 2.0 ) ** 0.5;
                U = W - Kw + 2.0 * (alpha ** 2.0) * sigma_sqrt / N + Ks;
                
                Z = K * sigma_sqrt / N + U + betta * ( (2.0 * K) ** 0.5 ) * sigma_sqrt / N;
                
                if (Z == 0.0):
                    scores[index_cluster] = float("inf");
                else:
                    scores[index_cluster] = Z;
                
        else:
            scores = [float("inf")] * len(clusters);
        
        return sum(scores);
    
    def __bayesian_information_criterion(self, clusters, centers):
        """!
        @brief Calculates splitting criterion for input clusters using bayesian information criterion.
        
        @param[in] clusters (list): Clusters for which splitting criterion should be calculated.
        @param[in] centers (list): Centers of the clusters.
        
        @return (double) Splitting criterion in line with bayesian information criterion.
                High value of splitting criterion means that current structure is much better.
        
        @see __minimum_noiseless_description_length(clusters, centers)
        
        """
        
        scores = [0.0] * len(clusters);     # splitting criterion
        dimension = len(self.__pointer_data[0]);
        
        # estimation of the noise variance in the data set
        sigma = 0.0;
        K = len(clusters);
        N = 0.0;
        
        for index_cluster in range(0, len(clusters), 1):
            for index_object in clusters[index_cluster]:
                sigma += (euclidean_distance(self.__pointer_data[index_object], centers[index_cluster]));  # It works
            
            N += len(clusters[index_cluster]);
        
        if (N - K != 0):
            sigma /= (N - K);
            
            # splitting criterion
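            # Per-cluster score as implemented below:
            #   n*ln(n) - n*ln(N) - n*ln(2*pi)/2 - n*d*ln(sigma)/2 - (n - K)/2
            # where n is the cluster size, N the total number of points, d the dimension,
            # K the number of clusters and sigma the pooled distance-based noise estimate.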
            for index_cluster in range(0, len(clusters), 1):
                n = len(clusters[index_cluster]);
                
                if (sigma > 0.0):
                    scores[index_cluster] = n * math.log(n) - n * math.log(N) - n * math.log(2.0 * numpy.pi) / 2.0 - n * dimension * math.log(sigma) / 2.0 - (n - K) / 2.0;
        
        return sum(scores);
    
    
    def __update_clusters(self, centers, available_indexes = None):
        """!
        @brief Calculates Euclidean distance from each point to each cluster center.
               Each point is assigned to its nearest center and as a result clusters are updated.
        
        @param[in] centers (list): Coordinates of centers of clusters that are represented by list: [center1, center2, ...].
        @param[in] available_indexes (list): Indexes that define which points can be used from input data, if None - then all points are used.
        
        @return (list) Updated clusters.
        
        """
        
        bypass = None;
        if (available_indexes is None):
            bypass = range(len(self.__pointer_data));
        else:
            bypass = available_indexes;
        
        clusters = [[] for i in range(len(centers))];
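        # Assign every point to its nearest center; squared Euclidean distance preserves the
        # ordering of distances, so the square root can be skipped.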
        for index_point in bypass:
            index_optim = -1;
            dist_optim = 0.0;
            
            for index in range(len(centers)):
                # dist = euclidean_distance(data[index_point], centers[index]);         # Slow solution
                dist = euclidean_distance_sqrt(self.__pointer_data[index_point], centers[index]);      # Fast solution
                
                if ( (dist < dist_optim) or (index == 0) ):
                    index_optim = index;
                    dist_optim = dist;
            
            clusters[index_optim].append(index_point);
        
        return clusters;
    
    
    def __update_centers(self, clusters):
        """!
        @brief Updates centers of clusters in line with contained objects.
        
        @param[in] clusters (list): Clusters that contain indexes of objects from data.
        
        @return (list) Updated centers.
        
        """
        
        centers = [[] for i in range(len(clusters))];
        dimension = len(self.__pointer_data[0]);
        
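        # Each new center is the component-wise mean of the points in its cluster; callers
        # filter out empty clusters beforehand, so the division below is safe.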
        for index in range(len(clusters)):
            point_sum = [0.0] * dimension;
            
            for index_point in clusters[index]:
                point_sum = list_math_addition(point_sum, self.__pointer_data[index_point]);
            
            centers[index] = list_math_division_number(point_sum, len(clusters[index]));
        
        return centers;