"""!

@brief Double-layer oscillatory network with phase oscillator for image segmentation.
@details Implementation based on article:
         - A.Novikov, E.Benderskaya. Oscillatory Network Based on Kuramoto Model for Image Segmentation. 2015.

@authors Andrei Novikov ([email protected])
@date 2014-2016
@copyright GNU Public License

@cond GNU_PUBLIC_LICENSE
    PyClustering is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    PyClustering is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
@endcond

"""

from math import floor;

from PIL import Image;

from pyclustering.cluster.syncnet import syncnet;

from pyclustering.nnet import solve_type;
from pyclustering.nnet.sync import sync_visualizer;

from pyclustering.utils import read_image;

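
# The classes below are driven by phase oscillators of the Kuramoto type (see the
# referenced article); the actual dynamic is simulated by 'syncnet' and its CCORE
# backend. The helper below is only an illustrative sketch of one explicit Euler step
# of the classic (unmodified) Kuramoto model,
#
#     d(phi_i)/dt = w_i + (K / N) * sum_j sin(phi_j - phi_i),
#
# and every name and parameter in it is an assumption made for demonstration purposes.
def _kuramoto_phase_step(phases, frequencies, coupling, step):
    from math import sin;

    amount = len(phases);
    updated = [0.0] * amount;

    for i in range(amount):
        # coupling term: attraction of oscillator i towards the phases of all oscillators
        influence = sum( sin(phases[j] - phases[i]) for j in range(amount) );
        updated[i] = phases[i] + step * (frequencies[i] + (coupling / amount) * influence);

    return updated;

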
class syncsegm_visualizer:
    """!
    @brief Result visualizer of double-layer oscillatory network 'syncsegm'.

    """

    @staticmethod
    def show_first_layer_dynamic(analyser):
        """!
        @brief Shows output dynamic of the first layer.

        @param[in] analyser (syncsegm_analyser): Analyser of output dynamic of the 'syncsegm' oscillatory network.

        """

        sync_visualizer.show_output_dynamic(analyser.get_first_layer_analyser());


    @staticmethod
    def show_second_layer_dynamic(analyser):
        """!
        @brief Shows output dynamic of the second layer.

        @param[in] analyser (syncsegm_analyser): Analyser of output dynamic of the 'syncsegm' oscillatory network.

        """

        second_layer_analysers = analyser.get_second_layer_analysers();
        analysers_sequence = [ object_segment_analyser['analyser'] for object_segment_analyser in second_layer_analysers ]

        sync_visualizer.show_output_dynamics(analysers_sequence);

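
# Illustrative usage sketch - a hypothetical helper that is not part of the original
# API: both visualizer methods expect the 'syncsegm_analyser' returned by
# syncsegm.process(); the full dynamic is available only when 'collect_dynamic' was
# set to True, and the second layer can be shown only if object segmentation was
# performed (i.e. 'object_radius' was not None).
def _demo_show_dynamics(analyser):
    syncsegm_visualizer.show_first_layer_dynamic(analyser);
    syncsegm_visualizer.show_second_layer_dynamic(analyser);

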
class syncsegm_analyser:
    """!
    @brief Performs analysis of output dynamic of the double-layer oscillatory network 'syncsegm' to extract information about segmentation results.

    """

    def __init__(self, color_analyser, object_segment_analysers = None):
        """!
        @brief Constructor of the analyser.

        @param[in] color_analyser (list): Analyser of coloring segmentation results of the first layer.
        @param[in] object_segment_analysers (list): Analysers of objects on image segments - results of the second layer.

        """

        self.__color_analyser = color_analyser;
        self.__object_segment_analysers = object_segment_analysers;


    def get_first_layer_analyser(self):
        """!
        @brief Returns analyser of coloring segmentation of the first layer.

        """

        return self.__color_analyser;


    def get_second_layer_analysers(self):
        """!
        @brief Returns analysers of object segmentation of the second layer.

        """

        return self.__object_segment_analysers;


    def allocate_colors(self, eps = 0.01, noise_size = 1):
        """!
        @brief Allocates color segments.

        @param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
        @param[in] noise_size (uint): Noise threshold - segments whose size (in pixels) is less than the threshold are considered noise.

        @return (list) Color segments where each color segment consists of indexes of pixels that form the segment.

        """

        segments = self.__color_analyser.allocate_clusters(eps);
        real_segments = [cluster for cluster in segments if len(cluster) > noise_size];
        return real_segments;


    def allocate_objects(self, eps = 0.01, noise_size = 1):
        """!
        @brief Allocates object segments.

        @param[in] eps (double): Tolerance level that defines the maximal difference between phases of oscillators in one segment.
        @param[in] noise_size (uint): Noise threshold - segments whose size (in pixels) is less than the threshold are considered noise.

        @return (list) Object segments where each object segment consists of indexes of pixels that form the segment.

        """

        if (self.__object_segment_analysers is None):
            return [];

        segments = [];
        for object_segment_analyser in self.__object_segment_analysers:
            indexes = object_segment_analyser['color_segment'];
            analyser = object_segment_analyser['analyser'];

            segments += analyser.allocate_clusters(eps, indexes);

        real_segments = [segment for segment in segments if len(segment) > noise_size];
        return real_segments;

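
# Illustrative sketch - a hypothetical helper that is not part of the original API:
# segments are returned as plain lists of pixel indexes, so the size of a segment in
# pixels is simply its length; the default eps and noise_size values below are
# arbitrary example values.
def _print_segment_sizes(analyser, eps = 0.01, noise_size = 10):
    color_segments = analyser.allocate_colors(eps, noise_size);
    object_segments = analyser.allocate_objects(eps, noise_size);

    print("color segment sizes (pixels):", [ len(segment) for segment in color_segments ]);
    print("object segment sizes (pixels):", [ len(segment) for segment in object_segments ]);

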
class syncsegm:
    """!
    @brief Class represents the image segmentation algorithm 'syncsegm'.
    @details syncsegm is a bio-inspired algorithm that is based on a double-layer oscillatory network that uses the modified Kuramoto model.
             The algorithm extracts colors and colored objects. Only CCORE (the C++ implementation of pyclustering) is used to implement the algorithm.

        Example:
        @code
            # create oscillatory network for image segmentation - extract colors (radius 128) and objects (radius 4),
            # and ignore noise (segments with size that is less than 10 pixels)
            algorithm = syncsegm(128, 4, 10);

            # extract segments (colors and objects)
            analyser = algorithm.process(path_to_file);

            # obtain segmentation results (only colors - from the first layer)
            color_segments = analyser.allocate_colors(0.01, 10);
            draw_image_mask_segments(path_to_file, color_segments);

            # obtain segmentation results (objects - from the second layer)
            object_segments = analyser.allocate_objects(0.01, 10);
            draw_image_mask_segments(path_to_file, object_segments);
        @endcode

    """

    def __init__(self, color_radius, object_radius, noise_size = 0):
        """!
        @brief Constructor of the double-layer oscillatory network 'syncsegm' for image segmentation.

        @param[in] color_radius (double): Radius of color connectivity (color similarity) for the first layer.
        @param[in] object_radius (double): Radius of object connectivity (object similarity) for the second layer,
                    if 'None' then object segmentation is not performed (only color segmentation).
        @param[in] noise_size (double): Size of segment that should be considered as noise and ignored by the second layer.

        """

        self.__color_radius = color_radius;
        self.__object_radius = object_radius;
        self.__noise_size = noise_size;

        self.__order_color = 0.9995;
        self.__order_object = 0.999;

        self.__network = None;


    def process(self, image_source, collect_dynamic = False, order_color = 0.9995, order_object = 0.999):
        """!
        @brief Performs image segmentation.

        @param[in] image_source (string): Path to image file that should be processed.
        @param[in] collect_dynamic (bool): If 'True' then the whole dynamic of each layer of the network is collected.
        @param[in] order_color (double): Local synchronization order for the first layer - coloring segmentation.
        @param[in] order_object (double): Local synchronization order for the second layer - object segmentation.

        @return (syncsegm_analyser) Analyser of segmentation results by the network.

        """

        self.__order_color = order_color;
        self.__order_object = order_object;

        data = read_image(image_source);
        color_analyser = self.__analyse_colors(data, collect_dynamic);

        if (self.__object_radius is None):
            return syncsegm_analyser(color_analyser, None);

        object_segment_analysers = self.__analyse_objects(image_source, color_analyser, collect_dynamic);
        return syncsegm_analyser(color_analyser, object_segment_analysers);


    def __analyse_colors(self, image_source, collect_dynamic):
        """!
        @brief Performs color segmentation by the first layer.

        @param[in] image_source (list): Image data (color values of pixels) that should be processed.
        @param[in] collect_dynamic (bool): If 'True' then the whole dynamic of the first layer of the network is collected.

        @return (syncnet_analyser) Analyser of color segmentation results of the first layer.

        """

        network = syncnet(image_source, self.__color_radius, ccore = True);
        analyser = network.process(self.__order_color, solve_type.FAST, collect_dynamic);

        return analyser;


    def __analyse_objects(self, image_source, color_analyser, collect_dynamic):
        """!
        @brief Performs object segmentation by the second layer.

        @param[in] image_source (string): Path to image file that should be processed.
        @param[in] color_analyser (syncnet_analyser): Analyser of color segmentation results.
        @param[in] collect_dynamic (bool): If 'True' then the whole dynamic of the second layer of the network is collected.

        @return (list) Analysers of object segments - each entry is a dict with keys 'color_segment' and 'analyser'.

        """

        # continue analysis
        pointer_image = Image.open(image_source);
        image_size = pointer_image.size;

        object_analysers = [];

        color_segments = color_analyser.allocate_clusters();

        for segment in color_segments:
            object_analyser = self.__analyse_color_segment(image_size, segment, collect_dynamic);
            if (object_analyser is not None):
                object_analysers.append( { 'color_segment': segment, 'analyser': object_analyser } );

        return object_analysers;


    def __analyse_color_segment(self, image_size, color_segment, collect_dynamic):
        """!
        @brief Performs object segmentation of a separate color segment.

        @param[in] image_size (list): Image size presented as a [width x height].
        @param[in] color_segment (list): Image segment that should be processed.
        @param[in] collect_dynamic (bool): If 'True' then the whole dynamic of the second layer of the network is collected.

        @return (syncnet_analyser) Analyser of object segmentation results of the second layer.

        """
        coordinates = self.__extract_location_coordinates(image_size, color_segment);

        if (len(coordinates) < self.__noise_size):
            return None;

        network = syncnet(coordinates, self.__object_radius, ccore = True);
        analyser = network.process(self.__order_object, solve_type.FAST, collect_dynamic);

        return analyser;


    def __extract_location_coordinates(self, image_size, color_segment):
        """!
        @brief Extracts coordinates of pixels of the specified image segment.

        @param[in] image_size (list): Image size presented as a [width x height].
        @param[in] color_segment (list): Image segment whose coordinates should be extracted.

        @return (list) Coordinates of each pixel.

        """
        coordinates = [];
        for index in color_segment:
            y = floor(index / image_size[0]);
            x = index - y * image_size[0];

            coordinates.append([x, y]);

        return coordinates;

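
if __name__ == '__main__':
    # Minimal end-to-end sketch under the following assumptions: an image exists at the
    # hypothetical path 'sample.png', and 'draw_image_mask_segments' is available from
    # pyclustering.utils (as used in the class example above); radii, eps and noise
    # sizes repeat the values from that example and are not tuned.
    from pyclustering.utils import draw_image_mask_segments;

    path_to_file = 'sample.png';

    # first layer segments by color (radius 128), second layer segments objects inside
    # each color segment (radius 4); segments smaller than 10 pixels are ignored
    algorithm = syncsegm(128, 4, 10);
    analyser = algorithm.process(path_to_file, collect_dynamic = True);

    # colors - from the first layer
    color_segments = analyser.allocate_colors(0.01, 10);
    draw_image_mask_segments(path_to_file, color_segments);

    # objects - from the second layer
    object_segments = analyser.allocate_objects(0.01, 10);
    draw_image_mask_segments(path_to_file, object_segments);

    # output dynamics of both layers (collected because collect_dynamic is True)
    syncsegm_visualizer.show_first_layer_dynamic(analyser);
    syncsegm_visualizer.show_second_layer_dynamic(analyser);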