Test Failed
Push — develop ( d7cf39...faa4bd )
by Nicolas
04:34 queued 10s
created

glances/plugins/glances_docker.py (1 issue)

# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2019 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Docker plugin."""

import os
import threading
import time

from glances.logger import logger
from glances.compat import iterkeys, itervalues, nativestr
from glances.timer import getTimeSinceLastUpdate
from glances.plugins.glances_plugin import GlancesPlugin
from glances.processes import sort_stats as sort_stats_processes, weighted, glances_processes

# Docker-py library (optional and Linux-only)
# https://github.com/docker/docker-py
try:
    import docker
except Exception as e:
    import_error_tag = True
    # Display a debug message if the import fails
    logger.warning("Error loading Docker Python Lib. Docker plugin is disabled ({})".format(e))
else:
    import_error_tag = False

# Define the items history list (list of items to add to history)
# TODO: For the moment limited to the CPU. Had to change the graph exports
#       method to display one graph per container.
# items_history_list = [{'name': 'cpu_percent',
#                        'description': 'Container CPU consumption in %',
#                        'y_unit': '%'},
#                       {'name': 'memory_usage',
#                        'description': 'Container memory usage in bytes',
#                        'y_unit': 'B'},
#                       {'name': 'network_rx',
#                        'description': 'Container network RX bitrate in bits per second',
#                        'y_unit': 'bps'},
#                       {'name': 'network_tx',
#                        'description': 'Container network TX bitrate in bits per second',
#                        'y_unit': 'bps'},
#                       {'name': 'io_r',
#                        'description': 'Container IO bytes read per second',
#                        'y_unit': 'Bps'},
#                       {'name': 'io_w',
#                        'description': 'Container IO bytes write per second',
#                        'y_unit': 'Bps'}]
items_history_list = [{'name': 'cpu_percent',
                       'description': 'Container CPU consumption in %',
                       'y_unit': '%'}]


class Plugin(GlancesPlugin):
    """Glances Docker plugin.

    stats is a dict: {'version': {...}, 'containers': [{}, {}]}
    """

    def __init__(self, args=None, config=None):
        """Init the plugin."""
        super(Plugin, self).__init__(args=args,
                                     config=config,
                                     items_history_list=items_history_list)

        # The plugin can be disabled using: args.disable_docker
        self.args = args

        # We want to display the stats in the curses interface
        self.display_curse = True

        # Init the Docker API
        self.docker_client = self.connect()

        # Dict of threads (to grab stats asynchronously, one thread is created per container)
        # key: Container Id
        # value: instance of ThreadDockerGrabber
        self.thread_list = {}

    def exit(self):
        """Overwrite the exit method to close threads."""
        for t in itervalues(self.thread_list):
            t.stop()
        # Call the parent class
        super(Plugin, self).exit()

    def get_key(self):
        """Return the key of the list."""
        return 'name'

    def get_export(self):
        """Overwrite the default export method.

        - Only exports containers
        - The key is the first container name
        """
        ret = []
        try:
            ret = self.stats['containers']
        except KeyError as e:
            logger.debug("docker plugin - Docker export error {}".format(e))
        return ret

    def connect(self):
        """Connect to the Docker server."""
        try:
            ret = docker.from_env()
        except Exception as e:
            logger.error("docker plugin - Can not connect to Docker ({})".format(e))
            ret = None

        return ret

    def _all_tag(self):
        """Return the all tag of the Glances/Docker configuration file.

        # By default, Glances only displays running containers
        # Set the following key to True to display all containers
        all=True
        """
        all_tag = self.get_conf_value('all')
        if len(all_tag) == 0:
            return False
        else:
            return all_tag[0].lower() == 'true'
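
    # For reference, the matching snippet in the Glances configuration file
    # (assuming the usual INI-style [docker] section) would look like:
    #
    #     [docker]
    #     all=True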

    @GlancesPlugin._check_decorator
    @GlancesPlugin._log_result_decorator
    def update(self):
        """Update Docker stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        # The Docker-py lib is mandatory
        if import_error_tag:
            return self.stats

        if self.input_method == 'local':
            # Update stats

            # Docker version
            # Example: {
            #     "KernelVersion": "3.16.4-tinycore64",
            #     "Arch": "amd64",
            #     "ApiVersion": "1.15",
            #     "Version": "1.3.0",
            #     "GitCommit": "c78088f",
            #     "Os": "linux",
            #     "GoVersion": "go1.3.3"
            # }
            try:
                stats['version'] = self.docker_client.version()
            except Exception as e:
                # Correct issue#649
                logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e))
                # We may have lost the connection: remove the version info
                if 'version' in self.stats:
                    del self.stats['version']
                self.stats['containers'] = []
                return self.stats

            # Update the current containers list
            try:
                # Issue #1152: Docker module doesn't export details about stopped containers
                # The Docker/all key of the configuration file should be set to True
                containers = self.docker_client.containers.list(all=self._all_tag()) or []
            except Exception as e:
                logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e))
                # We may have lost the connection: empty the containers list.
                self.stats['containers'] = []
                return self.stats

            # Start a new thread for each new container
            for container in containers:
                if container.id not in self.thread_list:
                    # Thread did not exist in the internal dict
                    # Create it and add it to the internal dict
                    logger.debug("{} plugin - Create thread for container {}".format(self.plugin_name, container.id[:12]))
                    t = ThreadDockerGrabber(container)
                    self.thread_list[container.id] = t
                    t.start()

            # Stop threads for non-existing containers
            nonexisting_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers])
            for container_id in nonexisting_containers:
                # Stop the thread
                logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
                self.thread_list[container_id].stop()
                # Delete the item from the dict
                del self.thread_list[container_id]

            # Get stats for all containers
            stats['containers'] = []
            for container in containers:
                # Init the stats for the current container
                container_stats = {}
                # The key is the container name and not the Id
                container_stats['key'] = self.get_key()
                # Export name (first name in the Names list, without the /)
                container_stats['name'] = nativestr(container.name)
                # Export global Names (used by the WebUI)
                container_stats['Names'] = [nativestr(container.name)]
                # Container Id
                container_stats['Id'] = container.id
                # Container Image
                container_stats['Image'] = container.image.tags
                # Global stats (from attrs)
                container_stats['Status'] = container.attrs['State']['Status']
                container_stats['Command'] = container.attrs['Config']['Entrypoint']
                # Standard stats
                if container_stats['Status'] in ('running', 'paused'):
                    container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_list[container.id].stats)
                    container_stats['cpu_percent'] = container_stats['cpu'].get('total', None)
                    container_stats['memory'] = self.get_docker_memory(container.id, self.thread_list[container.id].stats)
                    container_stats['memory_usage'] = container_stats['memory'].get('usage', None)
                    container_stats['io'] = self.get_docker_io(container.id, self.thread_list[container.id].stats)
                    container_stats['io_r'] = container_stats['io'].get('ior', None)
                    container_stats['io_w'] = container_stats['io'].get('iow', None)
                    container_stats['network'] = self.get_docker_network(container.id, self.thread_list[container.id].stats)
                    container_stats['network_rx'] = container_stats['network'].get('rx', None)
                    container_stats['network_tx'] = container_stats['network'].get('tx', None)
                else:
                    container_stats['cpu'] = {}
                    container_stats['cpu_percent'] = None
                    container_stats['memory'] = {}
                    container_stats['memory_percent'] = None
                    container_stats['io'] = {}
                    container_stats['io_r'] = None
                    container_stats['io_w'] = None
                    container_stats['network'] = {}
                    container_stats['network_rx'] = None
                    container_stats['network_tx'] = None
                # Add current container stats to the stats list
                stats['containers'].append(container_stats)

        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # Not available
            pass

        # Sort and update the stats
        self.stats = sort_stats(stats)

        return self.stats

    def get_docker_cpu(self, container_id, all_stats):
        """Return the container CPU usage.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'total': 1.49}
        """
        cpu_new = {}
        ret = {'total': 0.0}

        # Read the stats
        # For each container, you will find a pseudo-file cpuacct.stat,
        # containing the CPU usage accumulated by the processes of the container.
        # Those times are expressed in ticks of 1/USER_HZ of a second.
        # On x86 systems, USER_HZ is 100.
        try:
            cpu_new['total'] = all_stats['cpu_stats']['cpu_usage']['total_usage']
            cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
            cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
        except KeyError as e:
            # all_stats do not have CPU information
            logger.debug("docker plugin - Cannot grab CPU usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
        else:
            # Previous CPU stats are stored in the cpu_old variable
            if not hasattr(self, 'cpu_old'):
                # First call, we init the cpu_old variable
                self.cpu_old = {}
                try:
                    self.cpu_old[container_id] = cpu_new
                except (IOError, UnboundLocalError):
                    pass

            if container_id not in self.cpu_old:
                try:
                    self.cpu_old[container_id] = cpu_new
                except (IOError, UnboundLocalError):
                    pass
            else:
                # Compute the CPU usage from the container/system deltas
                cpu_delta = float(cpu_new['total'] - self.cpu_old[container_id]['total'])
                system_delta = float(cpu_new['system'] - self.cpu_old[container_id]['system'])
                if cpu_delta > 0.0 and system_delta > 0.0:
                    ret['total'] = (cpu_delta / system_delta) * float(cpu_new['nb_core']) * 100

                # Save stats to compute the next stats
                self.cpu_old[container_id] = cpu_new

        # Return the stats
        return ret
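
    # Illustrative worked example for the formula above (hypothetical values,
    # not taken from a real container): with cpu_delta = 2.0e8 ns of container
    # CPU time, system_delta = 1.0e10 ns of host CPU time and nb_core = 4,
    # total = (2.0e8 / 1.0e10) * 4 * 100 = 8.0 (i.e. 8 % is reported).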

    def get_docker_memory(self, container_id, all_stats):
        """Return the container MEMORY usage.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
        """
        ret = {}
        # Read the stats
        try:
            # These keys do not exist anymore with Docker 1.11 (issue #848)
            # ret['rss'] = all_stats['memory_stats']['stats']['rss']
            # ret['cache'] = all_stats['memory_stats']['stats']['cache']
            ret['usage'] = all_stats['memory_stats']['usage']
            ret['limit'] = all_stats['memory_stats']['limit']
            ret['max_usage'] = all_stats['memory_stats']['max_usage']
        except (KeyError, TypeError) as e:
            # all_stats do not have MEM information
            logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
        # Return the stats
        return ret

    def get_docker_network(self, container_id, all_stats):
        """Return the container network usage using the Docker API (v1.0 or higher).

        Input: id is the full container id
        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
        with:
            time_since_update: number of seconds elapsed since the latest grab
            rx: Number of bytes received
            tx: Number of bytes transmitted
        """
        # Init the returned dict
        network_new = {}

        # Read the rx/tx stats (in bytes)
        try:
            netcounters = all_stats["networks"]
        except KeyError as e:
            # all_stats do not have NETWORK information
            logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
            # No fallback available...
            return network_new

        # Previous network interface stats are stored in the netcounters_old variable
        if not hasattr(self, 'netcounters_old'):
            # First call, we init the netcounters_old var
            self.netcounters_old = {}
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.netcounters_old:
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            try:
                network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
                network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
                network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
                network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
                network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
            except KeyError as e:
                # all_stats do not have INTERFACE information
                logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
                logger.debug(all_stats)

            # Save stats to compute the next bitrate
            self.netcounters_old[container_id] = netcounters

        # Return the stats
        return network_new

    def get_docker_io(self, container_id, all_stats):
        """Return the container IO usage using the Docker API (v1.0 or higher).

        Input: id is the full container id
        Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
        with:
            time_since_update: number of seconds elapsed since the latest grab
            ior: Number of bytes read
            iow: Number of bytes written
        """
        # Init the returned dict
        io_new = {}

        # Read the ior/iow stats (in bytes)
        try:
            iocounters = all_stats["blkio_stats"]
        except KeyError as e:
            # all_stats do not have io information
            logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
            # No fallback available...
            return io_new

        # Previous io stats are stored in the iocounters_old variable
        if not hasattr(self, 'iocounters_old'):
            # First call, we init the iocounters_old var
            self.iocounters_old = {}
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.iocounters_old:
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable IoR/s and IoW/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            try:
                # Read the IOR and IOW values in the structure (list of dicts)
                ior = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
                ior_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
            except (TypeError, IndexError, KeyError) as e:
                # all_stats do not have io information
                logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
            else:
                io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
                io_new['ior'] = ior - ior_old
                io_new['iow'] = iow - iow_old
                io_new['cumulative_ior'] = ior
                io_new['cumulative_iow'] = iow

                # Save stats to compute the next bitrate
                self.iocounters_old[container_id] = iocounters

        # Return the stats
        return io_new

    def get_user_ticks(self):
        """Return the number of user ticks per second (SC_CLK_TCK)."""
        return os.sysconf(os.sysconf_names['SC_CLK_TCK'])

    def get_stats_action(self):
        """Return stats for the action.

        Docker will return self.stats['containers']
        """
        return self.stats['containers']

    def update_views(self):
        """Update stats views."""
        # Call the parent's method
        super(Plugin, self).update_views()

        if 'containers' not in self.stats:
            return False

        # Add specific information
        # Alert
        for i in self.stats['containers']:
            # Init the views for the current container (key = container name)
            self.views[i[self.get_key()]] = {'cpu': {}, 'mem': {}}
            # CPU alert
            if 'cpu' in i and 'total' in i['cpu']:
                # Look for a container-specific CPU threshold in the conf file
                alert = self.get_alert(i['cpu']['total'],
                                       header=i['name'] + '_cpu',
                                       action_key=i['name'])
                if alert == 'DEFAULT':
                    # Not found? Fall back to the default CPU threshold value
                    alert = self.get_alert(i['cpu']['total'], header='cpu')
                self.views[i[self.get_key()]]['cpu']['decoration'] = alert
            # MEM alert
            if 'memory' in i and 'usage' in i['memory']:
                # Look for a container-specific MEM threshold in the conf file
                alert = self.get_alert(i['memory']['usage'],
                                       maximum=i['memory']['limit'],
                                       header=i['name'] + '_mem',
                                       action_key=i['name'])
                if alert == 'DEFAULT':
                    # Not found? Fall back to the default MEM threshold value
                    alert = self.get_alert(i['memory']['usage'],
                                           maximum=i['memory']['limit'],
                                           header='mem')
                self.views[i[self.get_key()]]['mem']['decoration'] = alert

        return True

    def msg_curse(self, args=None, max_width=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist (and are not empty) and the display plugin is enabled...
        if not self.stats \
           or 'containers' not in self.stats or len(self.stats['containers']) == 0 \
           or self.is_disable():
            return ret

        # Build the string message
        # Title
        msg = '{}'.format('CONTAINERS')
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = ' {}'.format(len(self.stats['containers']))
        ret.append(self.curse_add_line(msg))
        msg = ' (served by Docker {})'.format(self.stats['version']["Version"])
        ret.append(self.curse_add_line(msg))
        ret.append(self.curse_new_line())
        # Header
        ret.append(self.curse_new_line())
        # Get the maximum container name length (cut to 20 chars max)
        name_max_width = min(20, len(max(self.stats['containers'], key=lambda x: len(x['name']))['name']))
        msg = ' {:{width}}'.format('Name', width=name_max_width)
        ret.append(self.curse_add_line(msg))
        msg = '{:>10}'.format('Status')
        ret.append(self.curse_add_line(msg))
        msg = '{:>6}'.format('CPU%')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('MEM')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('/MAX')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('IOR/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('IOW/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('Rx/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('Tx/s')
        ret.append(self.curse_add_line(msg))
        msg = ' {:8}'.format('Command')
        ret.append(self.curse_add_line(msg))
        # Data
        for container in self.stats['containers']:
            ret.append(self.curse_new_line())
            # Name
            ret.append(self.curse_add_line(self._msg_name(container=container,
                                                          max_width=name_max_width)))
            # Status
            status = self.container_alert(container['Status'])
            msg = '{:>10}'.format(container['Status'][0:10])
            ret.append(self.curse_add_line(msg, status))
            # CPU
            try:
                msg = '{:>6.1f}'.format(container['cpu']['total'])
            except KeyError:
                msg = '{:>6}'.format('_')
            ret.append(self.curse_add_line(msg, self.get_views(item=container['name'],
                                                               key='cpu',
                                                               option='decoration')))
            # MEM
            try:
                msg = '{:>7}'.format(self.auto_unit(container['memory']['usage']))
            except KeyError:
                msg = '{:>7}'.format('_')
            ret.append(self.curse_add_line(msg, self.get_views(item=container['name'],
                                                               key='mem',
                                                               option='decoration')))
            try:
                msg = '{:>7}'.format(self.auto_unit(container['memory']['limit']))
            except KeyError:
                msg = '{:>7}'.format('_')
            ret.append(self.curse_add_line(msg))
            # IO R/W
            unit = 'B'
            for r in ['ior', 'iow']:
                try:
                    value = self.auto_unit(int(container['io'][r] // container['io']['time_since_update'])) + unit
                    msg = '{:>7}'.format(value)
                except KeyError:
                    msg = '{:>7}'.format('_')
                ret.append(self.curse_add_line(msg))
            # NET RX/TX
            if args.byte:
                # Bytes per second (for dummies)
                to_bit = 1
                unit = ''
            else:
                # Bits per second (for real network administrators | Default)
                to_bit = 8
                unit = 'b'
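            # Illustrative worked example (made-up numbers): 1500000 bytes
            # received during a 3 s refresh window gives 1500000 // 3 * 8 =
            # 4000000, i.e. roughly 4 Mbit/s once auto_unit() shortens the
            # value and the 'b' unit is appended.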
            for r in ['rx', 'tx']:
                try:
                    value = self.auto_unit(int(container['network'][r] // container['network']['time_since_update'] * to_bit)) + unit
                    msg = '{:>7}'.format(value)
                except KeyError:
                    msg = '{:>7}'.format('_')
                ret.append(self.curse_add_line(msg))
            # Command
            if container['Command'] is not None:
                msg = ' {}'.format(' '.join(container['Command']))
            else:
                msg = ' {}'.format('_')
            ret.append(self.curse_add_line(msg, splittable=True))

        return ret

    def _msg_name(self, container, max_width):
        """Build the container name."""
        name = container['name']
        if len(name) > max_width:
            name = '_' + name[-max_width + 1:]
        else:
            name = name[:max_width]
        return ' {:{width}}'.format(name, width=max_width)
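
    # Illustrative example: with max_width=10, the (made-up) name
    # 'verylongcontainername' is rendered as '_ainername' (a leading '_' plus
    # the last 9 characters), while a shorter name is simply left-aligned and
    # padded to the same width.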

    def container_alert(self, status):
        """Analyse the container status."""
        if status == 'running':
            return 'OK'
        elif status == 'exited':
            return 'WARNING'
        elif status == 'dead':
            return 'CRITICAL'
        else:
            return 'CAREFUL'


class ThreadDockerGrabber(threading.Thread):
    """
    Specific thread to grab Docker stats.

    stats is a dict
    """

    def __init__(self, container):
        """Init the class.

        container: instance of Docker-py Container
        """
        super(ThreadDockerGrabber, self).__init__()
        # Event needed to properly stop the thread
        self._stopper = threading.Event()
        # docker-py returns stats as a stream
        self._container = container
        self._stats_stream = container.stats(decode=True)
        # The class returns the stats as a dict
        self._stats = {}
        logger.debug("docker plugin - Create thread for container {}".format(self._container.name))

    def run(self):
        """Grab the stats.

        Infinite loop, should be stopped by calling the stop() method
        """
        try:
            for i in self._stats_stream:
                self._stats = i
                time.sleep(0.1)
                if self.stopped():
                    break
        except:

Issue (Coding Style Best Practice): General except handlers without types should be used sparingly. Typically, you would use a general except handler when you intend to handle all types of errors, e.g. when logging. Otherwise, such general error handlers can mask errors in your application that you want to know about.

            logger.debug("docker plugin - Exception thrown during run")
            self.stop()
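
A narrower handler would address the issue flagged above. A minimal sketch of the idea (an illustrative suggestion for the same run() method, not part of the pushed change):

        try:
            for i in self._stats_stream:
                self._stats = i
                time.sleep(0.1)
                if self.stopped():
                    break
        except Exception as e:
            # Catching Exception instead of using a bare except lets SystemExit
            # and KeyboardInterrupt propagate, and the cause can be logged.
            logger.debug("docker plugin - Exception thrown during run ({})".format(e))
            self.stop()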

    @property
    def stats(self):
        """Stats getter."""
        return self._stats

    @stats.setter
    def stats(self, value):
        """Stats setter."""
        self._stats = value

    def stop(self, timeout=None):
        """Stop the thread."""
        logger.debug("docker plugin - Close thread for container {}".format(self._container.name))
        self._stopper.set()

    def stopped(self):
        """Return True if the thread is stopped."""
        return self._stopper.isSet()


def sort_stats(stats):
    # Sort Docker stats using the same function as for processes
    sortedby = 'cpu_percent'
    sortedby_secondary = 'memory_usage'
    if glances_processes.sort_key.startswith('memory'):
        sortedby = 'memory_usage'
        sortedby_secondary = 'cpu_percent'
    sort_stats_processes(stats['containers'],
                         sortedby=sortedby,
                         sortedby_secondary=sortedby_secondary)
    return stats
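
# Illustrative example (made-up stats): with the default process sort key,
# sort_stats() orders stats['containers'] by 'cpu_percent' first and
# 'memory_usage' second, so a container at 80 % CPU is expected to be listed
# before one at 5 % CPU (assuming sort_stats_processes sorts in descending
# order, as it does for the process list).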