Test Failed
Push — master (69b639...e7fa0a) by Nicolas
Duration: 04:05 (queued 01:05)

Plugin.get_docker_cpu() (rating: B)

Complexity
  Conditions: 6

Size
  Total Lines: 52
  Code Lines: 31

Duplication
  Lines: 0
  Ratio: 0 %

Importance
  Changes: 0
Metric   Value
cc       6
eloc     31
nop      3
dl       0
loc      52
rs       8.2026
c        0
b        0
f        0

How to fix: Long Method

Small methods make your code easier to understand, especially when combined with a good name. And if a method is small, finding a good name for it is usually much easier.

For example, if you find yourself adding comments to a method's body, that is usually a good sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.

The most commonly applied refactoring for this smell is Extract Method: move a coherent block of the long method into a new, well-named method, as sketched below.
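As a minimal sketch of how Extract Method could be applied to the flagged get_docker_cpu() (the helper names _extract_cpu_counters and _compute_cpu_percent are hypothetical, and the logging and self parameter are omitted for brevity), the counter parsing and the percentage computation each become a small, separately nameable step:

def _extract_cpu_counters(all_stats):
    """Pull the raw counters needed for the CPU computation out of the API payload."""
    cpu = all_stats['cpu_stats']
    precpu = all_stats['precpu_stats']
    # Fall back to the percpu_usage length when online_cpus is missing (older daemons)
    nb_core = cpu.get('online_cpus') or len(cpu['cpu_usage'].get('percpu_usage') or [])
    return {'total': cpu['cpu_usage'].get('total_usage'),
            'pretotal': precpu['cpu_usage'].get('total_usage'),
            'system': cpu.get('system_cpu_usage'),
            'presystem': precpu.get('system_cpu_usage'),
            'nb_core': nb_core}


def _compute_cpu_percent(counters):
    """CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0"""
    cpu_delta = counters['total'] - counters['pretotal']
    system_cpu_delta = counters['system'] - counters['presystem']
    return (cpu_delta / system_cpu_delta) * counters['nb_core'] * 100.0


def get_docker_cpu(container_id, all_stats):
    """The top-level method now reads as two named steps."""
    try:
        return {'total': _compute_cpu_percent(_extract_cpu_counters(all_stats))}
    except (KeyError, TypeError, ZeroDivisionError):
        # Counters missing or not yet populated: report 0.0, as the original does
        return {'total': 0.0}

Each helper owns its own branches, so the conditions counted against get_docker_cpu() drop and each piece can be unit-tested on its own. The full source of the plugin follows for context.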

# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2019 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Docker plugin."""

import os
import threading
import time
from copy import deepcopy

from glances.logger import logger
from glances.compat import iterkeys, itervalues, nativestr
from glances.timer import getTimeSinceLastUpdate
from glances.plugins.glances_plugin import GlancesPlugin
from glances.processes import sort_stats as sort_stats_processes, weighted, glances_processes

# Docker-py library (optional and Linux-only)
# https://github.com/docker/docker-py
try:
    import docker
except Exception as e:
    import_error_tag = True
    # Display a debug message if the import fails
    logger.warning("Error loading Docker Python Lib. Docker plugin is disabled ({})".format(e))
else:
    import_error_tag = False

# Define the items history list (list of items to add to history)
# TODO: For the moment limited to the CPU. Had to change the graph exports
#       method to display one graph per container.
# items_history_list = [{'name': 'cpu_percent',
#                        'description': 'Container CPU consumption in %',
#                        'y_unit': '%'},
#                       {'name': 'memory_usage',
#                        'description': 'Container memory usage in bytes',
#                        'y_unit': 'B'},
#                       {'name': 'network_rx',
#                        'description': 'Container network RX bitrate in bits per second',
#                        'y_unit': 'bps'},
#                       {'name': 'network_tx',
#                        'description': 'Container network TX bitrate in bits per second',
#                        'y_unit': 'bps'},
#                       {'name': 'io_r',
#                        'description': 'Container IO bytes read per second',
#                        'y_unit': 'Bps'},
#                       {'name': 'io_w',
#                        'description': 'Container IO bytes write per second',
#                        'y_unit': 'Bps'}]
items_history_list = [{'name': 'cpu_percent',
                       'description': 'Container CPU consumption in %',
                       'y_unit': '%'}]


# List of keys to remove before export
export_exclude_list = ['cpu', 'io', 'memory', 'network']


class Plugin(GlancesPlugin):
    """Glances Docker plugin.

    stats is a dict: {'version': {...}, 'containers': [{}, {}]}
    """

    def __init__(self, args=None, config=None):
        """Init the plugin."""
        super(Plugin, self).__init__(args=args,
                                     config=config,
                                     items_history_list=items_history_list)

        # The plugin can be disabled using: args.disable_docker
        self.args = args

        # Default config keys
        self.config = config

        # We want to display the stats in the curse interface
        self.display_curse = True

        # Init the Docker API
        self.docker_client = self.connect()

        # Dict of threads (to grab stats asynchronously, one thread is created per container)
        # key: Container Id
        # value: instance of ThreadDockerGrabber
        self.thread_list = {}

        # Force a first update because we need two updates to have the first stats
        self.update()
        self.refresh_timer.set(0)

    def exit(self):
        """Overwrite the exit method to close threads."""
        for t in itervalues(self.thread_list):
            t.stop()
        # Call the parent class
        super(Plugin, self).exit()

    def get_key(self):
        """Return the key of the list."""
        return 'name'

    def get_export(self):
        """Overwrite the default export method.

        - Only exports containers
        - The key is the first container name
        """
        try:
            ret = deepcopy(self.stats['containers'])
        except KeyError as e:
            logger.debug("docker plugin - Docker export error {}".format(e))
            ret = []

        # Remove fields used to compute rate
        for container in ret:
            for i in export_exclude_list:
                container.pop(i)

        return ret

    def connect(self):
        """Connect to the Docker server."""
        try:
            # Passing a timeout here reproduces issue #1878
            # (Docker containers information missing with Docker 20.10.x).
            # So, for the moment, the timeout option is disabled.
            ret = docker.from_env()
        except Exception as e:
            logger.error("docker plugin - Can not connect to Docker ({})".format(e))
            ret = None

        return ret

    def _all_tag(self):
        """Return the all tag of the Glances/Docker configuration file.

        # By default, Glances only displays running containers
        # Set the following key to True to display all containers
        all=True
        """
        all_tag = self.get_conf_value('all')
        if len(all_tag) == 0:
            return False
        else:
            return all_tag[0].lower() == 'true'

    @GlancesPlugin._check_decorator
    @GlancesPlugin._log_result_decorator
    def update(self):
        """Update Docker stats using the input method."""
        # Init new stats
        stats = self.get_init_value()

        # The Docker-py lib is mandatory and connection should be ok
        if import_error_tag or self.docker_client is None:
            return self.stats

        if self.input_method == 'local':
            # Update stats

            # Docker version
            # Example: {
            #     "KernelVersion": "3.16.4-tinycore64",
            #     "Arch": "amd64",
            #     "ApiVersion": "1.15",
            #     "Version": "1.3.0",
            #     "GitCommit": "c78088f",
            #     "Os": "linux",
            #     "GoVersion": "go1.3.3"
            # }
            try:
                stats['version'] = self.docker_client.version()
            except Exception as e:
                # Correct issue#649
                logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e))
                # We may have lost the connection; remove the version info
                if 'version' in self.stats:
                    del self.stats['version']
                self.stats['containers'] = []
                return self.stats

            # Update current containers list
            try:
                # Issue #1152: Docker module doesn't export details about stopped containers
                # The Docker/all key of the configuration file should be set to True
                containers = self.docker_client.containers.list(all=self._all_tag()) or []
            except Exception as e:
                logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e))
                # We may have lost the connection; empty the containers list
                self.stats['containers'] = []
                return self.stats

            # Start a new thread for each new container
            for container in containers:
                if container.id not in self.thread_list:
                    # Thread did not exist in the internal dict
                    # Create it and add it to the internal dict
                    logger.debug("{} plugin - Create thread for container {}".format(self.plugin_name, container.id[:12]))
                    t = ThreadDockerGrabber(container)
                    self.thread_list[container.id] = t
                    t.start()

            # Stop threads for non-existing containers
            nonexisting_containers = set(iterkeys(self.thread_list)) - set([c.id for c in containers])
            for container_id in nonexisting_containers:
                # Stop the thread
                logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
                self.thread_list[container_id].stop()
                # Delete the item from the dict
                del self.thread_list[container_id]

            # Get stats for all containers
            stats['containers'] = []
            for container in containers:
                # Only show specific containers
                if not self.is_show(nativestr(container.name)):
                    continue

                # Do not take hidden containers into account
                if self.is_hide(nativestr(container.name)):
                    continue

                # Init the stats for the current container
                container_stats = {}
                # The key is the container name and not the Id
                container_stats['key'] = self.get_key()
                # Export name (first name in the Names list, without the /)
                container_stats['name'] = nativestr(container.name)
                # Export global Names (used by the WebUI)
                container_stats['Names'] = [nativestr(container.name)]
                # Container Id
                container_stats['Id'] = container.id
                # Container Image
                container_stats['Image'] = container.image.tags
                # Global stats (from attrs)
                container_stats['Status'] = container.attrs['State']['Status']
                container_stats['Command'] = container.attrs['Config']['Entrypoint']
                # Standard stats
                # See https://docs.docker.com/engine/api/v1.41/#operation/ContainerStats
                # Be aware that the API can change... (for an example, see issue #1857)
                if container_stats['Status'] in ('running', 'paused'):
                    # CPU
                    container_stats['cpu'] = self.get_docker_cpu(container.id, self.thread_list[container.id].stats)
                    container_stats['cpu_percent'] = container_stats['cpu'].get('total', None)
                    # MEM
                    container_stats['memory'] = self.get_docker_memory(container.id, self.thread_list[container.id].stats)
                    container_stats['memory_usage'] = container_stats['memory'].get('usage', None)
                    if container_stats['memory'].get('cache', None) is not None:
                        container_stats['memory_usage'] -= container_stats['memory']['cache']
                    # IO
                    container_stats['io'] = self.get_docker_io(container.id, self.thread_list[container.id].stats)
                    container_stats['io_r'] = container_stats['io'].get('ior', None)
                    container_stats['io_w'] = container_stats['io'].get('iow', None)
                    # NET
                    container_stats['network'] = self.get_docker_network(container.id, self.thread_list[container.id].stats)
                    container_stats['network_rx'] = container_stats['network'].get('rx', None)
                    container_stats['network_tx'] = container_stats['network'].get('tx', None)
                else:
                    container_stats['cpu'] = {}
                    container_stats['cpu_percent'] = None
                    container_stats['memory'] = {}
                    container_stats['memory_percent'] = None
                    container_stats['io'] = {}
                    container_stats['io_r'] = None
                    container_stats['io_w'] = None
                    container_stats['network'] = {}
                    container_stats['network_rx'] = None
                    container_stats['network_tx'] = None
                # Add current container stats to the stats list
                stats['containers'].append(container_stats)

        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # Not available
            pass

        # Sort and update the stats
        self.stats = sort_stats(stats)

        return self.stats

    def get_docker_cpu(self, container_id, all_stats):
        """Return the container CPU usage.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'total': 1.49}
        """
        ret = {'total': 0.0}

        # Read the stats
        # For each container, you will find a pseudo-file cpuacct.stat,
        # containing the CPU usage accumulated by the processes of the container.
        # Those times are expressed in ticks of 1/USER_HZ of a second.
        # On x86 systems, USER_HZ is 100.
        cpu_new = {}
        precpu_new = {}
        try:
            cpu_new['total'] = all_stats['cpu_stats']['cpu_usage'].get(
                'total_usage', None)
            precpu_new['total'] = all_stats['precpu_stats']['cpu_usage'].get(
                'total_usage', None)
            cpu_new['system'] = all_stats['cpu_stats'].get(
                'system_cpu_usage', None)
            precpu_new['system'] = all_stats['precpu_stats'].get(
                'system_cpu_usage', None)
            # Issue #1857
            # If either precpu_stats.online_cpus or cpu_stats.online_cpus is nil
            # then for compatibility with older daemons the length of
            # the corresponding cpu_usage.percpu_usage array should be used.
            if 'online_cpus' in all_stats['cpu_stats'] and \
               all_stats['cpu_stats']['online_cpus'] is not None:
                cpu_new['nb_core'] = all_stats['cpu_stats']['online_cpus']
            else:
                cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
        except KeyError as e:
            logger.debug(
                "docker plugin - Cannot grab CPU usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
        else:
            try:
                cpu_delta = cpu_new['total'] - precpu_new['total']
                system_cpu_delta = cpu_new['system'] - precpu_new['system']
                # CPU usage % = (cpu_delta / system_cpu_delta) * number_cpus * 100.0
                ret['total'] = (cpu_delta / system_cpu_delta) * \
                    cpu_new['nb_core'] * 100.0
            except TypeError as e:
                logger.debug(
                    "docker plugin - Cannot compute CPU usage for container {} ({})".format(container_id, e))
                logger.debug(all_stats)

        # Return the stats
        return ret

    def get_docker_memory(self, container_id, all_stats):
        """Return the container MEMORY.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'rss': 1015808, 'cache': 356352, 'usage': ..., 'max_usage': ...}
        """
        ret = {}
        # Read the stats
        try:
            # Issue #1857
            # Some stats are not always available in ['memory_stats']['stats']
            if 'rss' in all_stats['memory_stats']['stats']:
                ret['rss'] = all_stats['memory_stats']['stats']['rss']
            elif 'total_rss' in all_stats['memory_stats']['stats']:
                ret['rss'] = all_stats['memory_stats']['stats']['total_rss']
            else:
                ret['rss'] = None
            ret['cache'] = all_stats['memory_stats']['stats'].get(
                'cache', None)
            ret['max_usage'] = all_stats['memory_stats'].get(
                'max_usage', None)
            # Mandatory fields
            ret['usage'] = all_stats['memory_stats']['usage']
            ret['limit'] = all_stats['memory_stats']['limit']
        except (KeyError, TypeError) as e:
            # all_stats does not have MEM information
            logger.debug("docker plugin - Cannot grab MEM usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
        # Return the stats
        return ret

    def get_docker_network(self, container_id, all_stats):
        """Return the container network usage using the Docker API (v1.0 or higher).

        Input: id is the full container id
        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
        with:
            time_since_update: number of seconds elapsed since the latest grab
            rx: Number of bytes received
            tx: Number of bytes transmitted
        """
        # Init the returned dict
        network_new = {}

        # Read the rx/tx stats (in bytes)
        try:
            netcounters = all_stats["networks"]
        except KeyError as e:
            # all_stats does not have NETWORK information
            logger.debug("docker plugin - Cannot grab NET usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
            # No fallback available...
            return network_new

        # Previous network interface stats are stored in the netcounters_old variable
        if not hasattr(self, 'netcounters_old'):
            # First call, we init the netcounters_old var
            self.netcounters_old = {}
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.netcounters_old:
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            try:
                network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
                network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
                network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
                network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
                network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
            except KeyError as e:
                # all_stats does not have INTERFACE information
                logger.debug("docker plugin - Cannot grab network interface usage for container {} ({})".format(container_id, e))
                logger.debug(all_stats)

            # Save stats to compute next bitrate
            self.netcounters_old[container_id] = netcounters

        # Return the stats
        return network_new

    def get_docker_io(self, container_id, all_stats):
        """Return the container IO usage using the Docker API (v1.0 or higher).

        Input: id is the full container id
        Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
        with:
            time_since_update: number of seconds elapsed since the latest grab
            ior: Number of bytes read
            iow: Number of bytes written
        """
        # Init the returned dict
        io_new = {}

        # Read the ior/iow stats (in bytes)
        try:
            iocounters = all_stats["blkio_stats"]
        except KeyError as e:
            # all_stats does not have IO information
            logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
            # No fallback available...
            return io_new

        # Previous IO stats are stored in the iocounters_old variable
        if not hasattr(self, 'iocounters_old'):
            # First call, we init the iocounters_old var
            self.iocounters_old = {}
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.iocounters_old:
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable IoR/s and IoW/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            try:
                # Read IOR and IOW values in the structure (list of dicts)
                ior = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
                ior_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
            except (TypeError, IndexError, KeyError) as e:
                # all_stats does not have IO information
                logger.debug("docker plugin - Cannot grab block IO usage for container {} ({})".format(container_id, e))
            else:
                io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
                io_new['ior'] = ior - ior_old
                io_new['iow'] = iow - iow_old
                io_new['cumulative_ior'] = ior
                io_new['cumulative_iow'] = iow

                # Save stats to compute next bitrate
                self.iocounters_old[container_id] = iocounters

        # Return the stats
        return io_new

    def get_user_ticks(self):
        """Return the number of clock ticks per second (SC_CLK_TCK)."""
        return os.sysconf(os.sysconf_names['SC_CLK_TCK'])

    def get_stats_action(self):
        """Return stats for the action.

        Docker will return self.stats['containers']
        """
        return self.stats['containers']

    def update_views(self):
        """Update stats views."""
        # Call the parent's method
        super(Plugin, self).update_views()

        if 'containers' not in self.stats:
            return False

        # Add specific information
        # Alert
        for i in self.stats['containers']:
            # Init the views for the current container (key = container name)
            self.views[i[self.get_key()]] = {'cpu': {}, 'mem': {}}
            # CPU alert
            if 'cpu' in i and 'total' in i['cpu']:
                # Look for a container-specific CPU threshold in the conf file
                alert = self.get_alert(i['cpu']['total'],
                                       header=i['name'] + '_cpu',
                                       action_key=i['name'])
                if alert == 'DEFAULT':
                    # Not found? Fall back to the default CPU threshold value
                    alert = self.get_alert(i['cpu']['total'], header='cpu')
                self.views[i[self.get_key()]]['cpu']['decoration'] = alert
            # MEM alert
            if 'memory' in i and 'usage' in i['memory']:
                # Look for a container-specific MEM threshold in the conf file
                alert = self.get_alert(i['memory']['usage'],
                                       maximum=i['memory']['limit'],
                                       header=i['name'] + '_mem',
                                       action_key=i['name'])
                if alert == 'DEFAULT':
                    # Not found? Fall back to the default MEM threshold value
                    alert = self.get_alert(i['memory']['usage'],
                                           maximum=i['memory']['limit'],
                                           header='mem')
                self.views[i[self.get_key()]]['mem']['decoration'] = alert

        return True

    def msg_curse(self, args=None, max_width=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist (and are non null) and the display plugin is enabled...
        if not self.stats \
           or 'containers' not in self.stats or len(self.stats['containers']) == 0 \
           or self.is_disable():
            return ret

        # Build the string message
        # Title
        msg = '{}'.format('CONTAINERS')
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = ' {}'.format(len(self.stats['containers']))
        ret.append(self.curse_add_line(msg))
        msg = ' (served by Docker {})'.format(self.stats['version']["Version"])
        ret.append(self.curse_add_line(msg))
        ret.append(self.curse_new_line())
        # Header
        ret.append(self.curse_new_line())
        # Get the maximum container name length
        # Max size is configurable. See feature request #1723.
        name_max_width = min(self.config.get_int_value('docker',
                                                       'max_name_size',
                                                       default=20)
                             if self.config is not None else 20,
                             len(max(self.stats['containers'],
                                 key=lambda x: len(x['name']))['name']))
        msg = ' {:{width}}'.format('Name', width=name_max_width)
        ret.append(self.curse_add_line(msg))
        msg = '{:>10}'.format('Status')
        ret.append(self.curse_add_line(msg))
        msg = '{:>6}'.format('CPU%')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('MEM')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('/MAX')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('IOR/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('IOW/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('Rx/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('Tx/s')
        ret.append(self.curse_add_line(msg))
        msg = ' {:8}'.format('Command')
        ret.append(self.curse_add_line(msg))
        # Data
        for container in self.stats['containers']:
            ret.append(self.curse_new_line())
            # Name
            ret.append(self.curse_add_line(self._msg_name(container=container,
                                                          max_width=name_max_width)))
            # Status
            status = self.container_alert(container['Status'])
            msg = '{:>10}'.format(container['Status'][0:10])
            ret.append(self.curse_add_line(msg, status))
            # CPU
            try:
                msg = '{:>6.1f}'.format(container['cpu']['total'])
            except KeyError:
                msg = '{:>6}'.format('_')
            ret.append(self.curse_add_line(msg, self.get_views(item=container['name'],
                                                               key='cpu',
                                                               option='decoration')))
            # MEM
            try:
                msg = '{:>7}'.format(self.auto_unit(container['memory']['usage']))
            except KeyError:
                msg = '{:>7}'.format('_')
            ret.append(self.curse_add_line(msg, self.get_views(item=container['name'],
                                                               key='mem',
                                                               option='decoration')))
            try:
                msg = '{:>7}'.format(self.auto_unit(container['memory']['limit']))
            except KeyError:
                msg = '{:>7}'.format('_')
            ret.append(self.curse_add_line(msg))
            # IO R/W
            unit = 'B'
            for r in ['ior', 'iow']:
                try:
                    value = self.auto_unit(int(container['io'][r] // container['io']['time_since_update'])) + unit
                    msg = '{:>7}'.format(value)
                except KeyError:
                    msg = '{:>7}'.format('_')
                ret.append(self.curse_add_line(msg))
            # NET RX/TX
            if args.byte:
                # Bytes per second (for dummy)
                to_bit = 1
                unit = ''
            else:
                # Bits per second (for real network administrator | Default)
                to_bit = 8
                unit = 'b'
            for r in ['rx', 'tx']:
                try:
                    value = self.auto_unit(int(container['network'][r] // container['network']['time_since_update'] * to_bit)) + unit
                    msg = '{:>7}'.format(value)
                except KeyError:
                    msg = '{:>7}'.format('_')
                ret.append(self.curse_add_line(msg))
            # Command
            if container['Command'] is not None:
                msg = ' {}'.format(' '.join(container['Command']))
            else:
                msg = ' {}'.format('_')
            ret.append(self.curse_add_line(msg, splittable=True))

        return ret

    def _msg_name(self, container, max_width):
        """Build the container name."""
        name = container['name']
        if len(name) > max_width:
            name = '_' + name[-max_width + 1:]
        else:
            name = name[:max_width]
        return ' {:{width}}'.format(name, width=max_width)

    def container_alert(self, status):
        """Analyse the container status."""
        if status == 'running':
            return 'OK'
        elif status == 'exited':
            return 'WARNING'
        elif status == 'dead':
            return 'CRITICAL'
        else:
            return 'CAREFUL'


class ThreadDockerGrabber(threading.Thread):
    """
    Specific thread to grab docker stats.

    stats is a dict
    """

    def __init__(self, container):
        """Init the class.

        container: instance of Docker-py Container
        """
        super(ThreadDockerGrabber, self).__init__()
        # Event needed to properly stop the thread
        self._stopper = threading.Event()
        # docker-py returns the stats as a stream
        self._container = container
        self._stats_stream = container.stats(decode=True)
        # The class returns the stats as a dict
        self._stats = {}
        logger.debug("docker plugin - Create thread for container {}".format(self._container.name))

    def run(self):
        """Grab the stats.

        Infinite loop, should be stopped by calling the stop() method
        """
        try:
            for i in self._stats_stream:
                self._stats = i
                time.sleep(0.1)
                if self.stopped():
                    break
        except:
            logger.debug("docker plugin - Exception thrown during run")
            self.stop()

    @property
    def stats(self):
        """Stats getter."""
        return self._stats

    @stats.setter
    def stats(self, value):
        """Stats setter."""
        self._stats = value

    def stop(self, timeout=None):
        """Stop the thread."""
        logger.debug("docker plugin - Close thread for container {}".format(self._container.name))
        self._stopper.set()

    def stopped(self):
        """Return True if the thread is stopped."""
        return self._stopper.isSet()


def sort_stats(stats):
    # Sort Docker stats using the same function as the processes plugin
    sortedby = 'cpu_percent'
    sortedby_secondary = 'memory_usage'
    if glances_processes.sort_key.startswith('memory'):
        sortedby = 'memory_usage'
        sortedby_secondary = 'cpu_percent'
    sort_stats_processes(stats['containers'],
                         sortedby=sortedby,
                         sortedby_secondary=sortedby_secondary)
    return stats