Completed
Push — master ( 2b80fa...6ea077 ) by Nicolas
created 01:22

Plugin.__connect_old()   B

Complexity
    Conditions   5
Size
    Total Lines  26
Duplication
    Lines        0
    Ratio        0 %
Importance
    Changes      0

Metric  Value
cc      5
c       0
b       0
f       0
dl      0
loc     26
rs      8.0894
# -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2017 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Docker plugin."""

import os
import re
import threading
import time

from glances.compat import iterkeys, itervalues
from glances.logger import logger
from glances.timer import getTimeSinceLastUpdate
from glances.plugins.glances_plugin import GlancesPlugin

# Docker-py library (optional and Linux-only)
# https://github.com/docker/docker-py
try:
    import docker
    import requests
except ImportError as e:
    logger.debug("Docker library not found (%s). Glances cannot grab Docker info." % e)
    docker_tag = False
else:
    docker_tag = True
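    # docker_tag records whether the optional docker/requests imports
    # succeeded; update() below also switches it to False if connecting to
    # the Docker server fails.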


class Plugin(GlancesPlugin):

    """Glances Docker plugin.

    stats is a list
    """

    def __init__(self, args=None):
        """Init the plugin."""
        super(Plugin, self).__init__(args=args)

        # The plugin can be disabled using: args.disable_docker
        self.args = args

        # We want to display the stat in the curse interface
        self.display_curse = True

        # Init the Docker API
        self.docker_client = False

        # Dict of threads (to grab stats asynchronously, one thread is created per container)
        # key: Container Id
        # value: instance of ThreadDockerGrabber
        self.thread_list = {}

        # Init the stats
        self.reset()

    def exit(self):
        """Overwrite the exit method to close threads."""
        for t in itervalues(self.thread_list):
            t.stop()
        # Call the parent class
        super(Plugin, self).exit()

    def get_key(self):
        """Return the key of the list."""
        return 'name'

    def get_export(self):
        """Overwrite the default export method.

        - Only exports containers
        - The key is the first container name
        """
        ret = []
        try:
            ret = self.stats['containers']
        except KeyError as e:
            logger.debug("Docker export error {}".format(e))
        return ret

    def __connect_old(self, version):
        """Connect to the Docker server with the 'old school' method."""
        # Glances is compatible with both API 2.0 and <2.0
        # (thanks to the @bacondropped patch)
        if hasattr(docker, 'APIClient'):
            # Correct issue #1000 for API 2.0
            init_docker = docker.APIClient
        elif hasattr(docker, 'Client'):
            # < API 2.0
            init_docker = docker.Client
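            # (docker-py 2.x renamed the low-level Client class to APIClient,
            # hence the double hasattr() check above)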
        else:
            # Cannot find an init method (new API version?)
            logger.error("Cannot find any way to init the Docker API")
            return None
        # Init connection to the Docker API
        try:
            if version is None:
                ret = init_docker(base_url='unix://var/run/docker.sock')
            else:
                ret = init_docker(base_url='unix://var/run/docker.sock',
                                  version=version)
        except NameError:
            # docker lib not found
            return None

        return ret

    def connect(self, version=None):
        """Connect to the Docker server."""
        if hasattr(docker, 'from_env') and version is not None:
            # Connect to Docker using the default socket or
            # the configuration in your environment
            ret = docker.from_env()
        else:
            ret = self.__connect_old(version=version)

        # Check the server connection with the version() method
        try:
            ret.version()
        except requests.exceptions.ConnectionError as e:
            # Connection error (Docker not detected)
            # Keep this message in debug mode
            logger.debug("Can't connect to the Docker server (%s)" % e)
            return None
        except docker.errors.APIError as e:
            if version is None:
                # API error (version mismatch?)
                logger.debug("Docker API error (%s)" % e)
                # Try the connection with the server version
                version = re.search('(?:server API version|server)\:\ (.*)\)\".*\)', str(e))
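                # A version-mismatch APIError embeds the server API version in
                # its message (the regex looks for "server API version: ..."),
                # so group(1) below is used to retry the connection with it.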
                if version:
                    logger.debug("Try connection with Docker API version %s" % version.group(1))
                    ret = self.connect(version=version.group(1))
                else:
                    logger.debug("Cannot retrieve Docker server version")
                    ret = None
            else:
                # API error
                logger.error("Docker API error (%s)" % e)
                ret = None
        except Exception as e:
            # Other exceptions...
            # Connection error (Docker not detected)
            logger.error("Can't connect to the Docker server (%s)" % e)
            ret = None

        # Log a debug message if the Docker plugin is disabled
        if ret is None:
            logger.debug("Docker plugin is disabled because an error has been detected")

        return ret

    def reset(self):
        """Reset/init the stats."""
        self.stats = {}

    @GlancesPlugin._check_decorator
    @GlancesPlugin._log_result_decorator
    def update(self):
        """Update Docker stats using the input method."""
        global docker_tag

        # Reset stats
        self.reset()

        # Get the current Docker API client
        if not self.docker_client:
            # First time, try to connect to the server
            try:
                self.docker_client = self.connect()
            except Exception:
                docker_tag = False
            else:
                if self.docker_client is None:
                    docker_tag = False

        # The Docker-py lib is mandatory
        if not docker_tag:
            return self.stats

        if self.input_method == 'local':
            # Update stats

            # Docker version
            # Example: {
            #     "KernelVersion": "3.16.4-tinycore64",
            #     "Arch": "amd64",
            #     "ApiVersion": "1.15",
            #     "Version": "1.3.0",
            #     "GitCommit": "c78088f",
            #     "Os": "linux",
            #     "GoVersion": "go1.3.3"
            # }
            try:
                self.stats['version'] = self.docker_client.version()
            except Exception as e:
                # Correct issue#649
                logger.error("{} plugin - Cannot get Docker version ({})".format(self.plugin_name, e))
                return self.stats

            # Container globals information
            # Example: [{u'Status': u'Up 36 seconds',
            #            u'Created': 1420378904,
            #            u'Image': u'nginx:1',
            #            u'Ports': [{u'Type': u'tcp', u'PrivatePort': 443},
            #                       {u'IP': u'0.0.0.0', u'Type': u'tcp', u'PublicPort': 8080, u'PrivatePort': 80}],
            #            u'Command': u"nginx -g 'daemon off;'",
            #            u'Names': [u'/webstack_nginx_1'],
            #            u'Id': u'b0da859e84eb4019cf1d965b15e9323006e510352c402d2f442ea632d61faaa5'}]

            # Update current containers list
            try:
                self.stats['containers'] = self.docker_client.containers() or []
            except Exception as e:
                logger.error("{} plugin - Cannot get containers list ({})".format(self.plugin_name, e))
                return self.stats

            # Start a new thread for each new container
            for container in self.stats['containers']:
                if container['Id'] not in self.thread_list:
                    # Thread does not exist in the internal dict
                    # Create it and add it to the internal dict
                    logger.debug("{} plugin - Create thread for container {}".format(self.plugin_name, container['Id'][:12]))
                    t = ThreadDockerGrabber(self.docker_client, container['Id'])
                    self.thread_list[container['Id']] = t
                    t.start()

            # Stop threads for non-existing containers
            nonexisting_containers = set(iterkeys(self.thread_list)) - set([c['Id'] for c in self.stats['containers']])
            for container_id in nonexisting_containers:
                # Stop the thread
                logger.debug("{} plugin - Stop thread for old container {}".format(self.plugin_name, container_id[:12]))
                self.thread_list[container_id].stop()
                # Delete the item from the dict
                del self.thread_list[container_id]

            # Get stats for all containers
            for container in self.stats['containers']:
                # The key is the container name and not the Id
                container['key'] = self.get_key()

                # Export name (first name in the list, without the /)
                container['name'] = container['Names'][0][1:]
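                # 'Names' is a list such as [u'/webstack_nginx_1'] (see the
                # example above), so stripping the leading '/' gives the
                # exported name: 'webstack_nginx_1'.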

                container['cpu'] = self.get_docker_cpu(container['Id'], self.thread_list[container['Id']].stats)
                container['memory'] = self.get_docker_memory(container['Id'], self.thread_list[container['Id']].stats)
                container['network'] = self.get_docker_network(container['Id'], self.thread_list[container['Id']].stats)
                container['io'] = self.get_docker_io(container['Id'], self.thread_list[container['Id']].stats)

        elif self.input_method == 'snmp':
            # Update stats using SNMP
            # Not available
            pass

        return self.stats

    def get_docker_cpu(self, container_id, all_stats):
        """Return the container CPU usage.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'total': 1.49}
        """
        cpu_new = {}
        ret = {'total': 0.0}

        # Read the stats
        # For each container, you will find a pseudo-file cpuacct.stat,
        # containing the CPU usage accumulated by the processes of the container.
        # Those times are expressed in ticks of 1/USER_HZ of a second.
        # On x86 systems, USER_HZ is 100.
        try:
            cpu_new['total'] = all_stats['cpu_stats']['cpu_usage']['total_usage']
            cpu_new['system'] = all_stats['cpu_stats']['system_cpu_usage']
            cpu_new['nb_core'] = len(all_stats['cpu_stats']['cpu_usage']['percpu_usage'] or [])
        except KeyError as e:
            # all_stats does not have CPU information
            logger.debug("Cannot grab CPU usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
        else:
            # Previous CPU stats are stored in the cpu_old variable
            if not hasattr(self, 'cpu_old'):
                # First call, we init the cpu_old variable
                self.cpu_old = {}
                try:
                    self.cpu_old[container_id] = cpu_new
                except (IOError, UnboundLocalError):
                    pass

            if container_id not in self.cpu_old:
                try:
                    self.cpu_old[container_id] = cpu_new
                except (IOError, UnboundLocalError):
                    pass
            else:
                # Compute the CPU usage since the previous refresh
                cpu_delta = float(cpu_new['total'] - self.cpu_old[container_id]['total'])
                system_delta = float(cpu_new['system'] - self.cpu_old[container_id]['system'])
                if cpu_delta > 0.0 and system_delta > 0.0:
                    ret['total'] = (cpu_delta / system_delta) * float(cpu_new['nb_core']) * 100
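                    # Example: if the container consumed 2e8 ns of CPU while the
                    # host's total CPU time advanced by 4e9 ns across 4 cores,
                    # usage = (2e8 / 4e9) * 4 * 100 = 20 %.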

                # Save stats to compute next stats
                self.cpu_old[container_id] = cpu_new

        # Return the stats
        return ret

    def get_docker_memory(self, container_id, all_stats):
        """Return the container MEMORY.

        Input: id is the full container id
               all_stats is the output of the stats method of the Docker API
        Output: a dict {'rss': 1015808, 'cache': 356352,  'usage': ..., 'max_usage': ...}
        """
        ret = {}
        # Read the stats
        try:
            # Does not exist anymore with Docker 1.11 (issue #848)
            # ret['rss'] = all_stats['memory_stats']['stats']['rss']
            # ret['cache'] = all_stats['memory_stats']['stats']['cache']
            ret['usage'] = all_stats['memory_stats']['usage']
            ret['limit'] = all_stats['memory_stats']['limit']
            ret['max_usage'] = all_stats['memory_stats']['max_usage']
        except (KeyError, TypeError) as e:
            # all_stats does not have MEM information
            logger.debug("Cannot grab MEM usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
        # Return the stats
        return ret

    def get_docker_network(self, container_id, all_stats):
        """Return the container network usage using the Docker API (v1.0 or higher).

        Input: id is the full container id
        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
        with:
            time_since_update: number of seconds elapsed since the latest grab
            rx: number of bytes received
            tx: number of bytes transmitted
        """
        # Init the returned dict
        network_new = {}

        # Read the rx/tx stats (in bytes)
        try:
            netcounters = all_stats["networks"]
        except KeyError as e:
            # all_stats does not have NETWORK information
            logger.debug("Cannot grab NET usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
            # No fallback available...
            return network_new

        # Previous network interface stats are stored in the netcounters_old variable
        if not hasattr(self, 'netcounters_old'):
            # First call, we init the netcounters_old var
            self.netcounters_old = {}
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.netcounters_old:
            try:
                self.netcounters_old[container_id] = netcounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable Rx/s and Tx/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            try:
                network_new['time_since_update'] = getTimeSinceLastUpdate('docker_net_{}'.format(container_id))
                network_new['rx'] = netcounters["eth0"]["rx_bytes"] - self.netcounters_old[container_id]["eth0"]["rx_bytes"]
                network_new['tx'] = netcounters["eth0"]["tx_bytes"] - self.netcounters_old[container_id]["eth0"]["tx_bytes"]
                network_new['cumulative_rx'] = netcounters["eth0"]["rx_bytes"]
                network_new['cumulative_tx'] = netcounters["eth0"]["tx_bytes"]
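                # Per-second rates are derived downstream as
                # rx / time_since_update and tx / time_since_update, which is
                # what msg_curse() does for the Rx/s and Tx/s columns.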
            except KeyError as e:
                # all_stats does not have INTERFACE information
                logger.debug("Cannot grab network interface usage for container {} ({})".format(container_id, e))
                logger.debug(all_stats)

            # Save stats to compute next bitrate
            self.netcounters_old[container_id] = netcounters

        # Return the stats
        return network_new

    def get_docker_io(self, container_id, all_stats):
        """Return the container IO usage using the Docker API (v1.0 or higher).

        Input: id is the full container id
        Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
        with:
            time_since_update: number of seconds elapsed since the latest grab
            ior: number of bytes read
            iow: number of bytes written
        """
        # Init the returned dict
        io_new = {}

        # Read the ior/iow stats (in bytes)
        try:
            iocounters = all_stats["blkio_stats"]
        except KeyError as e:
            # all_stats does not have IO information
            logger.debug("Cannot grab block IO usage for container {} ({})".format(container_id, e))
            logger.debug(all_stats)
            # No fallback available...
            return io_new

        # Previous IO stats are stored in the iocounters_old variable
        if not hasattr(self, 'iocounters_old'):
            # First call, we init the iocounters_old var
            self.iocounters_old = {}
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass

        if container_id not in self.iocounters_old:
            try:
                self.iocounters_old[container_id] = iocounters
            except (IOError, UnboundLocalError):
                pass
        else:
            # By storing time data we enable IoR/s and IoW/s calculations in the
            # XML/RPC API, which would otherwise be overly difficult work
            # for users of the API
            try:
                # Read the IOR and IOW values in the structured list of dicts
                ior = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow = [i for i in iocounters['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
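                # Each entry of io_service_bytes_recursive is a dict such as
                # {'major': 8, 'minor': 0, 'op': 'Read', 'value': 1234}, hence
                # the filtering on the 'op' field above.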
                ior_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Read'][0]['value']
                iow_old = [i for i in self.iocounters_old[container_id]['io_service_bytes_recursive'] if i['op'] == 'Write'][0]['value']
            except (IndexError, KeyError) as e:
                # all_stats does not have IO information
                logger.debug("Cannot grab block IO usage for container {} ({})".format(container_id, e))
            else:
                io_new['time_since_update'] = getTimeSinceLastUpdate('docker_io_{}'.format(container_id))
                io_new['ior'] = ior - ior_old
                io_new['iow'] = iow - iow_old
                io_new['cumulative_ior'] = ior
                io_new['cumulative_iow'] = iow

                # Save stats to compute next bitrate
                self.iocounters_old[container_id] = iocounters

        # Return the stats
        return io_new

    def get_user_ticks(self):
        """Return the number of user ticks per second by reading the system configuration."""
        return os.sysconf(os.sysconf_names['SC_CLK_TCK'])

    def get_stats_action(self):
        """Return stats for the action.

        Docker will return self.stats['containers']"""
        return self.stats['containers']

    def update_views(self):
        """Update stats views."""
        # Call the parent's method
        super(Plugin, self).update_views()

        if 'containers' not in self.stats:
            return False

        # Add specific information
        # Alert
        for i in self.stats['containers']:
            # Init the views for the current container (key = container name)
            self.views[i[self.get_key()]] = {'cpu': {}, 'mem': {}}
            # CPU alert
            if 'cpu' in i and 'total' in i['cpu']:
                # Look for a container-specific CPU threshold in the conf file
                alert = self.get_alert(i['cpu']['total'],
                                       header=i['name'] + '_cpu',
                                       action_key=i['name'])
                if alert == 'DEFAULT':
                    # Not found? Fall back to the default CPU threshold value
                    alert = self.get_alert(i['cpu']['total'], header='cpu')
                self.views[i[self.get_key()]]['cpu']['decoration'] = alert
            # MEM alert
            if 'memory' in i and 'usage' in i['memory']:
                # Look for a container-specific MEM threshold in the conf file
                alert = self.get_alert(i['memory']['usage'],
                                       maximum=i['memory']['limit'],
                                       header=i['name'] + '_mem',
                                       action_key=i['name'])
                if alert == 'DEFAULT':
                    # Not found? Fall back to the default MEM threshold value
                    alert = self.get_alert(i['memory']['usage'],
                                           maximum=i['memory']['limit'],
                                           header='mem')
                self.views[i[self.get_key()]]['mem']['decoration'] = alert

        return True

    def msg_curse(self, args=None):
        """Return the dict to display in the curse interface."""
        # Init the return message
        ret = []

        # Only process if stats exist (and are not empty) and the plugin display is enabled...
        if not self.stats \
           or len(self.stats['containers']) == 0 \
           or self.is_disable():
            return ret

        # Build the string message
        # Title
        msg = '{}'.format('CONTAINERS')
        ret.append(self.curse_add_line(msg, "TITLE"))
        msg = ' {}'.format(len(self.stats['containers']))
        ret.append(self.curse_add_line(msg))
        msg = ' (served by Docker {})'.format(self.stats['version']["Version"])
        ret.append(self.curse_add_line(msg))
        ret.append(self.curse_new_line())
        # Header
        ret.append(self.curse_new_line())
        # msg = '{:>14}'.format('Id')
        # ret.append(self.curse_add_line(msg))
        # Get the longest container name (cut to 20 chars max)
        name_max_width = min(20, len(max(self.stats['containers'], key=lambda x: len(x['name']))['name']))
        msg = ' {:{width}}'.format('Name', width=name_max_width)
        ret.append(self.curse_add_line(msg))
        msg = '{:>26}'.format('Status')
        ret.append(self.curse_add_line(msg))
        msg = '{:>6}'.format('CPU%')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('MEM')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('/MAX')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('IOR/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('IOW/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('Rx/s')
        ret.append(self.curse_add_line(msg))
        msg = '{:>7}'.format('Tx/s')
        ret.append(self.curse_add_line(msg))
        msg = ' {:8}'.format('Command')
        ret.append(self.curse_add_line(msg))
        # Data
        for container in self.stats['containers']:
            ret.append(self.curse_new_line())
            # Id
            # msg = '{:>14}'.format(container['Id'][0:12])
            # ret.append(self.curse_add_line(msg))
            # Name
            name = container['name']
            if len(name) > name_max_width:
                name = '_' + name[-name_max_width + 1:]
            else:
                name = name[:name_max_width]
            msg = ' {:{width}}'.format(name, width=name_max_width)
            ret.append(self.curse_add_line(msg))
            # Status
            status = self.container_alert(container['Status'])
            msg = container['Status'].replace("minute", "min")
            msg = '{:>26}'.format(msg[0:25])
            ret.append(self.curse_add_line(msg, status))
            # CPU
            try:
                msg = '{:>6.1f}'.format(container['cpu']['total'])
            except KeyError:
                msg = '{:>6}'.format('?')
            ret.append(self.curse_add_line(msg, self.get_views(item=container['name'],
                                                               key='cpu',
                                                               option='decoration')))
            # MEM
            try:
                msg = '{:>7}'.format(self.auto_unit(container['memory']['usage']))
            except KeyError:
                msg = '{:>7}'.format('?')
            ret.append(self.curse_add_line(msg, self.get_views(item=container['name'],
                                                               key='mem',
                                                               option='decoration')))
            try:
                msg = '{:>7}'.format(self.auto_unit(container['memory']['limit']))
            except KeyError:
                msg = '{:>7}'.format('?')
            ret.append(self.curse_add_line(msg))
            # IO R/W
            for r in ['ior', 'iow']:
                try:
                    value = self.auto_unit(int(container['io'][r] // container['io']['time_since_update'] * 8)) + "b"
                    msg = '{:>7}'.format(value)
                except KeyError:
                    msg = '{:>7}'.format('?')
                ret.append(self.curse_add_line(msg))
            # NET RX/TX
            if args.byte:
                # Bytes per second (for dummies)
                to_bit = 1
                unit = ''
            else:
                # Bits per second (for real network administrators | default)
                to_bit = 8
                unit = 'b'
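            # Example: 125 000 B received over a 1 s interval gives
            # 125 000 // 1 * 8 = 1 000 000 bits per second, which auto_unit()
            # then shortens for display.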
            for r in ['rx', 'tx']:
                try:
                    value = self.auto_unit(int(container['network'][r] // container['network']['time_since_update'] * to_bit)) + unit
                    msg = '{:>7}'.format(value)
                except KeyError:
                    msg = '{:>7}'.format('?')
                ret.append(self.curse_add_line(msg))
            # Command
            msg = ' {}'.format(container['Command'])
            ret.append(self.curse_add_line(msg, splittable=True))

        return ret

    def container_alert(self, status):
        """Analyse the container status."""
        if "Paused" in status:
            return 'CAREFUL'
        else:
            return 'OK'


class ThreadDockerGrabber(threading.Thread):
    """
    Specific thread to grab docker stats.

    stats is a dict
    """

    def __init__(self, docker_client, container_id):
        """Init the class:
        docker_client: instance of Docker-py client
        container_id: Id of the container"""
        logger.debug("docker plugin - Create thread for container {}".format(container_id[:12]))
        super(ThreadDockerGrabber, self).__init__()
        # Event needed to properly stop the thread
        self._stopper = threading.Event()
        # docker-py returns stats as a stream
        self._container_id = container_id
        self._stats_stream = docker_client.stats(container_id, decode=True)
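        # Note: in stream mode (the default) with decode=True, docker-py
        # yields one already-decoded stats dict per sample instead of raw JSON.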
        # The class returns the stats as a dict
        self._stats = {}

    def run(self):
        """Function called to grab stats.
        Infinite loop, should be stopped by calling the stop() method"""

        for i in self._stats_stream:
            self._stats = i
            time.sleep(0.1)
            if self.stopped():
                break

    @property
    def stats(self):
        """Stats getter"""
        return self._stats

    @stats.setter
    def stats(self, value):
        """Stats setter"""
        self._stats = value

    def stop(self, timeout=None):
        """Stop the thread"""
        logger.debug("docker plugin - Close thread for container {}".format(self._container_id[:12]))
        self._stopper.set()

    def stopped(self):
        """Return True if the thread is stopped"""
        return self._stopper.isSet()