Test Failed
Push — develop ( 6db367...4b15e6 ) by Nicolas, created 02:36

glances.plugins.containers.engines.podman   (Rating: F)

Complexity

Total Complexity 61

Size/Duplication

Total Lines 407
Duplicated Lines 0 %

Importance

Changes 0
Metric Value
eloc 250
dl 0
loc 407
rs 3.52
c 0
b 0
f 0
wmc 61

21 Methods

Rating   Name   Duplication   Size   Complexity  
A PodmanContainerStatsFetcher.stop() 0 2 1
A PodmanContainerStatsFetcher.__init__() 0 13 1
A PodmanPodStatsFetcher._get_network_stats() 0 25 4
A PodmanPodStatsFetcher.activity_stats() 0 22 2
A PodmanExtension.__init__() 0 14 2
A PodmanPodStatsFetcher._log_debug() 0 3 1
C PodmanExtension.update() 0 51 11
A PodmanExtension.update_version() 0 4 1
A PodmanPodStatsFetcher.__init__() 0 7 1
A PodmanPodStatsFetcher._get_io_stats() 0 25 4
B PodmanExtension.generate_stats() 0 53 6
A PodmanExtension.key() 0 4 1
A PodmanContainerStatsFetcher.time_since_update() 0 4 1
C PodmanContainerStatsFetcher._compute_activity_stats() 0 51 9
A PodmanExtension.stop() 0 7 3
A PodmanPodStatsFetcher._get_cpu_stats() 0 11 2
A PodmanExtension.connect() 0 10 2
A PodmanPodStatsFetcher.stop() 0 2 1
A PodmanContainerStatsFetcher.activity_stats() 0 10 1
A PodmanPodStatsFetcher._get_memory_stats() 0 20 4
A PodmanContainerStatsFetcher.get_streamed_stats() 0 8 3

How to fix: Complexity

Complex classes like glances.plugins.containers.engines.podman often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to finding such a component is to look for fields/methods that share the same prefixes or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster.
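
As an illustration only (this sketch is not part of the analysis output, and the names PodStatsParser, parse_cpu and parse_memory are hypothetical), the _get_*_stats helpers of PodmanPodStatsFetcher share a prefix and do nothing but turn the string fields of podman pod stats into numbers, so they form one candidate component for Extract Class. The sketch reuses string_value_to_float, which the module already imports from glances.globals:

from typing import Optional

from glances.globals import string_value_to_float


class PodStatsParser:
    """Hypothetical extracted component: converts the raw string fields
    reported by `podman pod stats` (e.g. "1.5MB / 2GB") into numbers."""

    @staticmethod
    def parse_cpu(stats: dict) -> Optional[dict]:
        # Same logic as PodmanPodStatsFetcher._get_cpu_stats, without the logging
        if "CPU" not in stats:
            return None
        return {"total": string_value_to_float(stats["CPU"].rstrip("%"))}

    @staticmethod
    def parse_memory(stats: dict) -> Optional[dict]:
        # Same logic as _get_memory_stats; error handling omitted for brevity
        if "MemUsage" not in stats or "/" not in stats["MemUsage"]:
            return None
        usage_str, limit_str = stats["MemUsage"].split("/")
        return {
            "usage": string_value_to_float(usage_str),
            "limit": string_value_to_float(limit_str),
            "inactive_file": 0,
        }


class PodmanPodStatsFetcher:
    # After the extraction, the fetcher keeps the threaded-streaming and
    # result-assembly concerns and delegates string parsing to PodStatsParser.
    def __init__(self, pod_manager):
        self._pod_manager = pod_manager
        self._parser = PodStatsParser()

    def _get_cpu_stats(self, stats: dict) -> Optional[dict]:
        return self._parser.parse_cpu(stats)

Whether the split pays off is a judgment call, but it separates the string parsing (easy to unit-test in isolation) from the streaming state that the class currently mixes in.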

#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2022 Nicolas Hennion <[email protected]>
#
# SPDX-License-Identifier: LGPL-3.0-only

"""Podman Extension unit for Glances' Containers plugin."""

import time
from datetime import datetime
from typing import Any, Optional

from glances.globals import nativestr, pretty_date, replace_special_chars, string_value_to_float
from glances.logger import logger
from glances.stats_streamer import ThreadedIterableStreamer

# Podman library (optional and Linux-only)
# https://pypi.org/project/podman/
try:
    from podman import PodmanClient
except Exception as e:
    disable_plugin_podman = True
    # Display debug message if import KeyError
    logger.warning(f"Error loading Podman deps Lib. Podman feature in the Containers plugin is disabled ({e})")
else:
    disable_plugin_podman = False

class PodmanContainerStatsFetcher:
    MANDATORY_FIELDS = ["CPU", "MemUsage", "MemLimit", "BlockInput", "BlockOutput"]

    def __init__(self, container):
        self._container = container

        # Previous stats are stored in the self._old_computed_stats variable
        # We store time data to enable rate calculations to avoid complexity for consumers of the APIs exposed.
        self._old_computed_stats = {}

        # Last time when output stats (results) were computed
        self._last_stats_computed_time = 0

        # Threaded Streamer
        stats_iterable = container.stats(decode=True)
        self._streamer = ThreadedIterableStreamer(stats_iterable, initial_stream_value={})

    def stop(self):
        self._streamer.stop()

    def get_streamed_stats(self) -> dict[str, Any]:
        stats = self._streamer.stats
        if stats is None or stats.get("Error", False):
            logger.error(f"containers (Podman) Container({self._container.id}): Stats fetching failed")
            logger.debug(f"containers (Podman) Container({self._container.id}): ", stats)
            return None

        return stats["Stats"][0]

    @property
    def activity_stats(self) -> dict[str, Any]:
        """Activity Stats

        Each successive access of activity_stats will cause computation of activity_stats
        """
        computed_activity_stats = self._compute_activity_stats()
        self._old_computed_stats = computed_activity_stats
        self._last_stats_computed_time = time.time()
        return computed_activity_stats

    def _compute_activity_stats(self) -> dict[str, dict[str, Any]]:
        stats = {"cpu": {}, "memory": {}, "io": {}, "network": {}}
        api_stats = self.get_streamed_stats()

        # Glances breaks if Podman container is started while it is running See #3199
        if api_stats is None:
            # If stats fetching failed, return empty stats
            logger.error(f"containers (Podman) Container({self._container.id}): Failed to fetch stats")
            return stats

        if any(field not in api_stats for field in self.MANDATORY_FIELDS) or (
            "Network" not in api_stats and any(k not in api_stats for k in ['NetInput', 'NetOutput'])
        ):
            logger.error(f"containers (Podman) Container({self._container.id}): Missing mandatory fields")
            return stats

        try:
            stats["cpu"]["total"] = api_stats['CPU']

            stats["memory"]["usage"] = api_stats["MemUsage"]
            stats["memory"]["limit"] = api_stats["MemLimit"]

            stats["io"]["ior"] = api_stats["BlockInput"]
            stats["io"]["iow"] = api_stats["BlockOutput"]
            stats["io"]["time_since_update"] = 1
            # Hardcode `time_since_update` to 1 as podman already sends at the same fixed rate per second

            if "Network" not in api_stats:
                # For podman rooted mode
                stats["network"]['rx'] = api_stats["NetInput"]
                stats["network"]['tx'] = api_stats["NetOutput"]
                stats["network"]['time_since_update'] = 1
                # Hardcode to 1 as podman already sends at the same fixed rate per second
            elif api_stats["Network"] is not None:
                # api_stats["Network"] can be None if the infra container of the pod is killed
                # For podman in rootless mode
                stats['network'] = {
                    "cumulative_rx": sum(interface["RxBytes"] for interface in api_stats["Network"].values()),
                    "cumulative_tx": sum(interface["TxBytes"] for interface in api_stats["Network"].values()),
                }
                # Using previous stats to calculate rates
                old_network_stats = self._old_computed_stats.get("network")
                if old_network_stats:
                    stats['network']['time_since_update'] = round(self.time_since_update)
                    stats['network']['rx'] = stats['network']['cumulative_rx'] - old_network_stats["cumulative_rx"]
                    stats['network']['tx'] = stats['network']['cumulative_tx'] - old_network_stats['cumulative_tx']

        except ValueError as e:
            logger.error(f"containers (Podman) Container({self._container.id}): Non float stats values found", e)

        return stats

    @property
    def time_since_update(self) -> float:
        # In case no update (at startup), default to 1
        return max(1, self._streamer.last_update_time - self._last_stats_computed_time)

class PodmanPodStatsFetcher:
    def __init__(self, pod_manager):
        self._pod_manager = pod_manager

        # Threaded Streamer
        # Temporary patch to get podman extension working
        stats_iterable = (pod_manager.stats(decode=True) for _ in iter(int, 1))
        self._streamer = ThreadedIterableStreamer(stats_iterable, initial_stream_value={}, sleep_duration=2)

    def _log_debug(self, msg, exception=None):
        logger.debug(f"containers (Podman): Pod Manager - {msg} ({exception})")
        logger.debug(self._streamer.stats)

    def stop(self):
        self._streamer.stop()

    @property
    def activity_stats(self):
        result_stats = {}
        container_stats = self._streamer.stats
        for stat in container_stats:
            io_stats = self._get_io_stats(stat)
            cpu_stats = self._get_cpu_stats(stat)
            memory_stats = self._get_memory_stats(stat)
            network_stats = self._get_network_stats(stat)

            computed_stats = {
                "name": stat["Name"],
                "cid": stat["CID"],
                "pod_id": stat["Pod"],
                "io": io_stats or {},
                "memory": memory_stats or {},
                "network": network_stats or {},
                "cpu": cpu_stats or {},
            }
            result_stats[stat["CID"]] = computed_stats

        return result_stats

    def _get_cpu_stats(self, stats: dict) -> Optional[dict]:
        """Return the container CPU usage.

        Output: a dict {'total': 1.49}
        """
        if "CPU" not in stats:
            self._log_debug("Missing CPU usage fields")
            return None

        cpu_usage = string_value_to_float(stats["CPU"].rstrip("%"))
        return {"total": cpu_usage}

    def _get_memory_stats(self, stats) -> Optional[dict]:
        """Return the container MEMORY.

        Output: a dict {'usage': ..., 'limit': ...}
        """
        if "MemUsage" not in stats or "/" not in stats["MemUsage"]:
            self._log_debug("Missing MEM usage fields")
            return None

        memory_usage_str = stats["MemUsage"]
        usage_str, limit_str = memory_usage_str.split("/")

        try:
            usage = string_value_to_float(usage_str)
            limit = string_value_to_float(limit_str)
        except ValueError as e:
            self._log_debug("Compute MEM usage failed", e)
            return None

        return {'usage': usage, 'limit': limit, 'inactive_file': 0}

    def _get_network_stats(self, stats) -> Optional[dict]:
        """Return the container network usage using the Docker API (v1.0 or higher).

        Output: a dict {'time_since_update': 3000, 'rx': 10, 'tx': 65}.
        with:
            time_since_update: number of seconds elapsed between the latest grab
            rx: Number of bytes received
            tx: Number of bytes transmitted
        """
        if "NetIO" not in stats or "/" not in stats["NetIO"]:
            self._log_debug("Compute MEM usage failed")
            return None

        net_io_str = stats["NetIO"]
        rx_str, tx_str = net_io_str.split("/")

        try:
            rx = string_value_to_float(rx_str)
            tx = string_value_to_float(tx_str)
        except ValueError as e:
            self._log_debug("Compute MEM usage failed", e)
            return None

        # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculation procedure
        return {"rx": rx, "tx": tx, "time_since_update": 1}

    def _get_io_stats(self, stats) -> Optional[dict]:
        """Return the container IO usage using the Docker API (v1.0 or higher).

        Output: a dict {'time_since_update': 3000, 'ior': 10, 'iow': 65}.
        with:
            time_since_update: number of seconds elapsed between the latest grab
            ior: Number of bytes read
            iow: Number of bytes written
        """
        if "BlockIO" not in stats or "/" not in stats["BlockIO"]:
            self._log_debug("Missing BlockIO usage fields")
            return None

        block_io_str = stats["BlockIO"]
        ior_str, iow_str = block_io_str.split("/")

        try:
            ior = string_value_to_float(ior_str)
            iow = string_value_to_float(iow_str)
        except ValueError as e:
            self._log_debug("Compute BlockIO usage failed", e)
            return None

        # Hardcode `time_since_update` to 1 as podman docs don't specify the rate calculation procedure
        return {"ior": ior, "iow": iow, "time_since_update": 1}

class PodmanExtension:
    """Glances' Containers Plugin's Docker Extension unit"""

    CONTAINER_ACTIVE_STATUS = ['running', 'paused']

    def __init__(self, podman_sock):
        self.disable = disable_plugin_podman
        if self.disable:
            raise Exception("Missing libs required to run Podman Extension (Containers)")

        self.display_error = True

        self.client = None
        self.ext_name = "containers (Podman)"
        self.podman_sock = podman_sock
        self.pods_stats_fetcher = None
        self.container_stats_fetchers = {}

        self.connect()

    def connect(self):
        """Connect to Podman."""
        try:
            self.client = PodmanClient(base_url=self.podman_sock)
            # PodmanClient works lazily, so make a ping to determine if socket is open
            self.client.ping()
        except Exception as e:
            logger.debug(f"{self.ext_name} plugin - Can't connect to Podman ({e})")
            self.client = None
            self.disable = True

    def update_version(self):
        # Long and not useful anymore because the information is no more displayed in UIs
        # return self.client.version()
        return {}

    def stop(self) -> None:
        # Stop all streaming threads
        for t in self.container_stats_fetchers.values():
            t.stop()

        if self.pods_stats_fetcher:
            self.pods_stats_fetcher.stop()

    def update(self, all_tag) -> tuple[dict, list[dict[str, Any]]]:
        """Update Podman stats using the input method."""

        if not self.client or self.disable:
            return {}, []

        version_stats = self.update_version()

        # Update current containers list
        try:
            # Issue #1152: Podman module doesn't export details about stopped containers
            # The Containers/all key of the configuration file should be set to True
            containers = self.client.containers.list(all=all_tag)
            if not self.pods_stats_fetcher:
                self.pods_stats_fetcher = PodmanPodStatsFetcher(self.client.pods)
            self.display_error = True
        except Exception as e:
            if self.display_error:
                logger.error(f"{self.ext_name} plugin - Can't get containers list ({e})")
                self.display_error = False
            else:
                logger.debug(f"{self.ext_name} plugin - Can't get containers list ({e})")
            return version_stats, []

        # Start new thread for new container
        for container in containers:
            if container.id not in self.container_stats_fetchers:
                # StatsFetcher did not exist in the internal dict
                # Create it, add it to the internal dict
                logger.debug(f"{self.ext_name} plugin - Create thread for container {container.id[:12]}")
                self.container_stats_fetchers[container.id] = PodmanContainerStatsFetcher(container)

        # Stop threads for non-existing containers
        absent_containers = set(self.container_stats_fetchers.keys()) - {c.id for c in containers}
        for container_id in absent_containers:
            # Stop the StatsFetcher
            logger.debug(f"{self.ext_name} plugin - Stop thread for old container {container_id[:12]}")
            self.container_stats_fetchers[container_id].stop()
            # Delete the StatsFetcher from the dict
            del self.container_stats_fetchers[container_id]

        # Get stats for all containers
        container_stats = [self.generate_stats(container) for container in containers]

        pod_stats = self.pods_stats_fetcher.activity_stats
        for stats in container_stats:
            if stats["id"][:12] in pod_stats:
                stats["pod_name"] = pod_stats[stats["id"][:12]]["name"]
                stats["pod_id"] = pod_stats[stats["id"][:12]]["pod_id"]

        return version_stats, container_stats

    @property
    def key(self) -> str:
        """Return the key of the list."""
        return 'name'

    def generate_stats(self, container) -> dict[str, Any]:
        # Init the stats for the current container
        stats = {
            'key': self.key,
            'name': nativestr(container.name),
            'id': container.id,
            'image': ','.join(container.image.tags if container.image.tags else []),
            'status': container.attrs['State'],
            'created': container.attrs['Created'],
            'command': container.attrs.get('Command') or [],
            'io': {},
            'cpu': {},
            'memory': {},
            'network': {},
            'io_rx': None,
            'io_wx': None,
            'cpu_percent': None,
            'memory_percent': None,
            'network_rx': None,
            'network_tx': None,
            'uptime': None,
        }

        if stats['status'] not in self.CONTAINER_ACTIVE_STATUS:
            return stats

        stats_fetcher = self.container_stats_fetchers[container.id]
        activity_stats = stats_fetcher.activity_stats
        stats.update(activity_stats)

        # Additional fields
        stats['cpu_percent'] = stats['cpu'].get('total')
        stats['memory_usage'] = stats['memory'].get('usage')
        if stats['memory'].get('cache') is not None:
            stats['memory_usage'] -= stats['memory']['cache']
        stats['memory_inactive_file'] = stats['memory'].get('inactive_file')
        stats['memory_limit'] = stats['memory'].get('limit')

        if all(k in stats['io'] for k in ('ior', 'iow', 'time_since_update')):
            stats['io_rx'] = stats['io']['ior'] // stats['io']['time_since_update']
            stats['io_wx'] = stats['io']['iow'] // stats['io']['time_since_update']

        if all(k in stats['network'] for k in ('rx', 'tx', 'time_since_update')):
            stats['network_rx'] = stats['network']['rx'] // stats['network']['time_since_update']
            stats['network_tx'] = stats['network']['tx'] // stats['network']['time_since_update']

        started_at = datetime.fromtimestamp(container.attrs['StartedAt'])
        stats['uptime'] = pretty_date(started_at)

        # Manage special chars in command (see issue#2733)
        stats['command'] = replace_special_chars(' '.join(stats['command']))

        return stats