Total Complexity: 83
Total Lines: 555
Duplicated Lines: 4.14 %
Changes: 0
Duplicate code is one of the most pungent code smells. A common rule of thumb is to restructure code once it is duplicated in three or more places.
Common duplication problems and their corresponding solutions follow the same pattern: extract the shared logic into a single helper (a function, method, or class) and call it from every place that currently repeats it.
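For instance, the four near-identical try/except formatting blocks in build_io_line and build_net_line in the listing below could collapse into one helper. A minimal sketch, assuming the helper lives on the plugin class and reuses its existing auto_unit method; the name _format_rate and its signature are illustrative, not part of Glances:

    def _format_rate(self, container, key, unit='', scale=1, align='>'):
        # One place for the "value or '_'" fallback that is currently repeated
        # for io_rx, io_wx, network_rx and network_tx.
        try:
            value = self.auto_unit(int(container[key] * scale)) + unit
        except (KeyError, TypeError):
            value = '_'
        return f'{value:>7}' if align == '>' else f' {value:<7}'

    # build_io_line / build_net_line would then shrink to calls such as:
    #   ret.append(self.curse_add_line(self._format_rate(container, 'io_rx', unit='B')))
    #   ret.append(self.curse_add_line(self._format_rate(container, 'network_rx', unit='b', scale=8)))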
Complex classes like glances.plugins.containers often do a lot of different things. To break such a class down, we need to identify a cohesive component within the class. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields and methods belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often the quicker option.
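In this plugin, the build_* and maybe_add_* rendering methods in the listing below (build_title, build_header, build_cpu_line, and so on) share a prefix, all feed the curses view, and barely touch the update logic, so they form exactly such a component. A minimal sketch of Extract Class, assuming a new ContainersCurseRenderer class; the class name and its wiring are hypothetical, not part of Glances:

class ContainersCurseRenderer:
    """Hypothetical extracted component that owns the curses-rendering helpers."""

    def __init__(self, plugin):
        # Keep a back-reference for curse_add_line, auto_unit, views access, etc.
        self.plugin = plugin

    def build_title(self, ret):
        ...  # body moved verbatim from PluginModel.build_title

    def build_header(self, ret, name_max_width):
        ...  # body moved verbatim from PluginModel.build_header

# PluginModel.msg_curse would then only assemble the pipeline, e.g.:
#   renderer = ContainersCurseRenderer(self)
#   steps = [renderer.build_title, partial(renderer.build_header, name_max_width=name_max_width), ...]

The full listing of the analysed module, glances.plugins.containers, follows.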
#
# This file is part of Glances.
#
# SPDX-FileCopyrightText: 2022 Nicolas Hennion <[email protected]>
#
# SPDX-License-Identifier: LGPL-3.0-only
#

"""Containers plugin."""

from copy import deepcopy
from functools import partial, reduce
from typing import Any, Dict, List, Optional, Tuple

from glances.globals import iteritems, itervalues, nativestr
from glances.logger import logger
from glances.plugins.containers.engines import ContainersExtension
from glances.plugins.containers.engines.docker import DockerExtension, import_docker_error_tag
from glances.plugins.containers.engines.podman import PodmanExtension, import_podman_error_tag
from glances.plugins.plugin.model import GlancesPluginModel
from glances.processes import glances_processes
from glances.processes import sort_stats as sort_stats_processes

# Fields description
# description: human readable description
# short_name: short name to use in the UI
# unit: unit type
# rate: is it a rate? If yes, divide by time_since_update when displayed
# min_symbol: Auto unit should be used if the value is greater than 1 'X' (K, M, G)...
fields_description = {
    'name': {
        'description': 'Container name',
    },
    'id': {
        'description': 'Container ID',
    },
    'image': {
        'description': 'Container image',
    },
    'status': {
        'description': 'Container status',
    },
    'created': {
        'description': 'Container creation date',
    },
    'command': {
        'description': 'Container command',
    },
    'cpu_percent': {
        'description': 'Container CPU consumption',
        'unit': 'percent',
    },
    'memory_usage': {
        'description': 'Container memory usage',
        'unit': 'byte',
    },
    'io_rx': {
        'description': 'Container IO bytes read rate',
        'unit': 'bytepersecond',
    },
    'io_wx': {
        'description': 'Container IO bytes write rate',
        'unit': 'bytepersecond',
    },
    'network_rx': {
        'description': 'Container network RX bitrate',
        'unit': 'bitpersecond',
    },
    'network_tx': {
        'description': 'Container network TX bitrate',
        'unit': 'bitpersecond',
    },
    'uptime': {
        'description': 'Container uptime',
    },
    'engine': {
        'description': 'Container engine (Docker and Podman are currently supported)',
    },
    'pod_name': {
        'description': 'Pod name (only with Podman)',
    },
    'pod_id': {
        'description': 'Pod ID (only with Podman)',
    },
}

# Define the items history list (list of items to add to history)
# TODO: For the moment limited to the CPU. Had to change the graph exports
#       method to display one graph per container.
# items_history_list = [{'name': 'cpu_percent',
#                        'description': 'Container CPU consumption in %',
#                        'y_unit': '%'},
#                       {'name': 'memory_usage',
#                        'description': 'Container memory usage in bytes',
#                        'y_unit': 'B'},
#                       {'name': 'network_rx',
#                        'description': 'Container network RX bitrate in bits per second',
#                        'y_unit': 'bps'},
#                       {'name': 'network_tx',
#                        'description': 'Container network TX bitrate in bits per second',
#                        'y_unit': 'bps'},
#                       {'name': 'io_r',
#                        'description': 'Container IO bytes read per second',
#                        'y_unit': 'Bps'},
#                       {'name': 'io_w',
#                        'description': 'Container IO bytes write per second',
#                        'y_unit': 'Bps'}]
items_history_list = [{'name': 'cpu_percent', 'description': 'Container CPU consumption in %', 'y_unit': '%'}]

# List of keys to remove before export
export_exclude_list = ['cpu', 'io', 'memory', 'network']

# Sort dictionary for human
sort_for_human = {
    'io_counters': 'disk IO',
    'cpu_percent': 'CPU consumption',
    'memory_usage': 'memory consumption',
    'cpu_times': 'uptime',
    'name': 'container name',
    None: 'None',
}


class PluginModel(GlancesPluginModel):
    """Glances Docker plugin.

    stats is a dict: {'version': {...}, 'containers': [{}, {}]}
    """

    def __init__(self, args=None, config=None):
        """Init the plugin."""
        super().__init__(
            args=args, config=config, items_history_list=items_history_list, fields_description=fields_description
        )

        # The plugin can be disabled using: args.disable_docker
        self.args = args

        # Default config keys
        self.config = config

        # We want to display the stat in the curse interface
        self.display_curse = True

        self.watchers: Dict[str, ContainersExtension] = {}

        # Init the Docker API
        if not import_docker_error_tag:
            self.watchers['docker'] = DockerExtension()

        # Init the Podman API
        if not import_podman_error_tag:
            self.watchers['podman'] = PodmanExtension(podman_sock=self._podman_sock())

        # Sort key
        self.sort_key = None

        # Force a first update because we need two updates to get the first stats
        self.update()
        self.refresh_timer.set(0)

    def _podman_sock(self) -> str:
        """Return the podman sock.
        Could be defined in the [docker] section thanks to the podman_sock option.
        Default value: unix:///run/user/1000/podman/podman.sock
        """
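        # Illustration (not from the original module): with the glances.conf INI
        # syntax, the override described above would be, e.g.:
        #
        #     [docker]
        #     podman_sock=unix:///run/user/1000/podman/podman.sock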
        conf_podman_sock = self.get_conf_value('podman_sock')
        if len(conf_podman_sock) == 0:
            return "unix:///run/user/1000/podman/podman.sock"
        return conf_podman_sock[0]

    def exit(self) -> None:
        """Overwrite the exit method to close threads."""
        for watcher in itervalues(self.watchers):
            watcher.stop()

        # Call the parent class
        super().exit()

    def get_key(self) -> str:
        """Return the key of the list."""
        return 'name'

    def get_export(self) -> List[Dict]:
        """Overwrite the default export method.

        - Only exports containers
        - The key is the first container name
        """
        try:
            ret = deepcopy(self.stats)
        except KeyError as e:
            logger.debug(f"docker plugin - Docker export error {e}")
            ret = []

        # Remove fields used to compute rate
        for container in ret:
            for i in export_exclude_list:
                container.pop(i)

        return ret

    def _all_tag(self) -> bool:
        """Return the all tag of the Glances/Docker configuration file.

        # By default, Glances only displays running containers
        # Set the following key to True to display all containers
        all=True
        """
        all_tag = self.get_conf_value('all')
        if len(all_tag) == 0:
            return False
        return all_tag[0].lower() == 'true'

    @GlancesPluginModel._check_decorator
    @GlancesPluginModel._log_result_decorator
    def update(self) -> List[Dict]:
        """Update Docker and Podman stats using the input method."""
        # Connection should be ok
        if not self.watchers:
            return self.get_init_value()

        if self.input_method != 'local':
            return self.get_init_value()

        # Update stats
        stats = []
        for engine, watcher in iteritems(self.watchers):
            _, containers = watcher.update(all_tag=self._all_tag())
            containers_filtered = []
            for container in containers:
                container["engine"] = engine
                if 'key' in container and container['key'] in container:
                    if not self.is_hide(nativestr(container[container['key']])):
                        containers_filtered.append(container)
                else:
                    containers_filtered.append(container)
            stats.extend(containers_filtered)

        # Sort and update the stats
        # @TODO: Have a look because sort did not work for the moment (need memory stats ?)
        self.sort_key, self.stats = sort_docker_stats(stats)
        return self.stats

    @staticmethod
    def memory_usage_no_cache(mem: Dict[str, float]) -> float:
        """Return the 'real' memory usage by subtracting inactive_file from usage."""
        # Ref: https://github.com/docker/docker-py/issues/3210
        return mem['usage'] - (mem['inactive_file'] if 'inactive_file' in mem else 0)
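        # Example with illustrative values:
        #   memory_usage_no_cache({'usage': 100_000_000, 'inactive_file': 25_000_000}) -> 75_000_000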

    def update_views(self) -> bool:
        """Update stats views."""
        # Call the parent's method
        super().update_views()

        if not self.stats:
            return False

        # Add specific information
        # Alert
        for i in self.stats:
            # Init the views for the current container (key = container name)
            self.views[i[self.get_key()]] = {'cpu': {}, 'mem': {}}
            # CPU alert
            if 'cpu' in i and 'total' in i['cpu']:
                # Looking for a specific CPU container threshold in the conf file
                alert = self.get_alert(i['cpu']['total'], header=i['name'] + '_cpu', action_key=i['name'])
                if alert == 'DEFAULT':
                    # Not found? Fall back to the default CPU threshold value
                    alert = self.get_alert(i['cpu']['total'], header='cpu')
                self.views[i[self.get_key()]]['cpu']['decoration'] = alert
            # MEM alert
            if 'memory' in i and 'usage' in i['memory']:
                # Looking for a specific MEM container threshold in the conf file
                alert = self.get_alert(
                    self.memory_usage_no_cache(i['memory']),
                    maximum=i['memory']['limit'],
                    header=i['name'] + '_mem',
                    action_key=i['name'],
                )
                if alert == 'DEFAULT':
                    # Not found? Fall back to the default MEM threshold value
                    alert = self.get_alert(
                        self.memory_usage_no_cache(i['memory']), maximum=i['memory']['limit'], header='mem'
                    )
                self.views[i[self.get_key()]]['mem']['decoration'] = alert

        # Display Engine and Pod name?
        show_pod_name = False
        if any(ct.get("pod_name") for ct in self.stats):
            show_pod_name = True
        self.views['show_pod_name'] = show_pod_name
        show_engine_name = False
        if len({ct["engine"] for ct in self.stats}) > 1:
            show_engine_name = True
        self.views['show_engine_name'] = show_engine_name
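        # Illustrative shape of the resulting views (names and decorations are
        # example values only):
        #   {'nginx': {'cpu': {'decoration': 'OK'}, 'mem': {'decoration': 'CAREFUL'}},
        #    'show_pod_name': False, 'show_engine_name': False}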

        return True

    def build_title(self, ret):
        msg = '{}'.format('CONTAINERS')
        ret.append(self.curse_add_line(msg, "TITLE"))
        if len(self.stats) > 1:
            msg = f' {len(self.stats)}'
            ret.append(self.curse_add_line(msg))
            msg = f' sorted by {sort_for_human[self.sort_key]}'
            ret.append(self.curse_add_line(msg))
        if not self.views['show_engine_name']:
            msg = f' (served by {self.stats[0].get("engine", "")})'
            ret.append(self.curse_add_line(msg))
        ret.append(self.curse_new_line())
        return ret

    def maybe_add_engine_name_or_pod_line(self, ret):
        if self.views['show_engine_name']:
            ret = self.add_msg_to_line(ret, ' {:{width}}'.format('Engine', width=6))
        if self.views['show_pod_name']:
            ret = self.add_msg_to_line(ret, ' {:{width}}'.format('Pod', width=12))

        return ret

    def maybe_add_engine_name_or_pod_name(self, ret, container):
        ret.append(self.curse_new_line())
        if self.views['show_engine_name']:
            ret.append(self.curse_add_line(' {:{width}}'.format(container["engine"], width=6)))
        if self.views['show_pod_name']:
            ret.append(self.curse_add_line(' {:{width}}'.format(container.get("pod_id", "-"), width=12)))

        return ret

    def build_container_name(self, name_max_width):
        def build_for_this_max_length(ret, container):
            ret.append(
                self.curse_add_line(' {:{width}}'.format(container['name'][:name_max_width], width=name_max_width))
            )

            return ret

        return build_for_this_max_length

    def build_header(self, ret, name_max_width):
        ret.append(self.curse_new_line())

        ret = self.maybe_add_engine_name_or_pod_line(ret)

        msg = ' {:{width}}'.format('Name', width=name_max_width)
        ret.append(self.curse_add_line(msg, 'SORT' if self.sort_key == 'name' else 'DEFAULT'))

        msgs = ['{:>10}'.format('Status'), '{:>10}'.format('Uptime')]
        ret = reduce(self.add_msg_to_line, msgs, ret)

        msg = '{:>6}'.format('CPU%')
        ret.append(self.curse_add_line(msg, 'SORT' if self.sort_key == 'cpu_percent' else 'DEFAULT'))
        msg = '{:>7}'.format('MEM')
        ret.append(self.curse_add_line(msg, 'SORT' if self.sort_key == 'memory_usage' else 'DEFAULT'))

        msgs = [
            '/{:<7}'.format('MAX'),
            '{:>7}'.format('IOR/s'),
            ' {:<7}'.format('IOW/s'),
            '{:>7}'.format('Rx/s'),
            ' {:<7}'.format('Tx/s'),
            ' {:8}'.format('Command'),
        ]

        return reduce(self.add_msg_to_line, msgs, ret)

    def add_msg_to_line(self, ret, msg):
        ret.append(self.curse_add_line(msg))

        return ret

    def get_max_of_container_names(self):
        return min(
            self.config.get_int_value('containers', 'max_name_size', default=20) if self.config is not None else 20,
            len(max(self.stats, key=lambda x: len(x['name']))['name']),
        )

    def build_status_name(self, ret, container):
        status = self.container_alert(container['status'])
        msg = '{:>10}'.format(container['status'][0:10])
        ret.append(self.curse_add_line(msg, status))

        return ret

    def build_uptime_line(self, ret, container):
        if container['uptime']:
            msg = '{:>10}'.format(container['uptime'])
        else:
            msg = '{:>10}'.format('_')

        return self.add_msg_to_line(ret, msg)

    def build_cpu_line(self, ret, container):
        try:
            msg = '{:>6.1f}'.format(container['cpu']['total'])
        except (KeyError, TypeError):
            msg = '{:>6}'.format('_')
        ret.append(self.curse_add_line(msg, self.get_views(item=container['name'], key='cpu', option='decoration')))

        return ret

    def build_memory_line(self, ret, container):
        try:
            msg = '{:>7}'.format(self.auto_unit(self.memory_usage_no_cache(container['memory'])))
        except KeyError:
            msg = '{:>7}'.format('_')
        ret.append(self.curse_add_line(msg, self.get_views(item=container['name'], key='mem', option='decoration')))
        try:
            msg = '/{:<7}'.format(self.auto_unit(container['memory']['limit']))
        except (KeyError, TypeError):
            msg = '/{:<7}'.format('_')
        ret.append(self.curse_add_line(msg))

        return ret

    def build_io_line(self, ret, container):
        unit = 'B'
        try:
            value = self.auto_unit(int(container['io_rx'])) + unit
            msg = f'{value:>7}'
        except (KeyError, TypeError):
            msg = '{:>7}'.format('_')
        ret.append(self.curse_add_line(msg))
        try:
            value = self.auto_unit(int(container['io_wx'])) + unit
            msg = f' {value:<7}'
        except (KeyError, TypeError):
            msg = ' {:<7}'.format('_')
        ret.append(self.curse_add_line(msg))

        return ret

    def build_net_line(self, args):
        def build_with_this_args(ret, container):
            if args.byte:
                # Bytes per second (for dummy)
                to_bit = 1
                unit = ''
            else:
                # Bits per second (for real network administrator | Default)
                to_bit = 8
                unit = 'b'
            try:
                value = self.auto_unit(int(container['network_rx'] * to_bit)) + unit
                msg = f'{value:>7}'
            except (KeyError, TypeError):
                msg = '{:>7}'.format('_')
            ret.append(self.curse_add_line(msg))
            try:
                value = self.auto_unit(int(container['network_tx'] * to_bit)) + unit
                msg = f' {value:<7}'
            except (KeyError, TypeError):
                msg = ' {:<7}'.format('_')
            ret.append(self.curse_add_line(msg))

            return ret

        return build_with_this_args

    def build_cmd_line(self, ret, container):
        if container['command'] is not None:
            msg = ' {}'.format(container['command'])
        else:
            msg = ' {}'.format('_')
        ret.append(self.curse_add_line(msg, splittable=True))

        return ret

    def msg_curse(self, args=None, max_width: Optional[int] = None) -> List[str]:
        """Return the dict to display in the curse interface."""
        # Init the return message
        init = []

        # Only process if stats exist (and non null) and display plugin enable...
        conditions = [not self.stats, len(self.stats) == 0, self.is_disabled()]
        if any(conditions):
            return init

        # Build the string message
        # Get the maximum containers name
        # Max size is configurable. See feature request #1723.
        name_max_width = self.get_max_of_container_names()

        steps = [
            self.build_title,
            partial(self.build_header, name_max_width=name_max_width),
            self.build_data_line(name_max_width, args),
        ]

        return reduce(lambda ret, step: step(ret), steps, init)

    def build_data_line(self, name_max_width, args):
        def build_for_this_params(ret):
            build_data_with_params = self.build_container_data(name_max_width, args)
            return reduce(build_data_with_params, self.stats, ret)

        return build_for_this_params

    def build_container_data(self, name_max_width, args):
        def build_with_this_params(ret, container):
            steps = [
                self.maybe_add_engine_name_or_pod_name,
                self.build_container_name(name_max_width),
                self.build_status_name,
                self.build_uptime_line,
                self.build_cpu_line,
                self.build_memory_line,
                self.build_io_line,
                self.build_net_line(args),
                self.build_cmd_line,
            ]

            return reduce(lambda ret, step: step(ret, container), steps, ret)

        return build_with_this_params

    @staticmethod
    def container_alert(status: str) -> str:
        """Analyse the container status.
        One of created, restarting, running, removing, paused, exited, or dead
        """
        if status == 'running':
            return 'OK'
        if status == 'dead':
            return 'ERROR'
        if status in ['created', 'restarting', 'exited']:
            return 'WARNING'
        return 'INFO'


def sort_docker_stats(stats: List[Dict[str, Any]]) -> Tuple[str, List[Dict[str, Any]]]:
    # Make VM sort related to process sort
    if glances_processes.sort_key == 'memory_percent':
        sort_by = 'memory_usage'
        sort_by_secondary = 'cpu_percent'
    elif glances_processes.sort_key == 'name':
        sort_by = 'name'
        sort_by_secondary = 'cpu_percent'
    else:
        sort_by = 'cpu_percent'
        sort_by_secondary = 'memory_usage'

    # Sort docker stats
    sort_stats_processes(
        stats,
        sorted_by=sort_by,
        sorted_by_secondary=sort_by_secondary,
        # Reverse for all but name
        reverse=glances_processes.sort_key != 'name',
    )

    # Return the main sort key and the sorted stats
    return sort_by, stats
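The returned sort key mirrors the sort key of the process list. As a rough, dependency-free illustration of the default branch above (CPU first, memory second; the real work is delegated to glances.processes.sort_stats):

containers = [
    {'name': 'web', 'cpu_percent': 12.0, 'memory_usage': 256 * 1024 * 1024},
    {'name': 'db', 'cpu_percent': 3.5, 'memory_usage': 1024 * 1024 * 1024},
]
containers.sort(
    # Assumes every entry carries cpu_percent and memory_usage, which
    # sort_stats_processes does not require.
    key=lambda c: (c['cpu_percent'], c['memory_usage']),
    reverse=True,  # reverse for every sort key except 'name'
)
# containers[0]['name'] == 'web'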