# -*- coding: utf-8 -*-
"""Couchbase metrics reader."""
import sys
import time
import json
try:
    import urllib2
except ImportError:
    # Python 3 fallback (assumption: the PY3 flag below suggests py3 support
    # is intended); urllib.request exposes urlopen, build_opener,
    # install_opener, the auth handlers and HTTPError/URLError used below.
    import urllib.request as urllib2

from collections import deque
from threading import Lock

from plumd import Counter, Gauge, String, Reader, ResultSet, Result

PY3 = sys.version_info > (3,)

__author__ = 'Kenny Freeman'
__email__ = '[email protected]'
__license__ = "ISCL"
__docformat__ = 'reStructuredText'


def get_sample_i(arr, timestamp):
    """Return the index in arr of the last sample older than timestamp.

    This is used to reduce the samples returned from the Couchbase REST API
    to the timestamps occurring after our last poll time.

    :param arr: a list of sample timestamps (milliseconds)
    :type arr: list
    :param timestamp: the last poll timestamp to compare against
    :type timestamp: long
    :rtype: int
    """
    # arr metrics include a list of timestamps (milliseconds); find the
    # sample number to start recording from, starting from the end of the list
    sample_i = len(arr) - 1
    while sample_i > 0:
        if arr[sample_i] < timestamp:
            break
        sample_i -= 1
    return sample_i
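
# Example (illustrative): with millisecond sample timestamps,
#   get_sample_i([1000, 2000, 3000, 4000], 2500) -> 1
# so slicing samples[1:] keeps the last sample older than the poll time and
# everything recorded after it.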


def avg(arr):
    """Return the average of a list of values.

    :param arr: A list of int or float values
    :type arr: list
    :rtype: float
    """
    ret = 0.0
    items = len(arr)
    if items > 0:
        # float() keeps the division true under Python 2 as well
        ret = float(sum(arr)) / items
    return ret


def stats(arr):
    """Return the min, max and average of a list of values.

    :param arr: A list of int or float values
    :type arr: list
    :rtype: tuple(float, float, float)
    """
    # guard the min()/max() calls below against an empty list
    if not arr:
        return (0.0, 0.0, 0.0)
    ret = float(sum(arr)) / len(arr)
    return (min(arr), max(arr), ret)
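
# Example (illustrative): both helpers tolerate empty input,
#   avg([2, 4, 9])   -> 5.0
#   stats([2, 4, 9]) -> (2, 9, 5.0)
#   stats([])        -> (0.0, 0.0, 0.0)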


class CouchBase(Reader):
    """Plugin to record Couchbase cluster, node and bucket metrics."""

    # default config values
    defaults = {
        'poll.interval': 10,
        'host': 'localhost',
        'port': 8091,
        'username': 'Administrator',
        'password': '',
        'proto': 'http',
        'timeout': 10,
        'metrics_index': [
            'indexerThreads',
            'maxRollbackPoints',
            'memorySnapshotInterval',
            'stableSnapshotInterval'
        ],
        'metrics_bucket': [
            "avg_bg_wait_time",
            "avg_disk_commit_time",
            "avg_disk_update_time",
            "bg_wait_count",
            "bg_wait_total",
            "bytes_read",
            "bytes_written",
            "cas_badval",
            "cas_hits",
            "cas_misses",
            "cmd_get",
            "cmd_set",
            "couch_docs_actual_disk_size",
            "couch_docs_data_size",
            "couch_docs_disk_size",
            "couch_docs_fragmentation",
            "couch_spatial_data_size",
            "couch_spatial_disk_size",
            "couch_spatial_ops",
            "couch_total_disk_size",
            "couch_views_actual_disk_size",
            "couch_views_data_size",
            "couch_views_disk_size",
            "couch_views_fragmentation",
            "couch_views_ops",
            "cpu_idle_ms",
            "cpu_local_ms",
            "cpu_utilization_rate",
            "curr_connections",
            "curr_items",
            "curr_items_tot",
            "decr_hits",
            "decr_misses",
            "delete_hits",
            "delete_misses",
            "disk_commit_count",
            "disk_commit_total",
            "disk_update_count",
            "disk_update_total",
            "disk_write_queue",
            "ep_bg_fetched",
            "ep_cache_miss_rate",
            "ep_dcp_2i_backoff",
            "ep_dcp_2i_count",
            "ep_dcp_2i_items_remaining",
            "ep_dcp_2i_items_sent",
            "ep_dcp_2i_producer_count",
            "ep_dcp_2i_total_backlog_size",
            "ep_dcp_2i_total_bytes",
            "ep_dcp_fts_backoff",
            "ep_dcp_fts_count",
            "ep_dcp_fts_items_remaining",
            "ep_dcp_fts_items_sent",
            "ep_dcp_fts_producer_count",
            "ep_dcp_fts_total_backlog_size",
            "ep_dcp_fts_total_bytes",
            "ep_dcp_other_backoff",
            "ep_dcp_other_count",
            "ep_dcp_other_items_remaining",
            "ep_dcp_other_items_sent",
            "ep_dcp_other_producer_count",
            "ep_dcp_other_total_backlog_size",
            "ep_dcp_other_total_bytes",
            "ep_dcp_replica_backoff",
            "ep_dcp_replica_count",
            "ep_dcp_replica_items_remaining",
            "ep_dcp_replica_items_sent",
            "ep_dcp_replica_producer_count",
            "ep_dcp_replica_total_backlog_size",
            "ep_dcp_replica_total_bytes",
            "ep_dcp_views_backoff",
            "ep_dcp_views_count",
            "ep_dcp_views_items_remaining",
            "ep_dcp_views_items_sent",
            "ep_dcp_views_producer_count",
            "ep_dcp_views_total_backlog_size",
            "ep_dcp_views_total_bytes",
            "ep_dcp_views+indexes_backoff",
            "ep_dcp_views+indexes_count",
            "ep_dcp_views+indexes_items_remaining",
            "ep_dcp_views+indexes_items_sent",
            "ep_dcp_views+indexes_producer_count",
            "ep_dcp_views+indexes_total_backlog_size",
            "ep_dcp_views+indexes_total_bytes",
            "ep_dcp_xdcr_backoff",
            "ep_dcp_xdcr_count",
            "ep_dcp_xdcr_items_remaining",
            "ep_dcp_xdcr_items_sent",
            "ep_dcp_xdcr_producer_count",
            "ep_dcp_xdcr_total_backlog_size",
            "ep_dcp_xdcr_total_bytes",
            "ep_diskqueue_drain",
            "ep_diskqueue_fill",
            "ep_diskqueue_items",
            "ep_flusher_todo",
            "ep_item_commit_failed",
            "ep_kv_size",
            "ep_max_size",
            "ep_mem_high_wat",
            "ep_mem_low_wat",
            "ep_meta_data_memory",
            "ep_num_non_resident",
            "ep_num_ops_del_meta",
            "ep_num_ops_del_ret_meta",
            "ep_num_ops_get_meta",
            "ep_num_ops_set_meta",
            "ep_num_ops_set_ret_meta",
            "ep_num_value_ejects",
            "ep_oom_errors",
            "ep_ops_create",
            "ep_ops_update",
            "ep_overhead",
            "ep_queue_size",
            "ep_resident_items_rate",
            "ep_tap_rebalance_count",
            "ep_tap_rebalance_qlen",
            "ep_tap_rebalance_queue_backfillremaining",
            "ep_tap_rebalance_queue_backoff",
            "ep_tap_rebalance_queue_drain",
            "ep_tap_rebalance_queue_fill",
            "ep_tap_rebalance_queue_itemondisk",
            "ep_tap_rebalance_total_backlog_size",
            "ep_tap_replica_count",
            "ep_tap_replica_qlen",
            "ep_tap_replica_queue_backfillremaining",
            "ep_tap_replica_queue_backoff",
            "ep_tap_replica_queue_drain",
            "ep_tap_replica_queue_fill",
            "ep_tap_replica_queue_itemondisk",
            "ep_tap_replica_total_backlog_size",
            "ep_tap_total_count",
            "ep_tap_total_qlen",
            "ep_tap_total_queue_backfillremaining",
            "ep_tap_total_queue_backoff",
            "ep_tap_total_queue_drain",
            "ep_tap_total_queue_fill",
            "ep_tap_total_queue_itemondisk",
            "ep_tap_total_total_backlog_size",
            "ep_tap_user_count",
            "ep_tap_user_qlen",
            "ep_tap_user_queue_backfillremaining",
            "ep_tap_user_queue_backoff",
            "ep_tap_user_queue_drain",
            "ep_tap_user_queue_fill",
            "ep_tap_user_queue_itemondisk",
            "ep_tap_user_total_backlog_size",
            "ep_tmp_oom_errors",
            "ep_vb_total",
            "evictions",
            "get_hits",
            "get_misses",
            "hibernated_requests",
            "hibernated_waked",
            "hit_ratio",
            "incr_hits",
            "incr_misses",
            "mem_actual_free",
            "mem_actual_used",
            "mem_free",
            "mem_total",
            "mem_used",
            "mem_used_sys",
            "misses",
            "ops",
            "rest_requests",
            "swap_total",
            "swap_used",
            "timestamp",
            "vb_active_eject",
            "vb_active_itm_memory",
            "vb_active_meta_data_memory",
            "vb_active_num",
            "vb_active_num_non_resident",
            "vb_active_ops_create",
            "vb_active_ops_update",
            "vb_active_queue_age",
            "vb_active_queue_drain",
            "vb_active_queue_fill",
            "vb_active_queue_size",
            "vb_active_resident_items_ratio",
            "vb_avg_active_queue_age",
            "vb_avg_pending_queue_age",
            "vb_avg_replica_queue_age",
            "vb_avg_total_queue_age",
            "vb_pending_curr_items",
            "vb_pending_eject",
            "vb_pending_itm_memory",
            "vb_pending_meta_data_memory",
            "vb_pending_num",
            "vb_pending_num_non_resident",
            "vb_pending_ops_create",
            "vb_pending_ops_update",
            "vb_pending_queue_age",
            "vb_pending_queue_drain",
            "vb_pending_queue_fill",
            "vb_pending_queue_size",
            "vb_pending_resident_items_ratio",
            "vb_replica_curr_items",
            "vb_replica_eject",
            "vb_replica_itm_memory",
            "vb_replica_meta_data_memory",
            "vb_replica_num",
            "vb_replica_num_non_resident",
            "vb_replica_ops_create",
            "vb_replica_ops_update",
            "vb_replica_queue_age",
            "vb_replica_queue_drain",
            "vb_replica_queue_fill",
            "vb_replica_queue_size",
            "vb_replica_resident_items_ratio",
            "vb_total_queue_age",
            "xdc_ops"
        ],
        'limit_bucket': False
    }
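
    # Example (illustrative): a deployment usually overrides only a few of
    # these defaults through plumd's configuration, e.g.
    #   {'host': 'cb01.example.com', 'password': 's3cret', 'limit_bucket': True}
    # where 'limit_bucket' restricts bucket samples to the names listed in
    # 'metrics_bucket'. The exact override format is whatever plumd.config.Conf
    # accepts; the values above are placeholders.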

    def __init__(self, log, config):
        """Plugin to record Couchbase metrics.

        :param log: A logger
        :type log: logging.RootLogger
        :param config: a plumd.config.Conf configuration helper instance.
        :type config: plumd.config.Conf
        """
        super(CouchBase, self).__init__(log, config)
        self.config.defaults(CouchBase.defaults)

        # Couchbase connection settings
        self.base_url = "{0}://{1}:{2}".format(self.config.get("proto"),
                                               self.config.get("host"),
                                               self.config.get("port"))
        self.auth = (self.config.get('username'), self.config.get('password'))
        self.http_pm = urllib2.HTTPPasswordMgrWithDefaultRealm()
        self.urls = dict()
        self.timeout = config.get('timeout')
        self.lock = Lock()
        self.last_poll = 0

    def authenticate(self, url):
        """Set up HTTP basic authentication for the given url.

        :param url: The URL to set up authentication for.
        :type url: str
        """
        auth = self.auth
        with self.lock:
            self.http_pm.add_password(None, url, auth[0], auth[1])
            handler = urllib2.HTTPBasicAuthHandler(self.http_pm)
            opener = urllib2.build_opener(handler)
            try:
                opener.open(url)
            except ValueError as exc:
                err = "Couchbase: invalid url: {0}: {1}"
                self.log.error(err.format(url, exc))
                # don't install the opener or mark the url as usable
                return
            urllib2.install_opener(opener)
            self.urls[url] = True

    def request(self, url):
        """Make an HTTP request and return the decoded JSON response.

        :param url: The URL to request.
        :type url: str
        :rtype: object
        """
        if url not in self.urls:
            self.authenticate(url)

        resp = None
        ret = dict()
        try:
            self.lock.acquire(True)
            resp = urllib2.urlopen(url, timeout=self.timeout)
        except urllib2.URLError as exc:
            # HTTPError is a subclass of URLError, so this also covers
            # connection failures and timeouts surfaced as URLError
            err = "Couchbase: HTTP error: {0}: {1}"
            self.log.error(err.format(url, exc))
            return ret
        finally:
            self.lock.release()
        if resp is None or resp.getcode() != 200:
            return ret
        try:
            ret = json.load(resp)
        except ValueError as exc:
            err = "Couchbase: JSON decode error: {0}: {1}: {2}"
            self.log.error(err.format(url, resp, exc))
        return ret
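
    # Example (illustrative): the collectors below all go through request(),
    # e.g. data = self.request("{0}/settings/indexes".format(self.base_url));
    # on success `data` is the decoded JSON body, while on transport or decode
    # errors an empty dict comes back and the error is logged.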

    def get_index_settings(self, rset):
        """Collect index settings and add them to the ResultSet.

        :param rset: A ResultSet to add results to
        :type rset: ResultSet
        """
        url = "{0}/settings/indexes".format(self.base_url)
        data = self.request(url)
        metric_names = self.config.get('metrics_index')
        metrics = deque()
        for name in metric_names:
            if name in data:
                metrics.append(Gauge(name, data[name]))
        res = Result("couchbase.index", metrics)
        rset.add(res)
        return

    def get_pool_stats_all(self, rset):
        """Get basic pool metrics, add to the ResultSet.

        todo: reduce cyclomatic complexity

        :param rset: A ResultSet to add results to
        :type rset: ResultSet
        """
        # get basic metrics for the given pool
        pool_url = "{0}/pools/default/buckets/".format(self.base_url)
        data = self.request(pool_url)

        # response should be an array of dicts - we want basicStats from each
        mnames = None
        if self.config.get("limit_bucket"):
            mnames = deque(self.config.get("metrics_bucket"))
        try:
            # record misc pool metrics once for the whole response;
            # get_pool_stats_misc iterates every bucket itself
            self.get_pool_stats_misc(data, rset)
            # also record detailed metrics for each bucket
            for bucket in data:
                self.get_bucket_stats(bucket['name'], rset, mnames)
        except KeyError as exc:
            err = "Couchbase: get_pool_stats_all: server response missing key: {0}"
            self.log.error(err.format(exc))
        return

    def get_pool_stats_misc(self, data, rset):
        """Get basic pool metrics, add to the ResultSet.

        todo: reduce cyclomatic complexity

        :param data: Couchbase metrics returned from api call
        :type data: list
        :param rset: A ResultSet to add results to
        :type rset: ResultSet
        """
        # data should be an array of dicts - we want basicStats from each
        for bucket in data:
            cnt_healthy = 0
            cnt_repl = 0
            cnt_members = 0
            metrics = deque()
            for entry in bucket['nodes']:
                if entry["status"] == "healthy":
                    cnt_healthy += 1
                if entry["replication"] == 1:
                    cnt_repl += 1
                if entry["clusterMembership"] == "active":
                    cnt_members += 1
                if "thisNode" in entry and entry["thisNode"]:
                    # record metrics for the current node
                    for key, val in entry["interestingStats"].items():
                        metrics.append(Gauge(key, val))
            # other various metrics
            metrics.append(Gauge("nodes_healthy", cnt_healthy))
            metrics.append(Gauge("nodes_replicating", cnt_repl))
            metrics.append(Gauge("nodes_members", cnt_members))
            # record metrics into a Result with meta data for the pool infos
            res = Result("couchbase.bucket_basic", metrics)
            res.meta.add(String("bucket", bucket["name"]))
            rset.add(res)
        return

    def get_bucket_stats(self, bucket, rset, mnames):
        """Get bucket metrics, add to the ResultSet.

        :param bucket: The name of the bucket to query
        :type bucket: str
        :param rset: A ResultSet to add results to
        :type rset: ResultSet
        :param mnames: An optional deque/iterable of metric names to record
        :type mnames: deque
        """
        bucket_url = "{0}/pools/default/buckets/{1}/stats".format(self.base_url, bucket)
        data = self.request(bucket_url)
        metrics = deque()
        try:
            # samples include a list of timestamps (milliseconds); find the
            # sample index to start recording from
            times = data["op"]["samples"]["timestamp"]
            sample_i = get_sample_i(times, self.last_poll)

            # record average values from the samples, filtered by mnames if set
            for metric, val in data["op"]["samples"].items():
                if mnames is not None and metric not in mnames:
                    continue
                metrics.append(Gauge(metric, avg(val[sample_i:])))
        except KeyError as exc:
            err = "Couchbase: get_bucket_stats: server response missing key: {0}"
            self.log.error(err.format(exc))
        # record metrics into a Result with meta data for the pool infos
        res = Result("couchbase.bucket_basic", metrics)
        res.meta.add(String("bucket", bucket))
        rset.add(res)
        return
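
    # For reference, the per-bucket stats endpoint queried above returns JSON
    # shaped like {"op": {"samples": {"timestamp": [...], "<metric>": [...]}}}
    # (shape inferred from the lookups in this method; values illustrative).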

    def get_node_stats_all(self, rset):
        """Get node metrics, add to the ResultSet.

        todo:
        - record compaction settings
        - reduce cyclomatic complexity

        :param rset: A ResultSet to add metrics to.
        :type rset: ResultSet
        """
        # fix me - pool is name: url dict
        node_url = "{0}/pools/nodes/".format(self.base_url)
        data = self.request(node_url)
        # define metrics up front so the Result below survives a KeyError
        metrics = deque()
        try:
            self.get_node_stats_misc(data, rset)

            for node in data["nodes"]:
                # only record metrics for the current node
                if "thisNode" in node and node["thisNode"]:
                    # record all key,val in interestingStats
                    for metric, val in node["interestingStats"].items():
                        metrics.append(Gauge(metric, val))
                    for name in ["mcdMemoryAllocated", "mcdMemoryReserved"]:
                        metrics.append(Gauge(name, node[name]))
        except KeyError as exc:
            err = "Couchbase: get_node_stats: server response missing key: {0}"
            self.log.error(err.format(exc))
        res = Result("couchbase.nodes", metrics)
        rset.add(res)
        return

    def get_node_stats_misc(self, data, rset):
        """Get node metrics, add to the ResultSet.

        todo:
        - record compaction settings
        - reduce cyclomatic complexity

        :param data: A dict from couchbase api call containing metrics.
        :type data: dict
        :param rset: A ResultSet to add metrics to.
        :type rset: ResultSet
        """
        metrics = deque()
        for key, val in data["counters"].items():
            metrics.append(Counter(key, val))
        res = Result("couchbase.counters", metrics)
        rset.add(res)

        metrics = deque()
        for name in ["ftsMemoryQuota", "indexMemoryQuota", "maxBucketCount",
                     "memoryQuota"]:
            metrics.append(Gauge(name, data[name]))
        res = Result("couchbase.quotas", metrics)
        rset.add(res)

        stat = "storageTotals"
        metrics = deque()
        for key in data[stat].keys():
            for metric, val in data[stat][key].items():
                name = "{0}_{1}".format(key, metric)
                metrics.append(Gauge(name, val))
        res = Result("couchbase.storage", metrics)
        rset.add(res)
        return

    def poll(self):
        """Query Couchbase for metrics.

        :rtype: ResultSet
        """
        rset = ResultSet()
        self.get_index_settings(rset)
        self.get_pool_stats_all(rset)
        self.get_node_stats_all(rset)
        self.last_poll = time.time() * 1000
        return rset
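

# Minimal usage sketch (illustrative; plumd normally wires readers up itself,
# and the exact Conf constructor is not shown in this module):
#
#   import logging
#   log = logging.getLogger("plumd.couchbase")
#   reader = CouchBase(log, conf)  # conf: a plumd.config.Conf instance
#   rset = reader.poll()           # ResultSet of couchbase.* Results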