Browse Source

[python] make units compliant with IEC standard (#4995)

* apache units fix

* beanstalk

* bind_rndc

* boinc

* ceph

* chrony

* couchdb

* dns_query

* dnsdist

* dockerd

* dovecot

* elasticsearch by @vlvkobal <3

* example

* exim

* fail2ban

* freeradius minor fixes

* freeradius minor fixes

* freeradius minor fixes

* go_expvar

* haproxy

* hddtemp

* httpcheck

* icecast

* ipfs

* isc_dhcpd

* litespeed

* logind

* megacli

* memcached

* mongodb

* monit

* mysql

* nginx

* nginx_plus

* nsd

* ntpd

* nvidia_smi

* openldap

* ovpn_status

* phpfpm

* portcheck

* postfix

* postgres

* powerdns

* proxysql

* puppet

* rabbitmq

* redis

* retroshare

* samba

* sensors

* smartd_log

* spigotmc

* springboot

* squid

* retroshare

* tomcat

* retroshare

* tor

* traefik

* traefik

* unbound

* uwsgi

* varnish

* w1sensor

* web_log

* ok codacy

* retroshare

* ipfs
Ilya Mashchenko 6 years ago
parent
commit
97b32703c6

+ 70 - 41
collectors/python.d.plugin/apache/apache.chart.py

@@ -5,63 +5,60 @@
 
 
 from bases.FrameworkServices.UrlService import UrlService
 from bases.FrameworkServices.UrlService import UrlService
 
 
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
 
 
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-#             'update_every': update_every,
-#             'retries': retries,
-#             'priority': priority,
-#             'url': 'http://www.apache.org/server-status?auto'
-#          }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq']
+ORDER = [
+    'requests',
+    'connections',
+    'conns_async',
+    'net',
+    'workers',
+    'reqpersec',
+    'bytespersec',
+    'bytesperreq',
+]
 
 
 CHARTS = {
 CHARTS = {
     'bytesperreq': {
     'bytesperreq': {
-        'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
+        'options': [None, 'Lifetime Avg. Request Size', 'KiB',
                     'statistics', 'apache.bytesperreq', 'area'],
                     'statistics', 'apache.bytesperreq', 'area'],
         'lines': [
         'lines': [
-            ['size_req']
+            ['size_req', 'size', 'absolute', 1, 1024 * 100000]
         ]},
         ]},
     'workers': {
     'workers': {
-        'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
+        'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
         'lines': [
         'lines': [
             ['idle'],
             ['idle'],
             ['busy'],
             ['busy'],
         ]},
         ]},
     'reqpersec': {
     'reqpersec': {
-        'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+        'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
                     'apache.reqpersec', 'area'],
                     'apache.reqpersec', 'area'],
         'lines': [
         'lines': [
-            ['requests_sec']
+            ['requests_sec', 'requests', 'absolute', 1, 100000]
         ]},
         ]},
     'bytespersec': {
     'bytespersec': {
-        'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
+        'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
                     'apache.bytesperreq', 'area'],
                     'apache.bytesperreq', 'area'],
         'lines': [
         'lines': [
-            ['size_sec', None, 'absolute', 8, 1000]
+            ['size_sec', None, 'absolute', 8, 1000 * 100000]
         ]},
         ]},
     'requests': {
     'requests': {
-        'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
+        'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
         'lines': [
         'lines': [
             ['requests', None, 'incremental']
             ['requests', None, 'incremental']
         ]},
         ]},
     'net': {
     'net': {
-        'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
+        'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
         'lines': [
         'lines': [
             ['sent', None, 'incremental', 8, 1]
             ['sent', None, 'incremental', 8, 1]
         ]},
         ]},
     'connections': {
     'connections': {
-        'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
+        'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
         'lines': [
         'lines': [
             ['connections']
             ['connections']
         ]},
         ]},
     'conns_async': {
     'conns_async': {
-        'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
+        'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
         'lines': [
         'lines': [
             ['keepalive'],
             ['keepalive'],
             ['closing'],
             ['closing'],
@@ -85,6 +82,14 @@ ASSIGNMENT = {
     'ConnsAsyncWriting': 'writing'
     'ConnsAsyncWriting': 'writing'
 }
 }
 
 
+FLOAT_VALUES = [
+    'BytesPerReq',
+    'ReqPerSec',
+    'BytesPerSec',
+]
+
+LIGHTTPD_MARKER = 'idle_servers'
+
 
 
 class Service(UrlService):
 class Service(UrlService):
     def __init__(self, configuration=None, name=None):
     def __init__(self, configuration=None, name=None):
@@ -95,20 +100,15 @@ class Service(UrlService):
 
 
     def check(self):
     def check(self):
         self._manager = self._build_manager()
         self._manager = self._build_manager()
+
         data = self._get_data()
         data = self._get_data()
+
         if not data:
         if not data:
             return None
             return None
 
 
-        if 'idle_servers' in data:
-            self.module_name = 'lighttpd'
-            for chart in self.definitions:
-                if chart == 'workers':
-                    lines = self.definitions[chart]['lines']
-                    lines[0] = ['idle_servers', 'idle']
-                    lines[1] = ['busy_servers', 'busy']
-                opts = self.definitions[chart]['options']
-                opts[1] = opts[1].replace('apache', 'lighttpd')
-                opts[4] = opts[4].replace('apache', 'lighttpd')
+        if LIGHTTPD_MARKER in data:
+            self.turn_into_lighttpd()
+
         return True
         return True
 
 
     def _get_data(self):
     def _get_data(self):
@@ -117,15 +117,44 @@ class Service(UrlService):
         :return: dict
         :return: dict
         """
         """
         raw_data = self._get_raw_data()
         raw_data = self._get_raw_data()
+
         if not raw_data:
         if not raw_data:
             return None
             return None
+
         data = dict()
         data = dict()
 
 
-        for row in raw_data.split('\n'):
-            tmp = row.split(':')
-            if tmp[0] in ASSIGNMENT:
-                try:
-                    data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
-                except (IndexError, ValueError):
-                    continue
+        for line in raw_data.split('\n'):
+            try:
+                parse_line(line, data)
+            except ValueError:
+                continue
+
         return data or None
         return data or None
+
+    def turn_into_lighttpd(self):
+        self.module_name = 'lighttpd'
+        for chart in self.definitions:
+            if chart == 'workers':
+                lines = self.definitions[chart]['lines']
+                lines[0] = ['idle_servers', 'idle']
+                lines[1] = ['busy_servers', 'busy']
+            opts = self.definitions[chart]['options']
+            opts[1] = opts[1].replace('apache', 'lighttpd')
+            opts[4] = opts[4].replace('apache', 'lighttpd')
+
+
+def parse_line(line, data):
+    parts = line.split(':')
+
+    if len(parts) != 2:
+        return
+
+    key, value = parts[0], parts[1]
+
+    if key not in ASSIGNMENT:
+        return
+
+    if key in FLOAT_VALUES:
+        data[ASSIGNMENT[key]] = int((float(value) * 100000))
+    else:
+        data[ASSIGNMENT[key]] = int(value)

+ 11 - 5
collectors/python.d.plugin/beanstalk/beanstalk.chart.py

@@ -12,12 +12,18 @@ except ImportError:
 from bases.FrameworkServices.SimpleService import SimpleService
 from bases.FrameworkServices.SimpleService import SimpleService
 from bases.loaders import safe_load
 from bases.loaders import safe_load
 
 
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
 
 
-ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs',
-         'current_connections', 'binlog', 'uptime']
+ORDER = [
+    'cpu_usage',
+    'jobs_rate',
+    'connections_rate',
+    'commands_rate',
+    'current_tubes',
+    'current_jobs',
+    'current_connections',
+    'binlog',
+    'uptime',
+]
 
 
 CHARTS = {
 CHARTS = {
     'cpu_usage': {
     'cpu_usage': {

+ 22 - 7
collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py

@@ -11,10 +11,15 @@ from subprocess import Popen
 from bases.collection import find_binary
 from bases.collection import find_binary
 from bases.FrameworkServices.SimpleService import SimpleService
 from bases.FrameworkServices.SimpleService import SimpleService
 
 
-priority = 60000
+
 update_every = 30
 update_every = 30
 
 
-ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
+ORDER = [
+    'name_server_statistics',
+    'incoming_queries',
+    'outgoing_queries',
+    'named_stats_size',
+]
 
 
 CHARTS = {
 CHARTS = {
     'name_server_statistics': {
     'name_server_statistics': {
@@ -43,7 +48,7 @@ CHARTS = {
         'lines': [
         'lines': [
         ]},
         ]},
     'named_stats_size': {
     'named_stats_size': {
-        'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'],
+        'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
         'lines': [
         'lines': [
             ['stats_size', None, 'absolute', 1, 1 << 20]
             ['stats_size', None, 'absolute', 1, 1 << 20]
         ]
         ]
@@ -91,10 +96,20 @@ class Service(SimpleService):
         self.definitions = CHARTS
         self.definitions = CHARTS
         self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
         self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
         self.rndc = find_binary('rndc')
         self.rndc = find_binary('rndc')
-        self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0,
-                         nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
-                         nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
-                         nms_dropped_queries=0)
+        self.data = dict(
+            nms_requests=0,
+            nms_responses=0,
+            nms_failure=0,
+            nms_auth=0,
+            nms_non_auth=0,
+            nms_nxrrset=0,
+            nms_success=0,
+            nms_nxdomain=0,
+            nms_recursion=0,
+            nms_duplicate=0,
+            nms_rejected_queries=0,
+            nms_dropped_queries=0,
+        )
 
 
     def check(self):
     def check(self):
         if not self.rndc:
         if not self.rndc:

+ 11 - 3
collectors/python.d.plugin/boinc/boinc.chart.py

@@ -10,7 +10,12 @@ from bases.FrameworkServices.SimpleService import SimpleService
 from third_party import boinc_client
 from third_party import boinc_client
 
 
 
 
-ORDER = ['tasks', 'states', 'sched_states', 'process_states']
+ORDER = [
+    'tasks',
+    'states',
+    'sched_states',
+    'process_states',
+]
 
 
 CHARTS = {
 CHARTS = {
     'tasks': {
     'tasks': {
@@ -141,14 +146,16 @@ class Service(SimpleService):
     def _get_data(self):
     def _get_data(self):
         if not self.is_alive():
         if not self.is_alive():
             return None
             return None
+
         data = dict(_DATA_TEMPLATE)
         data = dict(_DATA_TEMPLATE)
-        results = []
+
         try:
         try:
             results = self.client.get_tasks()
             results = self.client.get_tasks()
         except socket.error:
         except socket.error:
             self.error('Connection is dead')
             self.error('Connection is dead')
             self.alive = False
             self.alive = False
             return None
             return None
+
         for task in results:
         for task in results:
             data['total'] += 1
             data['total'] += 1
             data[_TASK_MAP[task.state]] += 1
             data[_TASK_MAP[task.state]] += 1
@@ -159,4 +166,5 @@ class Service(SimpleService):
                     data[_PROC_MAP[task.active_task_state]] += 1
                     data[_PROC_MAP[task.active_task_state]] += 1
             except AttributeError:
             except AttributeError:
                 pass
                 pass
-        return data
+
+        return data or None

+ 8 - 8
collectors/python.d.plugin/ceph/ceph.chart.py

@@ -9,13 +9,13 @@ try:
 except ImportError:
 except ImportError:
     CEPH = False
     CEPH = False
 
 
-import os
 import json
 import json
+import os
+
 from bases.FrameworkServices.SimpleService import SimpleService
 from bases.FrameworkServices.SimpleService import SimpleService
 
 
 # default module values (can be overridden per job in `config`)
 # default module values (can be overridden per job in `config`)
 update_every = 10
 update_every = 10
-priority = 60000
 
 
 ORDER = [
 ORDER = [
     'general_usage',
     'general_usage',
@@ -36,7 +36,7 @@ ORDER = [
 
 
 CHARTS = {
 CHARTS = {
     'general_usage': {
     'general_usage': {
-        'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
+        'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
         'lines': [
         'lines': [
             ['general_available', 'avail', 'absolute'],
             ['general_available', 'avail', 'absolute'],
             ['general_usage', 'used', 'absolute']
             ['general_usage', 'used', 'absolute']
@@ -49,7 +49,7 @@ CHARTS = {
         ]
         ]
     },
     },
     'general_bytes': {
     'general_bytes': {
-        'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes',
+        'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
                     'area'],
                     'area'],
         'lines': [
         'lines': [
             ['general_read_bytes', 'read', 'absolute', 1, 1024],
             ['general_read_bytes', 'read', 'absolute', 1, 1024],
@@ -73,7 +73,7 @@ CHARTS = {
         ]
         ]
     },
     },
     'pool_usage': {
     'pool_usage': {
-        'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'],
+        'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
         'lines': []
         'lines': []
     },
     },
     'pool_objects': {
     'pool_objects': {
@@ -81,11 +81,11 @@ CHARTS = {
         'lines': []
         'lines': []
     },
     },
     'pool_read_bytes': {
     'pool_read_bytes': {
-        'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'],
+        'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
         'lines': []
         'lines': []
     },
     },
     'pool_write_bytes': {
     'pool_write_bytes': {
-        'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'],
+        'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
         'lines': []
         'lines': []
     },
     },
     'pool_read_operations': {
     'pool_read_operations': {
@@ -97,7 +97,7 @@ CHARTS = {
         'lines': []
         'lines': []
     },
     },
     'osd_usage': {
     'osd_usage': {
-        'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'],
+        'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
         'lines': []
         'lines': []
     },
     },
     'osd_apply_latency': {
     'osd_apply_latency': {

+ 12 - 3
collectors/python.d.plugin/chrony/chrony.chart.py

@@ -7,10 +7,19 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
 
 
 # default module values (can be overridden per job in `config`)
 # default module values (can be overridden per job in `config`)
 update_every = 5
 update_every = 5
-priority = 60000
+
+CHRONY_COMMAND = 'chronyc -n tracking'
 
 
 # charts order (can be overridden if you want less charts, or different order)
 # charts order (can be overridden if you want less charts, or different order)
-ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
+ORDER = [
+    'system',
+    'offsets',
+    'stratum',
+    'root',
+    'frequency',
+    'residualfreq',
+    'skew',
+]
 
 
 CHARTS = {
 CHARTS = {
     'system': {
     'system': {
@@ -76,9 +85,9 @@ class Service(ExecutableService):
     def __init__(self, configuration=None, name=None):
     def __init__(self, configuration=None, name=None):
         ExecutableService.__init__(
         ExecutableService.__init__(
             self, configuration=configuration, name=name)
             self, configuration=configuration, name=name)
-        self.command = 'chronyc -n tracking'
         self.order = ORDER
         self.order = ORDER
         self.definitions = CHARTS
         self.definitions = CHARTS
+        self.command = CHRONY_COMMAND
 
 
     def _get_data(self):
     def _get_data(self):
         """
         """

+ 18 - 28
collectors/python.d.plugin/couchdb/couchdb.chart.py

@@ -8,6 +8,7 @@ from collections import namedtuple, defaultdict
 from json import loads
 from json import loads
 from threading import Thread
 from threading import Thread
 from socket import gethostbyname, gaierror
 from socket import gethostbyname, gaierror
+
 try:
 try:
     from queue import Queue
     from queue import Queue
 except ImportError:
 except ImportError:
@@ -15,9 +16,9 @@ except ImportError:
 
 
 from bases.FrameworkServices.UrlService import UrlService
 from bases.FrameworkServices.UrlService import UrlService
 
 
-# default module values (can be overridden per job in `config`)
+
 update_every = 1
 update_every = 1
-priority = 60000
+
 
 
 METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
 METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
 
 
@@ -108,7 +109,7 @@ ORDER = [
 
 
 CHARTS = {
 CHARTS = {
     'activity': {
     'activity': {
-        'options': [None, 'Overall Activity', 'req/s',
+        'options': [None, 'Overall Activity', 'requests/s',
                     'dbactivity', 'couchdb.activity', 'stacked'],
                     'dbactivity', 'couchdb.activity', 'stacked'],
         'lines': [
         'lines': [
             ['couchdb_database_reads', 'DB reads', 'incremental'],
             ['couchdb_database_reads', 'DB reads', 'incremental'],
@@ -117,7 +118,7 @@ CHARTS = {
         ]
         ]
     },
     },
     'request_methods': {
     'request_methods': {
-        'options': [None, 'HTTP request methods', 'req/s',
+        'options': [None, 'HTTP request methods', 'requests/s',
                     'httptraffic', 'couchdb.request_methods',
                     'httptraffic', 'couchdb.request_methods',
                     'stacked'],
                     'stacked'],
         'lines': [
         'lines': [
@@ -132,7 +133,7 @@ CHARTS = {
         ]
         ]
     },
     },
     'response_codes': {
     'response_codes': {
-        'options': [None, 'HTTP response status codes', 'resp/s',
+        'options': [None, 'HTTP response status codes', 'responses/s',
                     'httptraffic', 'couchdb.response_codes',
                     'httptraffic', 'couchdb.response_codes',
                     'stacked'],
                     'stacked'],
         'lines': [
         'lines': [
@@ -150,15 +151,13 @@ CHARTS = {
         ]
         ]
     },
     },
     'open_files': {
     'open_files': {
-        'options': [None, 'Open files', 'files',
-                    'ops', 'couchdb.open_files', 'line'],
+        'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
         'lines': [
         'lines': [
             ['couchdb_open_os_files', '# files', 'absolute']
             ['couchdb_open_os_files', '# files', 'absolute']
         ]
         ]
     },
     },
     'active_tasks': {
     'active_tasks': {
-        'options': [None, 'Active task breakdown', 'tasks',
-                    'ops', 'couchdb.active_tasks', 'stacked'],
+        'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
         'lines': [
         'lines': [
             ['activetasks_indexer', 'Indexer', 'absolute'],
             ['activetasks_indexer', 'Indexer', 'absolute'],
             ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
             ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
@@ -167,8 +166,7 @@ CHARTS = {
         ]
         ]
     },
     },
     'replicator_jobs': {
     'replicator_jobs': {
-        'options': [None, 'Replicator job breakdown', 'jobs',
-                    'ops', 'couchdb.replicator_jobs', 'stacked'],
+        'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
         'lines': [
         'lines': [
             ['couch_replicator_jobs_running', 'Running', 'absolute'],
             ['couch_replicator_jobs_running', 'Running', 'absolute'],
             ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
             ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
@@ -178,8 +176,7 @@ CHARTS = {
         ]
         ]
     },
     },
     'erlang_memory': {
     'erlang_memory': {
-        'options': [None, 'Erlang VM memory usage', 'bytes',
-                    'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
+        'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
         'lines': [
         'lines': [
             ['memory_atom', 'atom', 'absolute'],
             ['memory_atom', 'atom', 'absolute'],
             ['memory_binary', 'binaries', 'absolute'],
             ['memory_binary', 'binaries', 'absolute'],
@@ -190,23 +187,20 @@ CHARTS = {
         ]
         ]
     },
     },
     'erlang_reductions': {
     'erlang_reductions': {
-        'options': [None, 'Erlang reductions', 'count',
-                    'erlang', 'couchdb.reductions', 'line'],
+        'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
         'lines': [
         'lines': [
             ['reductions', 'reductions', 'incremental']
             ['reductions', 'reductions', 'incremental']
         ]
         ]
     },
     },
     'erlang_proc_counts': {
     'erlang_proc_counts': {
-        'options': [None, 'Process counts', 'count',
-                    'erlang', 'couchdb.proccounts', 'line'],
+        'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
         'lines': [
         'lines': [
             ['os_proc_count', 'OS procs', 'absolute'],
             ['os_proc_count', 'OS procs', 'absolute'],
             ['process_count', 'erl procs', 'absolute']
             ['process_count', 'erl procs', 'absolute']
         ]
         ]
     },
     },
     'erlang_peak_msg_queue': {
     'erlang_peak_msg_queue': {
-        'options': [None, 'Peak message queue size', 'count',
-                    'erlang', 'couchdb.peakmsgqueue',
+        'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
                     'line'],
                     'line'],
         'lines': [
         'lines': [
             ['peak_msg_queue', 'peak size', 'absolute']
             ['peak_msg_queue', 'peak size', 'absolute']
@@ -214,18 +208,15 @@ CHARTS = {
     },
     },
     # Lines for the following are added as part of check()
     # Lines for the following are added as part of check()
     'db_sizes_file': {
     'db_sizes_file': {
-        'options': [None, 'Database sizes (file)', 'KB',
-                    'perdbstats', 'couchdb.db_sizes_file', 'line'],
+        'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
         'lines': []
         'lines': []
     },
     },
     'db_sizes_external': {
     'db_sizes_external': {
-        'options': [None, 'Database sizes (external)', 'KB',
-                    'perdbstats', 'couchdb.db_sizes_external', 'line'],
+        'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
         'lines': []
         'lines': []
     },
     },
     'db_sizes_active': {
     'db_sizes_active': {
-        'options': [None, 'Database sizes (active)', 'KB',
-                    'perdbstats', 'couchdb.db_sizes_active', 'line'],
+        'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
         'lines': []
         'lines': []
     },
     },
     'db_doc_counts': {
     'db_doc_counts': {
@@ -234,8 +225,7 @@ CHARTS = {
         'lines': []
         'lines': []
     },
     },
     'db_doc_del_counts': {
     'db_doc_del_counts': {
-        'options': [None, 'Database # of deleted docs', 'docs',
-                    'perdbstats', 'couchdb_db_doc_del_count', 'line'],
+        'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
         'lines': []
         'lines': []
     }
     }
 }
 }
@@ -255,7 +245,7 @@ class Service(UrlService):
         try:
         try:
             self.dbs = self.configuration.get('databases').split(' ')
             self.dbs = self.configuration.get('databases').split(' ')
         except (KeyError, AttributeError):
         except (KeyError, AttributeError):
-            self.dbs = []
+            self.dbs = list()
 
 
     def check(self):
     def check(self):
         if not (self.host and self.port):
         if not (self.host and self.port):

+ 14 - 7
collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py

@@ -28,10 +28,7 @@ except ImportError:
 from bases.FrameworkServices.SimpleService import SimpleService
 from bases.FrameworkServices.SimpleService import SimpleService
 
 
 
 
-# default module values (can be overridden per job in `config`)
 update_every = 5
 update_every = 5
-priority = 60000
-retries = 60
 
 
 
 
 class Service(SimpleService):
 class Service(SimpleService):
@@ -46,14 +43,14 @@ class Service(SimpleService):
 
 
     def check(self):
     def check(self):
         if not DNS_PYTHON:
         if not DNS_PYTHON:
-            self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
+            self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
             return False
             return False
 
 
         self.timeout = self.timeout if isinstance(self.timeout, int) else 4
         self.timeout = self.timeout if isinstance(self.timeout, int) else 4
 
 
         if not all([self.domains, self.server_list,
         if not all([self.domains, self.server_list,
                     isinstance(self.server_list, str), isinstance(self.domains, str)]):
                     isinstance(self.server_list, str), isinstance(self.domains, str)]):
-            self.error('server_list and domain_list can\'t be empty')
+            self.error("server_list and domain_list can't be empty")
             return False
             return False
         else:
         else:
             self.domains, self.server_list = self.domains.split(), self.server_list.split()
             self.domains, self.server_list = self.domains.split(), self.server_list.split()
@@ -129,17 +126,27 @@ def create_charts(aggregate, server_list):
             }
             }
         }
         }
         for ns in server_list:
         for ns in server_list:
-            definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
+            dim = [
+                '_'.join(['ns', ns.replace('.', '_')]),
+                ns,
+                'absolute',
+            ]
+            definitions['dns_group']['lines'].append(dim)
 
 
         return order, definitions
         return order, definitions
     else:
     else:
         order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
         order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
         definitions = dict()
         definitions = dict()
+
         for ns in server_list:
         for ns in server_list:
             definitions[''.join(['dns_', ns.replace('.', '_')])] = {
             definitions[''.join(['dns_', ns.replace('.', '_')])] = {
                 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
                 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
                 'lines': [
                 'lines': [
-                    ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']
+                    [
+                        '_'.join(['ns', ns.replace('.', '_')]),
+                        ns,
+                        'absolute',
+                    ]
                 ]
                 ]
             }
             }
         return order, definitions
         return order, definitions

+ 2 - 2
collectors/python.d.plugin/dnsdist/dnsdist.chart.py

@@ -90,9 +90,9 @@ CHARTS = {
         ]
         ]
     },
     },
     'servermem': {
     'servermem': {
-        'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
+        'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
         'lines': [
         'lines': [
-            ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
+            ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
         ]
         ]
     },
     },
     'query_latency': {
     'query_latency': {

+ 6 - 4
collectors/python.d.plugin/dockerd/dockerd.chart.py

@@ -23,21 +23,21 @@ ORDER = [
 
 
 CHARTS = {
 CHARTS = {
     'running_containers': {
     'running_containers': {
-        'options': [None, 'Number of running containers', 'running containers', 'running containers',
+        'options': [None, 'Number of running containers', 'containers', 'running containers',
                     'docker.running_containers', 'line'],
                     'docker.running_containers', 'line'],
         'lines': [
         'lines': [
             ['running_containers', 'running']
             ['running_containers', 'running']
         ]
         ]
     },
     },
     'healthy_containers': {
     'healthy_containers': {
-        'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers',
+        'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
                     'docker.healthy_containers', 'line'],
                     'docker.healthy_containers', 'line'],
         'lines': [
         'lines': [
             ['healthy_containers', 'healthy']
             ['healthy_containers', 'healthy']
         ]
         ]
     },
     },
     'unhealthy_containers': {
     'unhealthy_containers': {
-        'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers',
+        'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
                     'docker.unhealthy_containers', 'line'],
                     'docker.unhealthy_containers', 'line'],
         'lines': [
         'lines': [
             ['unhealthy_containers', 'unhealthy']
             ['unhealthy_containers', 'unhealthy']
@@ -51,10 +51,11 @@ class Service(SimpleService):
         SimpleService.__init__(self, configuration=configuration, name=name)
         SimpleService.__init__(self, configuration=configuration, name=name)
         self.order = ORDER
         self.order = ORDER
         self.definitions = CHARTS
         self.definitions = CHARTS
+        self.client = None
 
 
     def check(self):
     def check(self):
         if not HAS_DOCKER:
         if not HAS_DOCKER:
-            self.error('\'docker\' package is needed to use docker.chart.py')
+            self.error("'docker' package is needed to use docker.chart.py")
             return False
             return False
 
 
         self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
         self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
@@ -69,6 +70,7 @@ class Service(SimpleService):
 
 
     def get_data(self):
     def get_data(self):
         data = dict()
         data = dict()
+
         data['running_containers'] = len(self.client.containers.list(sparse=True))
         data['running_containers'] = len(self.client.containers.list(sparse=True))
         data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
         data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
         data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
         data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))

Some files were not shown because too many files changed in this diff