Просмотр исходного кода

[python] make units compliant with IEC standard (#4995)

* apache units fix

* beanstalk

* bind_rndc

* boinc

* ceph

* chrony

* couchdb

* dns_query

* dnsdist

* dockerd

* dovecot

* elasticsearch by @vlvkobal <3

* example

* exim

* fail2ban

* freeradius minor fixes

* freeradius minor fixes

* freeradius minor fixes

* go_expvar

* haproxy

* hddtemp

* httpcheck

* icecast

* ipfs

* isc_dhcpd

* litespeed

* logind

* megacli

* memcached

* mongodb

* monit

* mysql

* nginx

* nginx_plus

* nsd

* ntpd

* nvidia_smi

* openldap

* ovpn_status

* phpfm

* portcheck

* postfix

* postgres

* powerdns

* proxysql

* puppet

* rabbitmq

* redis

* retroshare

* samba

* sensors

* smartdlog

* spigotmc

* springboot

* squid

* retroshare

* tomcat

* retroshare

* tor

* traefik

* traefik

* unbound

* uwsgi

* varnish

* w1sensor

* web_log

* ok codacy

* retroshare

* ipfs
Ilya Mashchenko 6 лет назад
Родитель
Commit
97b32703c6

+ 70 - 41
collectors/python.d.plugin/apache/apache.chart.py

@@ -5,63 +5,60 @@
 
 from bases.FrameworkServices.UrlService import UrlService
 
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
 
-# default job configuration (overridden by python.d.plugin)
-# config = {'local': {
-#             'update_every': update_every,
-#             'retries': retries,
-#             'priority': priority,
-#             'url': 'http://www.apache.org/server-status?auto'
-#          }}
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq']
+ORDER = [
+    'requests',
+    'connections',
+    'conns_async',
+    'net',
+    'workers',
+    'reqpersec',
+    'bytespersec',
+    'bytesperreq',
+]
 
 CHARTS = {
     'bytesperreq': {
-        'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request',
+        'options': [None, 'Lifetime Avg. Request Size', 'KiB',
                     'statistics', 'apache.bytesperreq', 'area'],
         'lines': [
-            ['size_req']
+            ['size_req', 'size', 'absolute', 1, 1024 * 100000]
         ]},
     'workers': {
-        'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
+        'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
         'lines': [
             ['idle'],
             ['busy'],
         ]},
     'reqpersec': {
-        'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics',
+        'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
                     'apache.reqpersec', 'area'],
         'lines': [
-            ['requests_sec']
+            ['requests_sec', 'requests', 'absolute', 1, 100000]
         ]},
     'bytespersec': {
-        'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
+        'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
                     'apache.bytesperreq', 'area'],
         'lines': [
-            ['size_sec', None, 'absolute', 8, 1000]
+            ['size_sec', None, 'absolute', 8, 1000 * 100000]
         ]},
     'requests': {
-        'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
+        'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
         'lines': [
             ['requests', None, 'incremental']
         ]},
     'net': {
-        'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
+        'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
         'lines': [
             ['sent', None, 'incremental', 8, 1]
         ]},
     'connections': {
-        'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'],
+        'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
         'lines': [
             ['connections']
         ]},
     'conns_async': {
-        'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
+        'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
         'lines': [
             ['keepalive'],
             ['closing'],
@@ -85,6 +82,14 @@ ASSIGNMENT = {
     'ConnsAsyncWriting': 'writing'
 }
 
+FLOAT_VALUES = [
+    'BytesPerReq',
+    'ReqPerSec',
+    'BytesPerSec',
+]
+
+LIGHTTPD_MARKER = 'idle_servers'
+
 
 class Service(UrlService):
     def __init__(self, configuration=None, name=None):
@@ -95,20 +100,15 @@ class Service(UrlService):
 
     def check(self):
         self._manager = self._build_manager()
+
         data = self._get_data()
+
         if not data:
             return None
 
-        if 'idle_servers' in data:
-            self.module_name = 'lighttpd'
-            for chart in self.definitions:
-                if chart == 'workers':
-                    lines = self.definitions[chart]['lines']
-                    lines[0] = ['idle_servers', 'idle']
-                    lines[1] = ['busy_servers', 'busy']
-                opts = self.definitions[chart]['options']
-                opts[1] = opts[1].replace('apache', 'lighttpd')
-                opts[4] = opts[4].replace('apache', 'lighttpd')
+        if LIGHTTPD_MARKER in data:
+            self.turn_into_lighttpd()
+
         return True
 
     def _get_data(self):
@@ -117,15 +117,44 @@ class Service(UrlService):
         :return: dict
         """
         raw_data = self._get_raw_data()
+
         if not raw_data:
             return None
+
         data = dict()
 
-        for row in raw_data.split('\n'):
-            tmp = row.split(':')
-            if tmp[0] in ASSIGNMENT:
-                try:
-                    data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1]))
-                except (IndexError, ValueError):
-                    continue
+        for line in raw_data.split('\n'):
+            try:
+                parse_line(line, data)
+            except ValueError:
+                continue
+
         return data or None
+
+    def turn_into_lighttpd(self):
+        self.module_name = 'lighttpd'
+        for chart in self.definitions:
+            if chart == 'workers':
+                lines = self.definitions[chart]['lines']
+                lines[0] = ['idle_servers', 'idle']
+                lines[1] = ['busy_servers', 'busy']
+            opts = self.definitions[chart]['options']
+            opts[1] = opts[1].replace('apache', 'lighttpd')
+            opts[4] = opts[4].replace('apache', 'lighttpd')
+
+
+def parse_line(line, data):
+    parts = line.split(':')
+
+    if len(parts) != 2:
+        return
+
+    key, value = parts[0], parts[1]
+
+    if key not in ASSIGNMENT:
+        return
+
+    if key in FLOAT_VALUES:
+        data[ASSIGNMENT[key]] = int((float(value) * 100000))
+    else:
+        data[ASSIGNMENT[key]] = int(value)

+ 11 - 5
collectors/python.d.plugin/beanstalk/beanstalk.chart.py

@@ -12,12 +12,18 @@ except ImportError:
 from bases.FrameworkServices.SimpleService import SimpleService
 from bases.loaders import safe_load
 
-# default module values (can be overridden per job in `config`)
-# update_every = 2
-priority = 60000
 
-ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs',
-         'current_connections', 'binlog', 'uptime']
+ORDER = [
+    'cpu_usage',
+    'jobs_rate',
+    'connections_rate',
+    'commands_rate',
+    'current_tubes',
+    'current_jobs',
+    'current_connections',
+    'binlog',
+    'uptime',
+]
 
 CHARTS = {
     'cpu_usage': {

+ 22 - 7
collectors/python.d.plugin/bind_rndc/bind_rndc.chart.py

@@ -11,10 +11,15 @@ from subprocess import Popen
 from bases.collection import find_binary
 from bases.FrameworkServices.SimpleService import SimpleService
 
-priority = 60000
+
 update_every = 30
 
-ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size']
+ORDER = [
+    'name_server_statistics',
+    'incoming_queries',
+    'outgoing_queries',
+    'named_stats_size',
+]
 
 CHARTS = {
     'name_server_statistics': {
@@ -43,7 +48,7 @@ CHARTS = {
         'lines': [
         ]},
     'named_stats_size': {
-        'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'],
+        'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
         'lines': [
             ['stats_size', None, 'absolute', 1, 1 << 20]
         ]
@@ -91,10 +96,20 @@ class Service(SimpleService):
         self.definitions = CHARTS
         self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
         self.rndc = find_binary('rndc')
-        self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0,
-                         nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0,
-                         nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0,
-                         nms_dropped_queries=0)
+        self.data = dict(
+            nms_requests=0,
+            nms_responses=0,
+            nms_failure=0,
+            nms_auth=0,
+            nms_non_auth=0,
+            nms_nxrrset=0,
+            nms_success=0,
+            nms_nxdomain=0,
+            nms_recursion=0,
+            nms_duplicate=0,
+            nms_rejected_queries=0,
+            nms_dropped_queries=0,
+        )
 
     def check(self):
         if not self.rndc:

+ 11 - 3
collectors/python.d.plugin/boinc/boinc.chart.py

@@ -10,7 +10,12 @@ from bases.FrameworkServices.SimpleService import SimpleService
 from third_party import boinc_client
 
 
-ORDER = ['tasks', 'states', 'sched_states', 'process_states']
+ORDER = [
+    'tasks',
+    'states',
+    'sched_states',
+    'process_states',
+]
 
 CHARTS = {
     'tasks': {
@@ -141,14 +146,16 @@ class Service(SimpleService):
     def _get_data(self):
         if not self.is_alive():
             return None
+
         data = dict(_DATA_TEMPLATE)
-        results = []
+
         try:
             results = self.client.get_tasks()
         except socket.error:
             self.error('Connection is dead')
             self.alive = False
             return None
+
         for task in results:
             data['total'] += 1
             data[_TASK_MAP[task.state]] += 1
@@ -159,4 +166,5 @@ class Service(SimpleService):
                     data[_PROC_MAP[task.active_task_state]] += 1
             except AttributeError:
                 pass
-        return data
+
+        return data or None

+ 8 - 8
collectors/python.d.plugin/ceph/ceph.chart.py

@@ -9,13 +9,13 @@ try:
 except ImportError:
     CEPH = False
 
-import os
 import json
+import os
+
 from bases.FrameworkServices.SimpleService import SimpleService
 
 # default module values (can be overridden per job in `config`)
 update_every = 10
-priority = 60000
 
 ORDER = [
     'general_usage',
@@ -36,7 +36,7 @@ ORDER = [
 
 CHARTS = {
     'general_usage': {
-        'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'],
+        'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
         'lines': [
             ['general_available', 'avail', 'absolute'],
             ['general_usage', 'used', 'absolute']
@@ -49,7 +49,7 @@ CHARTS = {
         ]
     },
     'general_bytes': {
-        'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes',
+        'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
                     'area'],
         'lines': [
             ['general_read_bytes', 'read', 'absolute', 1, 1024],
@@ -73,7 +73,7 @@ CHARTS = {
         ]
     },
     'pool_usage': {
-        'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'],
+        'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
         'lines': []
     },
     'pool_objects': {
@@ -81,11 +81,11 @@ CHARTS = {
         'lines': []
     },
     'pool_read_bytes': {
-        'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'],
+        'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
         'lines': []
     },
     'pool_write_bytes': {
-        'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'],
+        'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
         'lines': []
     },
     'pool_read_operations': {
@@ -97,7 +97,7 @@ CHARTS = {
         'lines': []
     },
     'osd_usage': {
-        'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'],
+        'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
         'lines': []
     },
     'osd_apply_latency': {

+ 12 - 3
collectors/python.d.plugin/chrony/chrony.chart.py

@@ -7,10 +7,19 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
 
 # default module values (can be overridden per job in `config`)
 update_every = 5
-priority = 60000
+
+CHRONY_COMMAND = 'chronyc -n tracking'
 
 # charts order (can be overridden if you want less charts, or different order)
-ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew']
+ORDER = [
+    'system',
+    'offsets',
+    'stratum',
+    'root',
+    'frequency',
+    'residualfreq',
+    'skew',
+]
 
 CHARTS = {
     'system': {
@@ -76,9 +85,9 @@ class Service(ExecutableService):
     def __init__(self, configuration=None, name=None):
         ExecutableService.__init__(
             self, configuration=configuration, name=name)
-        self.command = 'chronyc -n tracking'
         self.order = ORDER
         self.definitions = CHARTS
+        self.command = CHRONY_COMMAND
 
     def _get_data(self):
         """

+ 18 - 28
collectors/python.d.plugin/couchdb/couchdb.chart.py

@@ -8,6 +8,7 @@ from collections import namedtuple, defaultdict
 from json import loads
 from threading import Thread
 from socket import gethostbyname, gaierror
+
 try:
     from queue import Queue
 except ImportError:
@@ -15,9 +16,9 @@ except ImportError:
 
 from bases.FrameworkServices.UrlService import UrlService
 
-# default module values (can be overridden per job in `config`)
+
 update_every = 1
-priority = 60000
+
 
 METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
 
@@ -108,7 +109,7 @@ ORDER = [
 
 CHARTS = {
     'activity': {
-        'options': [None, 'Overall Activity', 'req/s',
+        'options': [None, 'Overall Activity', 'requests/s',
                     'dbactivity', 'couchdb.activity', 'stacked'],
         'lines': [
             ['couchdb_database_reads', 'DB reads', 'incremental'],
@@ -117,7 +118,7 @@ CHARTS = {
         ]
     },
     'request_methods': {
-        'options': [None, 'HTTP request methods', 'req/s',
+        'options': [None, 'HTTP request methods', 'requests/s',
                     'httptraffic', 'couchdb.request_methods',
                     'stacked'],
         'lines': [
@@ -132,7 +133,7 @@ CHARTS = {
         ]
     },
     'response_codes': {
-        'options': [None, 'HTTP response status codes', 'resp/s',
+        'options': [None, 'HTTP response status codes', 'responses/s',
                     'httptraffic', 'couchdb.response_codes',
                     'stacked'],
         'lines': [
@@ -150,15 +151,13 @@ CHARTS = {
         ]
     },
     'open_files': {
-        'options': [None, 'Open files', 'files',
-                    'ops', 'couchdb.open_files', 'line'],
+        'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
         'lines': [
             ['couchdb_open_os_files', '# files', 'absolute']
         ]
     },
     'active_tasks': {
-        'options': [None, 'Active task breakdown', 'tasks',
-                    'ops', 'couchdb.active_tasks', 'stacked'],
+        'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
         'lines': [
             ['activetasks_indexer', 'Indexer', 'absolute'],
             ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
@@ -167,8 +166,7 @@ CHARTS = {
         ]
     },
     'replicator_jobs': {
-        'options': [None, 'Replicator job breakdown', 'jobs',
-                    'ops', 'couchdb.replicator_jobs', 'stacked'],
+        'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
         'lines': [
             ['couch_replicator_jobs_running', 'Running', 'absolute'],
             ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
@@ -178,8 +176,7 @@ CHARTS = {
         ]
     },
     'erlang_memory': {
-        'options': [None, 'Erlang VM memory usage', 'bytes',
-                    'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
+        'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
         'lines': [
             ['memory_atom', 'atom', 'absolute'],
             ['memory_binary', 'binaries', 'absolute'],
@@ -190,23 +187,20 @@ CHARTS = {
         ]
     },
     'erlang_reductions': {
-        'options': [None, 'Erlang reductions', 'count',
-                    'erlang', 'couchdb.reductions', 'line'],
+        'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
         'lines': [
             ['reductions', 'reductions', 'incremental']
         ]
     },
     'erlang_proc_counts': {
-        'options': [None, 'Process counts', 'count',
-                    'erlang', 'couchdb.proccounts', 'line'],
+        'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
         'lines': [
             ['os_proc_count', 'OS procs', 'absolute'],
             ['process_count', 'erl procs', 'absolute']
         ]
     },
     'erlang_peak_msg_queue': {
-        'options': [None, 'Peak message queue size', 'count',
-                    'erlang', 'couchdb.peakmsgqueue',
+        'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
                     'line'],
         'lines': [
             ['peak_msg_queue', 'peak size', 'absolute']
@@ -214,18 +208,15 @@ CHARTS = {
     },
     # Lines for the following are added as part of check()
     'db_sizes_file': {
-        'options': [None, 'Database sizes (file)', 'KB',
-                    'perdbstats', 'couchdb.db_sizes_file', 'line'],
+        'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
         'lines': []
     },
     'db_sizes_external': {
-        'options': [None, 'Database sizes (external)', 'KB',
-                    'perdbstats', 'couchdb.db_sizes_external', 'line'],
+        'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
         'lines': []
     },
     'db_sizes_active': {
-        'options': [None, 'Database sizes (active)', 'KB',
-                    'perdbstats', 'couchdb.db_sizes_active', 'line'],
+        'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
         'lines': []
     },
     'db_doc_counts': {
@@ -234,8 +225,7 @@ CHARTS = {
         'lines': []
     },
     'db_doc_del_counts': {
-        'options': [None, 'Database # of deleted docs', 'docs',
-                    'perdbstats', 'couchdb_db_doc_del_count', 'line'],
+        'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
         'lines': []
     }
 }
@@ -255,7 +245,7 @@ class Service(UrlService):
         try:
             self.dbs = self.configuration.get('databases').split(' ')
         except (KeyError, AttributeError):
-            self.dbs = []
+            self.dbs = list()
 
     def check(self):
         if not (self.host and self.port):

+ 14 - 7
collectors/python.d.plugin/dns_query_time/dns_query_time.chart.py

@@ -28,10 +28,7 @@ except ImportError:
 from bases.FrameworkServices.SimpleService import SimpleService
 
 
-# default module values (can be overridden per job in `config`)
 update_every = 5
-priority = 60000
-retries = 60
 
 
 class Service(SimpleService):
@@ -46,14 +43,14 @@ class Service(SimpleService):
 
     def check(self):
         if not DNS_PYTHON:
-            self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
+            self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
             return False
 
         self.timeout = self.timeout if isinstance(self.timeout, int) else 4
 
         if not all([self.domains, self.server_list,
                     isinstance(self.server_list, str), isinstance(self.domains, str)]):
-            self.error('server_list and domain_list can\'t be empty')
+            self.error("server_list and domain_list can't be empty")
             return False
         else:
             self.domains, self.server_list = self.domains.split(), self.server_list.split()
@@ -129,17 +126,27 @@ def create_charts(aggregate, server_list):
             }
         }
         for ns in server_list:
-            definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
+            dim = [
+                '_'.join(['ns', ns.replace('.', '_')]),
+                ns,
+                'absolute',
+            ]
+            definitions['dns_group']['lines'].append(dim)
 
         return order, definitions
     else:
         order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
         definitions = dict()
+
         for ns in server_list:
             definitions[''.join(['dns_', ns.replace('.', '_')])] = {
                 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
                 'lines': [
-                    ['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']
+                    [
+                        '_'.join(['ns', ns.replace('.', '_')]),
+                        ns,
+                        'absolute',
+                    ]
                 ]
             }
         return order, definitions

+ 2 - 2
collectors/python.d.plugin/dnsdist/dnsdist.chart.py

@@ -90,9 +90,9 @@ CHARTS = {
         ]
     },
     'servermem': {
-        'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'],
+        'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
         'lines': [
-            ['real-memory-usage', 'memory usage', 'absolute', 1, 1048576]
+            ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
         ]
     },
     'query_latency': {

+ 6 - 4
collectors/python.d.plugin/dockerd/dockerd.chart.py

@@ -23,21 +23,21 @@ ORDER = [
 
 CHARTS = {
     'running_containers': {
-        'options': [None, 'Number of running containers', 'running containers', 'running containers',
+        'options': [None, 'Number of running containers', 'containers', 'running containers',
                     'docker.running_containers', 'line'],
         'lines': [
             ['running_containers', 'running']
         ]
     },
     'healthy_containers': {
-        'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers',
+        'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
                     'docker.healthy_containers', 'line'],
         'lines': [
             ['healthy_containers', 'healthy']
         ]
     },
     'unhealthy_containers': {
-        'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers',
+        'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
                     'docker.unhealthy_containers', 'line'],
         'lines': [
             ['unhealthy_containers', 'unhealthy']
@@ -51,10 +51,11 @@ class Service(SimpleService):
         SimpleService.__init__(self, configuration=configuration, name=name)
         self.order = ORDER
         self.definitions = CHARTS
+        self.client = None
 
     def check(self):
         if not HAS_DOCKER:
-            self.error('\'docker\' package is needed to use docker.chart.py')
+            self.error("'docker' package is needed to use docker.chart.py")
             return False
 
         self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
@@ -69,6 +70,7 @@ class Service(SimpleService):
 
     def get_data(self):
         data = dict()
+
         data['running_containers'] = len(self.client.containers.list(sparse=True))
         data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
         data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))

Некоторые файлы не были показаны из-за большого количества измененных файлов