proxysql.chart.py

# -*- coding: utf-8 -*-
# Description: Proxysql netdata python.d module
# Author: Ali Borhani (alibo)
# SPDX-License-Identifier: GPL-3.0+

from bases.FrameworkServices.MySQLService import MySQLService

disabled_by_default = True


def query(table, *params):
    return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
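# Example: query("stats_mysql_global", "Variable_Name", "Variable_Value")
# yields "SELECT Variable_Name, Variable_Value FROM stats_mysql_global".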
# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_global
QUERY_GLOBAL = query(
    "stats_mysql_global",
    "Variable_Name",
    "Variable_Value"
)

# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_connection_pool
QUERY_CONNECTION_POOL = query(
    "stats_mysql_connection_pool",
    "hostgroup",
    "srv_host",
    "srv_port",
    "status",
    "ConnUsed",
    "ConnFree",
    "ConnOK",
    "ConnERR",
    "Queries",
    "Bytes_data_sent",
    "Bytes_data_recv",
    "Latency_us"
)

# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_commands_counters
QUERY_COMMANDS = query(
    "stats_mysql_commands_counters",
    "Command",
    "Total_Time_us",
    "Total_cnt",
    "cnt_100us",
    "cnt_500us",
    "cnt_1ms",
    "cnt_5ms",
    "cnt_10ms",
    "cnt_50ms",
    "cnt_100ms",
    "cnt_500ms",
    "cnt_1s",
    "cnt_5s",
    "cnt_10s",
    "cnt_INFs"
)
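# Variable names from stats_mysql_global (compared lower-cased) that are exported as global metrics.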
GLOBAL_STATS = [
    'client_connections_aborted',
    'client_connections_connected',
    'client_connections_created',
    'client_connections_non_idle',
    'proxysql_uptime',
    'questions',
    'slow_queries'
]

CONNECTION_POOL_STATS = [
    'status',
    'connused',
    'connfree',
    'connok',
    'connerr',
    'queries',
    'bytes_data_sent',
    'bytes_data_recv',
    'latency_us'
]

ORDER = [
    'connections',
    'active_transactions',
    'questions',
    'pool_overall_net',
    'commands_count',
    'commands_duration',
    'pool_status',
    'pool_net',
    'pool_queries',
    'pool_latency',
    'pool_connection_used',
    'pool_connection_free',
    'pool_connection_ok',
    'pool_connection_error'
]

HISTOGRAM_ORDER = [
    '100us',
    '500us',
    '1ms',
    '5ms',
    '10ms',
    '50ms',
    '100ms',
    '500ms',
    '1s',
    '5s',
    '10s',
    'inf'
]
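# Numeric encoding of the backend status strings reported by ProxySQL;
# anything not listed here is reported as -1 (see convert_status()).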
STATUS = {
    "ONLINE": 1,
    "SHUNNED": 2,
    "OFFLINE_SOFT": 3,
    "OFFLINE_HARD": 4
}
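# Chart definitions. Charts with empty 'lines' get their per-backend and
# per-command dimensions added dynamically once the data arrives.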
CHARTS = {
    'pool_status': {
        'options': [None, 'ProxySQL Backend Status', 'status', 'status', 'proxysql.pool_status', 'line'],
        'lines': []
    },
    'pool_net': {
        'options': [None, 'ProxySQL Backend Bandwidth', 'kilobits/s', 'bandwidth', 'proxysql.pool_net', 'area'],
        'lines': []
    },
    'pool_overall_net': {
        'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
                    'proxysql.pool_overall_net', 'area'],
        'lines': [
            ['bytes_data_recv', 'in', 'incremental', 8, 1000],
            ['bytes_data_sent', 'out', 'incremental', -8, 1000]
        ]
    },
    'questions': {
        'options': [None, 'ProxySQL Frontend Questions', 'questions/s', 'questions', 'proxysql.questions', 'line'],
        'lines': [
            ['questions', 'questions', 'incremental'],
            ['slow_queries', 'slow_queries', 'incremental']
        ]
    },
    'pool_queries': {
        'options': [None, 'ProxySQL Backend Queries', 'queries/s', 'queries', 'proxysql.queries', 'line'],
        'lines': []
    },
    'active_transactions': {
        'options': [None, 'ProxySQL Frontend Active Transactions', 'transactions/s', 'active_transactions',
                    'proxysql.active_transactions', 'line'],
        'lines': [
            ['active_transactions', 'active_transactions', 'absolute']
        ]
    },
    'pool_latency': {
        'options': [None, 'ProxySQL Backend Latency', 'milliseconds', 'latency', 'proxysql.latency', 'line'],
        'lines': []
    },
    'connections': {
        'options': [None, 'ProxySQL Frontend Connections', 'connections/s', 'connections', 'proxysql.connections',
                    'line'],
        'lines': [
            ['client_connections_connected', 'connected', 'absolute'],
            ['client_connections_created', 'created', 'incremental'],
            ['client_connections_aborted', 'aborted', 'incremental'],
            ['client_connections_non_idle', 'non_idle', 'absolute']
        ]
    },
    'pool_connection_used': {
        'options': [None, 'ProxySQL Used Connections', 'connections', 'pool_connections',
                    'proxysql.pool_used_connections', 'line'],
        'lines': []
    },
    'pool_connection_free': {
        'options': [None, 'ProxySQL Free Connections', 'connections', 'pool_connections',
                    'proxysql.pool_free_connections', 'line'],
        'lines': []
    },
    'pool_connection_ok': {
        'options': [None, 'ProxySQL Established Connections', 'connections', 'pool_connections',
                    'proxysql.pool_ok_connections', 'line'],
        'lines': []
    },
    'pool_connection_error': {
        'options': [None, 'ProxySQL Error Connections', 'connections', 'pool_connections',
                    'proxysql.pool_error_connections', 'line'],
        'lines': []
    },
    'commands_count': {
        'options': [None, 'ProxySQL Commands', 'commands', 'commands', 'proxysql.commands_count', 'line'],
        'lines': []
    },
    'commands_duration': {
        'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration',
                    'line'],
        'lines': []
    }
}
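# ProxySQL exposes these statistics through its MySQL-compatible admin interface,
# so the collector reuses MySQLService and simply registers the three admin queries.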
class Service(MySQLService):
    def __init__(self, configuration=None, name=None):
        MySQLService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.queries = dict(
            global_status=QUERY_GLOBAL,
            connection_pool_status=QUERY_CONNECTION_POOL,
            commands_status=QUERY_COMMANDS
        )
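    # Run the configured admin queries and flatten the result sets into a single
    # metrics dict; dimensions and histogram charts for newly seen backends and
    # commands are registered on the fly.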
    def _get_data(self):
        raw_data = self._get_raw_data(description=True)

        if not raw_data:
            return None

        to_netdata = dict()

        if 'global_status' in raw_data:
            global_status = dict(raw_data['global_status'][0])
            for key in global_status:
                if key.lower() in GLOBAL_STATS:
                    to_netdata[key.lower()] = global_status[key]

        if 'connection_pool_status' in raw_data:
            to_netdata['bytes_data_recv'] = 0
            to_netdata['bytes_data_sent'] = 0

            for record in raw_data['connection_pool_status'][0]:
                backend = self.generate_backend(record)
                name = self.generate_backend_name(backend)

                for key in backend:
                    if key in CONNECTION_POOL_STATS:
                        if key == 'status':
                            backend[key] = self.convert_status(backend[key])

                        if len(self.charts) > 0:
                            if (name + '_status') not in self.charts['pool_status']:
                                self.add_backend_dimensions(name)

                        to_netdata["{0}_{1}".format(name, key)] = backend[key]

                    if key == 'bytes_data_recv':
                        to_netdata['bytes_data_recv'] += int(backend[key])

                    if key == 'bytes_data_sent':
                        to_netdata['bytes_data_sent'] += int(backend[key])

        if 'commands_status' in raw_data:
            for record in raw_data['commands_status'][0]:
                cmd = self.generate_command_stats(record)
                name = cmd['name']

                if len(self.charts) > 0:
                    if (name + '_count') not in self.charts['commands_count']:
                        self.add_command_dimensions(name)
                        self.add_histogram_chart(cmd)

                to_netdata[name + '_count'] = cmd['count']
                to_netdata[name + '_duration'] = cmd['duration']
                for histogram in cmd['histogram']:
                    dimId = 'commands_histogram_{0}_{1}'.format(name, histogram)
                    to_netdata[dimId] = cmd['histogram'][histogram]

        return to_netdata or None
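    # Register this backend's dimensions on the shared pool charts; called once
    # per backend, when it first appears in the data.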
    def add_backend_dimensions(self, name):
        self.charts['pool_status'].add_dimension([name + '_status', name, 'absolute'])
        self.charts['pool_net'].add_dimension([name + '_bytes_data_recv', 'from_' + name, 'incremental', 8, 1024])
        self.charts['pool_net'].add_dimension([name + '_bytes_data_sent', 'to_' + name, 'incremental', -8, 1024])
        self.charts['pool_queries'].add_dimension([name + '_queries', name, 'incremental'])
        self.charts['pool_latency'].add_dimension([name + '_latency_us', name, 'absolute', 1, 1000])
        self.charts['pool_connection_used'].add_dimension([name + '_connused', name, 'absolute'])
        self.charts['pool_connection_free'].add_dimension([name + '_connfree', name, 'absolute'])
        self.charts['pool_connection_ok'].add_dimension([name + '_connok', name, 'incremental'])
        self.charts['pool_connection_error'].add_dimension([name + '_connerr', name, 'incremental'])

    def add_command_dimensions(self, cmd):
        self.charts['commands_count'].add_dimension([cmd + '_count', cmd, 'incremental'])
        self.charts['commands_duration'].add_dimension([cmd + '_duration', cmd, 'incremental', 1, 1000])
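    # Create a dedicated stacked response-time histogram chart for a command and
    # add one dimension per latency bucket.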
    def add_histogram_chart(self, cmd):
        chart = self.charts.add_chart(self.histogram_chart(cmd))

        for histogram in HISTOGRAM_ORDER:
            dimId = 'commands_histogram_{0}_{1}'.format(cmd['name'], histogram)
            chart.add_dimension([dimId, histogram, 'incremental'])

    @staticmethod
    def histogram_chart(cmd):
        return [
            'commands_histogram_' + cmd['name'],
            None,
            'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
            'commands',
            'commands_histogram',
            'proxysql.commands_histogram_' + cmd['name'],
            'stacked'
        ]
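    # Map a positional stats_mysql_connection_pool row onto a dict keyed by
    # column name; the order matches QUERY_CONNECTION_POOL.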
    @staticmethod
    def generate_backend(data):
        return {
            'hostgroup': data[0],
            'srv_host': data[1],
            'srv_port': data[2],
            'status': data[3],
            'connused': data[4],
            'connfree': data[5],
            'connok': data[6],
            'connerr': data[7],
            'queries': data[8],
            'bytes_data_sent': data[9],
            'bytes_data_recv': data[10],
            'latency_us': data[11]
        }
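    # Map a stats_mysql_commands_counters row onto a dict with the command name,
    # totals and per-bucket histogram counts.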
    @staticmethod
    def generate_command_stats(data):
        return {
            'name': data[0].lower(),
            'duration': data[1],
            'count': data[2],
            'histogram': {
                '100us': data[3],
                '500us': data[4],
                '1ms': data[5],
                '5ms': data[6],
                '10ms': data[7],
                '50ms': data[8],
                '100ms': data[9],
                '500ms': data[10],
                '1s': data[11],
                '5s': data[12],
                '10s': data[13],
                'inf': data[14]
            }
        }
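    # Build a chart-safe identifier from hostgroup, host and port,
    # e.g. ('0', '10.0.0.1', '3306') becomes '0_10_0_0_1_3306'.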
    @staticmethod
    def generate_backend_name(backend):
        hostgroup = backend['hostgroup'].replace(' ', '_').lower()
        host = backend['srv_host'].replace('.', '_')
        return "{0}_{1}_{2}".format(hostgroup, host, backend['srv_port'])
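    # Translate a backend status string into its numeric code; unknown values map to -1.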
    @staticmethod
    def convert_status(status):
        if status in STATUS:
            return STATUS[status]
        return -1