varnish.chart.py

# -*- coding: utf-8 -*-
# Description: varnish netdata python.d module
# Author: ilyam8
# SPDX-License-Identifier: GPL-3.0-or-later

import re

from bases.FrameworkServices.ExecutableService import ExecutableService
from bases.collection import find_binary

ORDER = [
    'session_connections',
    'client_requests',
    'all_time_hit_rate',
    'current_poll_hit_rate',
    'cached_objects_expired',
    'cached_objects_nuked',
    'threads_total',
    'threads_statistics',
    'threads_queue_len',
    'backend_connections',
    'backend_requests',
    'esi_statistics',
    'memory_usage',
    'uptime'
]
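
# Each dimension line in CHARTS follows the python.d charts convention
# [metric_id, display_name, algorithm, multiplier, divisor]; fields omitted at the end
# (as in 'threads_queue_len') are filled in with the framework's defaults.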
CHARTS = {
    'session_connections': {
        'options': [None, 'Connections Statistics', 'connections/s',
                    'client metrics', 'varnish.session_connection', 'line'],
        'lines': [
            ['sess_conn', 'accepted', 'incremental'],
            ['sess_dropped', 'dropped', 'incremental']
        ]
    },
    'client_requests': {
        'options': [None, 'Client Requests', 'requests/s',
                    'client metrics', 'varnish.client_requests', 'line'],
        'lines': [
            ['client_req', 'received', 'incremental']
        ]
    },
    'all_time_hit_rate': {
        'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
                    'varnish.all_time_hit_rate', 'stacked'],
        'lines': [
            ['cache_hit', 'hit', 'percentage-of-absolute-row'],
            ['cache_miss', 'miss', 'percentage-of-absolute-row'],
            ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']
        ]
    },
    'current_poll_hit_rate': {
        'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
                    'varnish.current_poll_hit_rate', 'stacked'],
        'lines': [
            ['cache_hit', 'hit', 'percentage-of-incremental-row'],
            ['cache_miss', 'miss', 'percentage-of-incremental-row'],
            ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
        ]
    },
    'cached_objects_expired': {
        'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
                    'varnish.cached_objects_expired', 'line'],
        'lines': [
            ['n_expired', 'objects', 'incremental']
        ]
    },
    'cached_objects_nuked': {
        'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
                    'varnish.cached_objects_nuked', 'line'],
        'lines': [
            ['n_lru_nuked', 'objects', 'incremental']
        ]
    },
    'threads_total': {
        'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
                    'varnish.threads_total', 'line'],
        'lines': [
            ['threads', None, 'absolute']
        ]
    },
    'threads_statistics': {
        'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
                    'varnish.threads_statistics', 'line'],
        'lines': [
            ['threads_created', 'created', 'incremental'],
            ['threads_failed', 'failed', 'incremental'],
            ['threads_limited', 'limited', 'incremental']
        ]
    },
    'threads_queue_len': {
        'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
                    'varnish.threads_queue_len', 'line'],
        'lines': [
            ['thread_queue_len', 'in queue']
        ]
    },
    'backend_connections': {
        'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
                    'varnish.backend_connections', 'line'],
        'lines': [
            ['backend_conn', 'successful', 'incremental'],
            ['backend_unhealthy', 'unhealthy', 'incremental'],
            ['backend_reuse', 'reused', 'incremental'],
            ['backend_toolate', 'closed', 'incremental'],
            ['backend_recycle', 'recycled', 'incremental'],
            ['backend_fail', 'failed', 'incremental']
        ]
    },
    'backend_requests': {
        'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
                    'varnish.backend_requests', 'line'],
        'lines': [
            ['backend_req', 'sent', 'incremental']
        ]
    },
    'esi_statistics': {
        'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
        'lines': [
            ['esi_errors', 'errors', 'incremental'],
            ['esi_warnings', 'warnings', 'incremental']
        ]
    },
    'memory_usage': {
        'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
        'lines': [
            ['memory_free', 'free', 'absolute', 1, 1 << 20],
            ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]
        ]
    },
    'uptime': {
        'lines': [
            ['uptime', None, 'absolute']
        ],
        'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
    }
}
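
# The CHARTS above are static; per-backend and per-disk charts are added at runtime
# from the templates below (see Service.add_charts).
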
def backend_charts_template(name):
    order = [
        '{0}_response_statistics'.format(name),
    ]

    charts = {
        order[0]: {
            'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
                        'varnish.backend', 'area'],
            'lines': [
                ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
                ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
            ]
        },
    }

    return order, charts
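
# Illustrative: backend_charts_template('default') yields order == ['default_response_statistics']
# and a chart whose dimensions read the 'default_beresp_hdrbytes' / 'default_beresp_bodybytes'
# values filled in by Service.get_vbe_backends(); 'default' here is just an example backend name.
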
def disk_charts_template(name):
    order = [
        'disk_{0}_usage'.format(name),
    ]

    charts = {
        order[0]: {
            'options': [None, 'Disk "{0}" Usage'.format(name), 'KiB', 'disk usage', 'varnish.disk_usage', 'stacked'],
            'lines': [
                ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
                ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
            ]
        },
    }

    return order, charts
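
# Illustrative: disk_charts_template('ssdStorage') yields order == ['disk_ssdStorage_usage']
# and a chart reading the 'ssdStorage.g_space' / 'ssdStorage.g_bytes' metrics collected in
# Service.get_data(); 'ssdStorage' is just an example storage name (see get_smf_disks()).
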
VARNISHSTAT = 'varnishstat'

re_version = re.compile(r'varnish-(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
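# Illustrative line the 'varnishstat -V' output is expected to contain (exact wording and
# version vary); re_version only needs the 'varnish-X.Y.Z' token:
#   varnishstat (varnish-6.0.8 revision ...)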

class VarnishVersion:
    def __init__(self, major, minor, patch):
        self.major = major
        self.minor = minor
        self.patch = patch

    def __str__(self):
        return '{0}.{1}.{2}'.format(self.major, self.minor, self.patch)

class Parser:
    _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
    _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_]+).(beresp[\w_]+)\s+(\d+)')
    _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
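
    # Illustrative varnishstat lines each pattern is expected to match (values are examples):
    #   _backend_new: VBE.default(127.0.0.1,,8080).beresp_hdrbytes  1024
    #   _backend_old: VBE.boot.default.beresp_hdrbytes  1024
    #   _default:     MAIN.client_req  4840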

    def __init__(self):
        self.re_default = None
        self.re_backend = None

    def init(self, data):
        data = ''.join(data)

        parsed_main = Parser._default.findall(data)
        if parsed_main:
            self.re_default = Parser._default

        parsed_backend = Parser._backend_new.findall(data)
        if parsed_backend:
            self.re_backend = Parser._backend_new
        else:
            parsed_backend = Parser._backend_old.findall(data)
            if parsed_backend:
                self.re_backend = Parser._backend_old

    def server_stats(self, data):
        return self.re_default.findall(''.join(data))

    def backend_stats(self, data):
        return self.re_backend.findall(''.join(data))
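
# The only module-specific job option read below is 'instance_name'. An illustrative
# python.d/varnish.conf job (job name and value are examples, not part of this file):
#
#   local:
#     instance_name: 'production'
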

class Service(ExecutableService):
    def __init__(self, configuration=None, name=None):
        ExecutableService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.instance_name = configuration.get('instance_name')
        self.parser = Parser()
        self.command = None
        self.collected_vbe = set()
        self.collected_smf = set()

    def create_command(self):
        varnishstat = find_binary(VARNISHSTAT)

        if not varnishstat:
            self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
            return False

        command = [varnishstat, '-V']
        reply = self._get_raw_data(stderr=True, command=command)
        if not reply:
            self.error(
  213. "no output from '{0}'. Is varnish running? Not enough privileges?".format(' '.join(self.command)))
            return False

        ver = parse_varnish_version(reply)
        if not ver:
            self.error("failed to parse reply from '{0}', used regex :'{1}', reply : {2}".format(
                ' '.join(command), re_version.pattern, reply))
            return False

        if self.instance_name:
            self.command = [varnishstat, '-1', '-n', self.instance_name]
        else:
            self.command = [varnishstat, '-1']

        if ver.major > 4:
            self.command.extend(['-t', '1'])

        self.info("varnish version: {0}, will use command: '{1}'".format(ver, ' '.join(self.command)))

        return True
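
    # Depending on version and configuration, the resulting command is, for example:
    #   varnishstat -1                        (varnish 4.x, default instance)
    #   varnishstat -1 -t 1                   (varnish 5+, default instance)
    #   varnishstat -1 -n production -t 1     (varnish 5+, named instance; name is an example)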

    def check(self):
        if not self.create_command():
            return False

        # STDOUT is not empty
        reply = self._get_raw_data()
        if not reply:
            self.error("no output from '{0}'. Is it running? Not enough privileges?".format(' '.join(self.command)))
            return False

        self.parser.init(reply)

        # Output is parsable
        if not self.parser.re_default:
            self.error("can't parse the output...")
            return False

        return True

    def get_data(self):
        """
        Format data received from shell command
        :return: dict
        """
        raw = self._get_raw_data()
        if not raw:
            return None

        data = dict()
        server_stats = self.parser.server_stats(raw)
        if not server_stats:
            return None

        stats = dict((param, value) for _, param, value in server_stats)
        data.update(stats)

        self.get_vbe_backends(data, raw)
        self.get_smf_disks(server_stats)

        # varnish 5 uses default.g_bytes and default.g_space
        data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
        data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')

        return data
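
    # get_data() returns a flat dict keyed by metric name, with the raw counter strings
    # from varnishstat as values, e.g. (illustrative):
    #   {'client_req': '4840', 'cache_hit': '4500', 'memory_allocated': '527122432', ...}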

    def get_vbe_backends(self, data, raw):
        if not self.parser.re_backend:
            return

        stats = self.parser.backend_stats(raw)
        if not stats:
            return

        for (name, param, value) in stats:
            data['_'.join([name, param])] = value

            if name in self.collected_vbe:
                continue

            self.collected_vbe.add(name)
            self.add_backend_charts(name)

    def get_smf_disks(self, server_stats):
        # [('SMF.', 'ssdStorage.c_req', '47686'),
        #  ('SMF.', 'ssdStorage.c_fail', '0'),
        #  ('SMF.', 'ssdStorage.c_bytes', '668102656'),
        #  ('SMF.', 'ssdStorage.c_freed', '140980224'),
        #  ('SMF.', 'ssdStorage.g_alloc', '39753'),
        #  ('SMF.', 'ssdStorage.g_bytes', '527122432'),
        #  ('SMF.', 'ssdStorage.g_space', '53159968768'),
        #  ('SMF.', 'ssdStorage.g_smf', '40130'),
        #  ('SMF.', 'ssdStorage.g_smf_frag', '311'),
        #  ('SMF.', 'ssdStorage.g_smf_large', '66')]
        disks = [name for typ, name, _ in server_stats if typ.startswith('SMF') and name.endswith('g_space')]
        if not disks:
            return

        for disk in disks:
            disk = disk.split('.')[0]  # ssdStorage
            if disk in self.collected_smf:
                continue

            self.collected_smf.add(disk)
            self.add_disk_charts(disk)

    def add_backend_charts(self, backend_name):
        self.add_charts(backend_name, backend_charts_template)

    def add_disk_charts(self, disk_name):
        self.add_charts(disk_name, disk_charts_template)

    def add_charts(self, name, charts_template):
        order, charts = charts_template(name)

        for chart_name in order:
            params = [chart_name] + charts[chart_name]['options']
            dimensions = charts[chart_name]['lines']

            new_chart = self.charts.add_chart(params)
            for dimension in dimensions:
                new_chart.add_dimension(dimension)

def parse_varnish_version(lines):
    m = re_version.search(lines[0])
    if not m:
        return None

    m = m.groupdict()
    return VarnishVersion(
        int(m['major']),
        int(m['minor']),
        int(m['patch']),
    )
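
# Illustrative use (input line is an example): parse_varnish_version(['varnishstat (varnish-6.0.8 revision ...)'])
# returns VarnishVersion(6, 0, 8), so str(version) == '6.0.8' and version.major == 6.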