# netdata python.d.plugin configuration for haproxy
#
# This file is in YAML format. Generally the format is:
#
# name: value
#
# There are 2 sections:
#  - global variables
#  - one or more JOBS
#
# JOBS allow you to collect values from multiple sources.
# Each source will have its own set of charts.
#
# JOB parameters have to be indented (using spaces only, example below).
# ----------------------------------------------------------------------
# Global Variables
# These variables set the defaults for all JOBs, however each JOB
# may define its own, overriding the defaults.

# update_every sets the default data collection frequency.
# If unset, the python.d.plugin default is used.
# update_every: 1

# priority controls the order of charts at the netdata dashboard.
# Lower numbers move the charts towards the top of the page.
# If unset, the default for python.d.plugin is used.
# priority: 60000

# retries sets the number of retries to be made in case of failures.
# If unset, the default for python.d.plugin is used.
# Attempts to restore the service are made once every update_every
# and only if the module has collected values in the past.
# retries: 60

# autodetection_retry sets the job re-check interval in seconds.
# The job is not deleted if check fails.
# Attempts to start the job are made once every autodetection_retry.
# This feature is disabled by default.
# autodetection_retry: 0
# ----------------------------------------------------------------------
# JOBS (data collection sources)
#
# The default JOBS share the same *name*. JOBS with the same name
# are mutually exclusive. Only one of them will be allowed running at
# any time. This allows autodetection to try several alternatives and
# pick the one that works.
#
# Any number of jobs is supported.
#
# All python.d.plugin JOBS (for all its modules) support a set of
# predefined parameters. These are:
#
# job_name:
#     name: myname            # the JOB's name as it will appear at the
#                             # dashboard (by default is the job_name)
#                             # JOBs sharing a name are mutually exclusive
#     update_every: 1         # the JOB's data collection frequency
#     priority: 60000         # the JOB's order on the dashboard
#     retries: 60             # the JOB's number of restoration attempts
#     autodetection_retry: 0  # the JOB's re-check interval in seconds
#
# Additionally to the above, haproxy also supports the following:
#
# IMPORTANT: the socket MUST BE readable AND writable by the netdata user.
#
#     socket: 'path/to/haproxy/sock'
#
# OR
#     url: 'http://<ip.address>:<port>/<url>;csv;norefresh'
#     [user: USERNAME]  # only if stats auth is used
#     [pass: PASSWORD]  # only if stats auth is used
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)

# via_url:
#   user: 'admin'
#   pass: 'password'
#   url: 'http://127.0.0.1:7000/haproxy_stats;csv;norefresh'

# via_socket:
#   socket: '/var/run/haproxy/admin.sock'