# zscores.conf
  1. # netdata python.d.plugin configuration for example
  2. #
  3. # This file is in YaML format. Generally the format is:
  4. #
  5. # name: value
  6. #
  7. # There are 2 sections:
  8. # - global variables
  9. # - one or more JOBS
  10. #
  11. # JOBS allow you to collect values from multiple sources.
  12. # Each source will have its own set of charts.
  13. #
  14. # JOB parameters have to be indented (using spaces only, example below).
  15. # ----------------------------------------------------------------------
  16. # Global Variables
  17. # These variables set the defaults for all JOBs, however each JOB
  18. # may define its own, overriding the defaults.
  19. # update_every sets the default data collection frequency.
  20. # If unset, the python.d.plugin default is used.
  21. update_every: 5
  22. # priority controls the order of charts at the netdata dashboard.
  23. # Lower numbers move the charts towards the top of the page.
  24. # If unset, the default for python.d.plugin is used.
  25. # priority: 60000
  26. # penalty indicates whether to apply penalty to update_every in case of failures.
  27. # Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
  28. # penalty: yes
  29. # autodetection_retry sets the job re-check interval in seconds.
  30. # The job is not deleted if check fails.
  31. # Attempts to start the job are made once every autodetection_retry.
  32. # This feature is disabled by default.
  33. # autodetection_retry: 0
  34. # ----------------------------------------------------------------------
  35. # JOBS (data collection sources)
  36. #
  37. # The default JOBS share the same *name*. JOBS with the same name
  38. # are mutually exclusive. Only one of them will be allowed running at
  39. # any time. This allows autodetection to try several alternatives and
  40. # pick the one that works.
  41. #
  42. # Any number of jobs is supported.
  43. #
  44. # All python.d.plugin JOBS (for all its modules) support a set of
  45. # predefined parameters. These are:
  46. #
  47. # job_name:
  48. # name: myname # the JOB's name as it will appear at the
  49. # # dashboard (by default is the job_name)
  50. # # JOBs sharing a name are mutually exclusive
  51. # update_every: 1 # the JOB's data collection frequency
  52. # priority: 60000 # the JOB's order on the dashboard
  53. # penalty: yes # the JOB's penalty
  54. # autodetection_retry: 0 # the JOB's re-check interval in seconds
  55. #
  56. # Additionally to the above, example also supports the following:
  57. #
  58. # - none
  59. #
  60. # ----------------------------------------------------------------------
  61. # AUTO-DETECTION JOBS
  62. # only one of them will run (they have the same name)
  63. local:
  64. name: 'local'
  65. # what host to pull data from
  66. host: '127.0.0.1:19999'
  67. # what charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
  68. charts_regex: 'system\..*'
  69. # Charts to exclude, useful if you would like to exclude some specific charts.
  70. # Note: should be a ',' separated string like 'chart.name,chart.name'.
  71. charts_to_exclude: 'system.uptime'
  72. # length of time to base calculations off for mean and stddev
  73. train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore
  74. # offset preceding latest data to ignore when calculating mean and stddev
  75. offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev
  76. # recalculate the mean and stddev every n steps of the collector
  77. train_every_n: 900 # recalculate mean and stddev every 15 minutes
  78. # smooth the z score by averaging it over last n values
  79. z_smooth_n: 15 # take a rolling average of the last 15 zscore values to reduce sensitivity to temporary 'spikes'
  80. # cap absolute value of zscore (before smoothing) for better stability
  81. z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscores swamping any rolling average
  82. # set z_abs: 'true' to make all zscores be absolute values only.
  83. z_abs: 'true'
  84. # burn in period in which to initially calculate mean and stddev on every step
  85. burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return
  86. # mode can be to get a zscore 'per_dim' or 'per_chart'
  87. mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step
  88. # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'
  89. per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average.