plugin_name: python.d.plugin
modules:
  - meta:
      plugin_name: python.d.plugin
      module_name: ceph
      monitored_instance:
        name: Ceph
        link: 'https://ceph.io/'
        categories:
          - data-collection.storage-mount-points-and-filesystems
        icon_filename: 'ceph.svg'
      related_resources:
        integrations:
          list: []
      info_provided_to_referring_integrations:
        description: ''
      keywords:
        - ceph
        - storage
      most_popular: false
    overview:
      data_collection:
        metrics_description: 'This collector monitors Ceph cluster statistics, OSD usage, latency, and pool statistics.'
        method_description: 'Uses the `rados` Python module to connect to a Ceph cluster.'
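        # Illustrative only: a minimal sketch of what a `rados` connection looks
        # like in Python. The paths and rados_id here are assumptions that mirror
        # the defaults documented in the setup section below, not collector code.
        #
        #   import rados
        #   cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
        #                         conf=dict(keyring='/etc/ceph/ceph.client.admin.keyring'),
        #                         rados_id='admin')
        #   cluster.connect()
        #   print(cluster.get_cluster_stats())  # kb, kb_used, kb_avail, num_objects
        #   cluster.shutdown()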
      supported_platforms:
        include: []
        exclude: []
      multi_instance: true
      additional_permissions:
        description: ''
      default_behavior:
        auto_detection:
          description: ''
        limits:
          description: ''
        performance_impact:
          description: ''
    setup:
      prerequisites:
        list:
          - title: '`rados` python module'
            description: 'Make sure the `rados` Python module is installed.'
          - title: 'Grant the ceph group read permission on the keyring file'
            description: 'Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`'
          - title: 'Create a specific rados_id'
            description: 'You can optionally create a dedicated rados_id to use instead of the `admin` one (a commented sketch follows this list).'
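      # A hedged sketch of the optional rados_id prerequisite above. The user name
      # `netdata` and its read-only capabilities are illustrative assumptions, e.g.:
      #
      #   ceph auth get-or-create client.netdata mon 'allow r' osd 'allow r' \
      #       -o /etc/ceph/ceph.client.netdata.keyring
      #   chmod 640 /etc/ceph/ceph.client.netdata.keyring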
      configuration:
        file:
          name: python.d/ceph.conf
        options:
          description: |
            There are 2 sections:

            * Global variables
            * One or more JOBS that can define multiple different instances to monitor.

            The following options can be defined globally: priority, penalty, autodetection_retry, update_every. They can also be defined per JOB to override the global values.

            Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.

            Every configuration JOB starts with a `job_name` value, which will appear in the dashboard unless a `name` parameter is specified. A commented sketch of the global/JOB layout follows the options list below.
          folding:
            title: "Config options"
            enabled: true
          list:
            - name: update_every
              description: Sets the default data collection frequency.
              default_value: 5
              required: false
            - name: priority
              description: Controls the order of charts on the Netdata dashboard.
              default_value: 60000
              required: false
            - name: autodetection_retry
              description: Sets the job re-check interval in seconds.
              default_value: 0
              required: false
            - name: penalty
              description: Indicates whether to apply a penalty to update_every in case of failures.
              default_value: yes
              required: false
            - name: name
              description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
              default_value: ''
              required: false
            - name: config_file
              description: Ceph config file.
              default_value: ''
              required: true
            - name: keyring_file
              description: Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must be group-readable.
              default_value: ''
              required: true
            - name: rados_id
              description: A rados user id to use for connecting to the Ceph cluster.
              default_value: 'admin'
              required: false
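          # A minimal sketch of the two-section layout described above. Values are
          # illustrative assumptions; the JOB name `local` mirrors the example below:
          #
          #   update_every: 10                 # global, applies to every JOB
          #   local:                           # a JOB definition
          #     config_file: '/etc/ceph/ceph.conf'
          #     keyring_file: '/etc/ceph/ceph.client.admin.keyring'
          #     update_every: 5                # per-JOB override of the global value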
        examples:
          folding:
            enabled: true
            title: "Config"
          list:
            - name: Basic local Ceph cluster
              description: A basic configuration to connect to a local Ceph cluster.
              folding:
                enabled: false
              config: |
                local:
                  config_file: '/etc/ceph/ceph.conf'
                  keyring_file: '/etc/ceph/ceph.client.admin.keyring'
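            # A hedged variant, assuming a dedicated read-only user named `netdata`
            # was created as sketched in the prerequisites (adjust to your cluster):
            - name: Non-admin rados_id
              description: A configuration using a dedicated `rados_id` instead of `admin`.
              folding:
                enabled: true
              config: |
                local:
                  config_file: '/etc/ceph/ceph.conf'
                  keyring_file: '/etc/ceph/ceph.client.netdata.keyring'
                  rados_id: 'netdata'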
    troubleshooting:
      problems:
        list: []
    alerts:
      - name: ceph_cluster_space_usage
        link: https://github.com/netdata/netdata/blob/master/health/health.d/ceph.conf
        metric: ceph.general_usage
        info: cluster disk space utilization
    metrics:
      folding:
        title: Metrics
        enabled: false
      description: ""
      availability: []
      scopes:
        - name: global
          description: "These metrics refer to the entire monitored application."
          labels: []
          metrics:
            - name: ceph.general_usage
              description: Ceph General Space
              unit: "KiB"
              chart_type: stacked
              dimensions:
                - name: avail
                - name: used
            - name: ceph.general_objects
              description: Ceph General Objects
              unit: "objects"
              chart_type: area
              dimensions:
                - name: cluster
            - name: ceph.general_bytes
              description: Ceph General Read/Write Data/s
              unit: "KiB/s"
              chart_type: area
              dimensions:
                - name: read
                - name: write
            - name: ceph.general_operations
              description: Ceph General Read/Write Operations/s
              unit: "operations"
              chart_type: area
              dimensions:
                - name: read
                - name: write
            - name: ceph.general_latency
              description: Ceph General Apply/Commit latency
              unit: "milliseconds"
              chart_type: area
              dimensions:
                - name: apply
                - name: commit
            - name: ceph.pool_usage
              description: Ceph Pools
              unit: "KiB"
              chart_type: line
              dimensions:
                - name: a dimension per Ceph Pool
            - name: ceph.pool_objects
              description: Ceph Pools
              unit: "objects"
              chart_type: line
              dimensions:
                - name: a dimension per Ceph Pool
            - name: ceph.pool_read_bytes
              description: Ceph Read Pool Data/s
              unit: "KiB/s"
              chart_type: area
              dimensions:
                - name: a dimension per Ceph Pool
            - name: ceph.pool_write_bytes
              description: Ceph Write Pool Data/s
              unit: "KiB/s"
              chart_type: area
              dimensions:
                - name: a dimension per Ceph Pool
            - name: ceph.pool_read_operations
              description: Ceph Read Pool Operations/s
              unit: "operations"
              chart_type: area
              dimensions:
                - name: a dimension per Ceph Pool
            - name: ceph.pool_write_operations
              description: Ceph Write Pool Operations/s
              unit: "operations"
              chart_type: area
              dimensions:
                - name: a dimension per Ceph Pool
            - name: ceph.osd_usage
              description: Ceph OSDs
              unit: "KiB"
              chart_type: line
              dimensions:
                - name: a dimension per Ceph OSD
            - name: ceph.osd_size
              description: Ceph OSDs size
              unit: "KiB"
              chart_type: line
              dimensions:
                - name: a dimension per Ceph OSD
            - name: ceph.apply_latency
              description: Ceph OSDs apply latency
              unit: "milliseconds"
              chart_type: line
              dimensions:
                - name: a dimension per Ceph OSD
            - name: ceph.commit_latency
              description: Ceph OSDs commit latency
              unit: "milliseconds"
              chart_type: line
              dimensions:
                - name: a dimension per Ceph OSD