
Add a riak plugin (#6286)

* Add a Riak plugin.
Johannes Christ, 5 years ago
parent commit c5ab82558d

+ 1 - 0
collectors/python.d.plugin/Makefile.am

@@ -87,6 +87,7 @@ include rabbitmq/Makefile.inc
 include redis/Makefile.inc
 include rethinkdbs/Makefile.inc
 include retroshare/Makefile.inc
+include riakkv/Makefile.inc
 include samba/Makefile.inc
 include sensors/Makefile.inc
 include smartd_log/Makefile.inc

+ 1 - 0
collectors/python.d.plugin/python.d.conf

@@ -88,6 +88,7 @@ nginx_log: no
 # redis: yes
 # rethinkdbs: yes
 # retroshare: yes
+# riakkv: yes
 # samba: yes
 # sensors: yes
 # smartd_log: yes

+ 13 - 0
collectors/python.d.plugin/riakkv/Makefile.inc

@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += riakkv/riakkv.chart.py
+dist_pythonconfig_DATA += riakkv/riakkv.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += riakkv/README.md riakkv/Makefile.inc
+

+ 110 - 0
collectors/python.d.plugin/riakkv/README.md

@@ -0,0 +1,110 @@
+# riakkv
+
+Monitors one or more Riak KV servers.
+
+**Requirements:**
+
+* An accessible `/stats` endpoint. See [the Riak KV configuration reference
+  documentation](https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces)
+  for how to enable this.
+
+The following charts are included, which are mostly derived from the metrics
+listed
+[here](https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#riak-metrics-to-graph).
+
+1. **Throughput** in operations/s
+  * **KV operations**
+    * gets
+    * puts
+
+  * **Data type updates**
+    * counters
+    * sets
+    * maps
+
+  * **Search queries**
+    * queries
+
+  * **Search documents**
+    * indexed
+
+  * **Strong consistency operations**
+    * gets
+    * puts
+
+2. **Latency** in milliseconds
+  * **KV latency** of the past minute
+    * get (mean, median, 95th / 99th / 100th percentile)
+    * put (mean, median, 95th / 99th / 100th percentile)
+
+  * **Data type latency** of the past minute
+    * counter_merge (mean, median, 95th / 99th / 100th percentile)
+    * set_merge (mean, median, 95th / 99th / 100th percentile)
+    * map_merge (mean, median, 95th / 99th / 100th percentile)
+
+  * **Search latency** of the past minute
+    * query (median, min, max, 95th / 99th / 99.9th percentile)
+    * index (median, min, max, 95th / 99th / 99.9th percentile)
+
+  * **Strong consistency latency** of the past minute
+    * get (mean, median, 95th / 99th / 100th percentile)
+    * put (mean, median, 95th / 99th / 100th percentile)
+
+3. **Erlang VM metrics**
+  * **System counters**
+    * processes
+
+  * **Memory allocation** in MB
+    * processes.allocated
+    * processes.used
+
+4. **General load / health metrics**
+  * **Siblings encountered in KV operations** during the past minute
+    * get (mean, median, 95th / 99th / 100th percentile)
+
+  * **Object size in KV operations** during the past minute in KB
+    * get (mean, median, 95th / 99th / 100th percentile)
+
+  * **Message queue length** in unprocessed messages
+    * vnodeq_size (mean, median, 95th / 99th / 100th percentile)
+
+  * **Index operations** encountered by Search
+    * errors
+
+  * **Protocol buffer connections**
+    * active
+
+  * **Repair operations coordinated by this node**
+    * read
+
+  * **Active finite state machines by kind**
+    * get
+    * put
+    * secondary_index
+    * list_keys
+
+  * **Rejected finite state machines**
+    * get
+    * put
+
+  * **Number of writes to Search that failed due to bad data format, by reason**
+    * bad_entry
+    * extract_fail
+
+
+### configuration
+
+The module needs to be passed the full URL to Riak's stats endpoint.
+For example:
+
+```yaml
+myriak:
+  url: http://myriak.example.com:8098/stats
+```
+
+With no explicit configuration given, the module will attempt to connect to
+`http://localhost:8098/stats`.
+
+The default update frequency of the module is set to 2 seconds because Riak
+internally updates its metrics every second. Polling at the same rate would
+produce odd jitter in the resulting graph.
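
For a quick sanity check before enabling the module, Riak's stats endpoint can be queried directly. The sketch below is not part of the plugin; it assumes the default local URL shown above and uses only the Python standard library to print a few of the keys the throughput and latency charts read.

```python
# Standalone check that the /stats endpoint is reachable and exposes the
# metrics the module charts. Adjust URL to match your `url:` setting.
import json
from urllib.request import urlopen

URL = "http://localhost:8098/stats"  # same default the module falls back to

with urlopen(URL) as response:
    stats = json.loads(response.read().decode("utf-8"))

# A few of the keys used by the throughput and latency charts:
for key in ("node_gets_total", "node_puts_total",
            "node_get_fsm_time_mean", "node_put_fsm_time_mean"):
    print(key, "=", stats.get(key, "missing"))
```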

+ 315 - 0
collectors/python.d.plugin/riakkv/riakkv.chart.py

@@ -0,0 +1,315 @@
+# -*- coding: utf-8 -*-
+# Description: riak netdata python.d module
+#
+# See also:
+# https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html
+
+from json import loads
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# Riak updates the metrics at the /stats endpoint every 1 second.
+# If we use `update_every = 1` here, that means we might get weird jitter in the graph,
+# so the default is set to 2 seconds to prevent it.
+update_every = 2
+
+# charts order (can be overridden if you want fewer charts, or a different order)
+ORDER = [
+    # Throughput metrics
+    # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#throughput-metrics
+    # Collected in totals.
+    "kv.node_operations",  # K/V node operations.
+    "dt.vnode_updates",  # Data type vnode updates.
+    "search.queries",  # Search queries on the node.
+    "search.documents",  # Documents indexed by Search.
+    "consistent.operations",  # Consistent node operations.
+
+    # Latency metrics
+    # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#latency-metrics
+    # Collected for the past minute in milliseconds,
+    # returned from riak in microseconds.
+    "kv.latency.get",  # K/V GET FSM traversal latency.
+    "kv.latency.put",  # K/V PUT FSM traversal latency.
+    "dt.latency.counter",  # Update Counter Data type latency.
+    "dt.latency.set",  # Update Set Data type latency.
+    "dt.latency.map",  # Update Map Data type latency.
+    "search.latency.query",  # Search query latency.
+    "search.latency.index",  # Time it takes for search to index a new document.
+    "consistent.latency.get",  # Strong consistent read latency.
+    "consistent.latency.put",  # Strong consistent write latency.
+
+    # Erlang resource usage metrics
+    # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#erlang-resource-usage-metrics
+    # Processes collected as a gauge,
+    # memory collected as Megabytes, returned as bytes from Riak.
+    "vm.processes",  # Number of processes currently running in the Erlang VM.
+    "vm.memory.processes",  # Total amount of memory allocated & used for Erlang processes.
+
+    # General Riak Load / Health metrics
+    # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-load-health-metrics
+    # The following are collected by Riak over the past minute:
+    "kv.siblings_encountered.get",  # Siblings encountered during GET operations by this node.
+    "kv.objsize.get",  # Object size encountered by this node.
+    "search.vnodeq_size",  # Number of unprocessed messages in the vnode message queues (Search).
+    # The following are calculated in total, or as gauges:
+    "search.index_errors",  # Errors of the search subsystem while indexing documents.
+    "core.pbc",  # Number of currently active protocol buffer connections.
+    "core.repairs",  # Total read repair operations coordinated by this node.
+    "core.fsm_active",  # Active finite state machines by kind.
+    "core.fsm_rejected",  # Rejected finite state machines by kind.
+
+    # General Riak Search Load / Health metrics
+    # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-search-load-health-metrics
+    # Reported as counters.
+    "search.errors",  # Write and read errors of the Search subsystem.
+]
+
+CHARTS = {
+    # Throughput metrics
+    "kv.node_operations": {
+        "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput", "line"],
+        "lines": [
+            ["node_gets_total", "gets", "incremental"],
+            ["node_puts_total", "puts", "incremental"]
+        ]
+    },
+    "dt.vnode_updates": {
+        "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput", "riak.dt.vnode_updates", "line"],
+        "lines": [
+            ["vnode_counter_update_total", "counters", "incremental"],
+            ["vnode_set_update_total", "sets", "incremental"],
+            ["vnode_map_update_total", "maps", "incremental"],
+        ]
+    },
+    "search.queries": {
+        "options": [None, "Search queries on the node", "queries/s", "throughput", "riak.search", "line"],
+        "lines": [
+            ["search_query_throughput_count", "queries", "incremental"]
+        ]
+    },
+    "search.documents": {
+        "options": [None, "Documents indexed by search", "documents/s", "throughput", "riak.search.documents", "line"],
+        "lines": [
+            ["search_index_throughput_count", "indexed", "incremental"]
+        ]
+    },
+    "consistent.operations": {
+        "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations", "line"],
+        "lines": [
+            ["consistent_gets_total", "gets", "incremental"],
+            ["consistent_puts_total", "puts", "incremental"],
+        ]
+    },
+
+    # Latency metrics
+    "kv.latency.get": {
+        "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms", "latency", "riak.kv.latency.get", "line"],
+        "lines": [
+            ["node_get_fsm_time_mean", "mean", "absolute", 1, 1000],
+            ["node_get_fsm_time_median", "median", "absolute", 1, 1000],
+            ["node_get_fsm_time_95", "95", "absolute", 1, 1000],
+            ["node_get_fsm_time_99", "99", "absolute", 1, 1000],
+            ["node_get_fsm_time_100", "100", "absolute", 1, 1000],
+        ]
+    },
+    "kv.latency.put": {
+        "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms", "latency", "riak.kv.latency.put", "line"],
+        "lines": [
+            ["node_put_fsm_time_mean", "mean", "absolute", 1, 1000],
+            ["node_put_fsm_time_median", "median", "absolute", 1, 1000],
+            ["node_put_fsm_time_95", "95", "absolute", 1, 1000],
+            ["node_put_fsm_time_99", "99", "absolute", 1, 1000],
+            ["node_put_fsm_time_100", "100", "absolute", 1, 1000],
+        ]
+    },
+    "dt.latency.counter": {
+        "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency", "riak.dt.latency.counter_merge", "line"],
+        "lines": [
+            ["object_counter_merge_time_mean", "mean", "absolute", 1, 1000],
+            ["object_counter_merge_time_median", "median", "absolute", 1, 1000],
+            ["object_counter_merge_time_95", "95", "absolute", 1, 1000],
+            ["object_counter_merge_time_99", "99", "absolute", 1, 1000],
+            ["object_counter_merge_time_100", "100", "absolute", 1, 1000],
+        ]
+    },
+    "dt.latency.set": {
+        "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency", "riak.dt.latency.set_merge", "line"],
+        "lines": [
+            ["object_set_merge_time_mean", "mean", "absolute", 1, 1000],
+            ["object_set_merge_time_median", "median", "absolute", 1, 1000],
+            ["object_set_merge_time_95", "95", "absolute", 1, 1000],
+            ["object_set_merge_time_99", "99", "absolute", 1, 1000],
+            ["object_set_merge_time_100", "100", "absolute", 1, 1000],
+        ]
+    },
+    "dt.latency.map": {
+        "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency", "riak.dt.latency.map_merge", "line"],
+        "lines": [
+            ["object_map_merge_time_mean", "mean", "absolute", 1, 1000],
+            ["object_map_merge_time_median", "median", "absolute", 1, 1000],
+            ["object_map_merge_time_95", "95", "absolute", 1, 1000],
+            ["object_map_merge_time_99", "99", "absolute", 1, 1000],
+            ["object_map_merge_time_100", "100", "absolute", 1, 1000],
+        ]
+    },
+    "search.latency.query": {
+        "options": [None, "Search query latency", "ms", "latency", "riak.search.latency.query", "line"],
+        "lines": [
+            ["search_query_latency_median", "median", "absolute", 1, 1000],
+            ["search_query_latency_min", "min", "absolute", 1, 1000],
+            ["search_query_latency_95", "95", "absolute", 1, 1000],
+            ["search_query_latency_99", "99", "absolute", 1, 1000],
+            ["search_query_latency_999", "999", "absolute", 1, 1000],
+            ["search_query_latency_max", "max", "absolute", 1, 1000],
+        ]
+    },
+    "search.latency.index": {
+        "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index", "line"],
+        "lines": [
+            ["search_index_latency_median", "median", "absolute", 1, 1000],
+            ["search_index_latency_min", "min", "absolute", 1, 1000],
+            ["search_index_latency_95", "95", "absolute", 1, 1000],
+            ["search_index_latency_99", "99", "absolute", 1, 1000],
+            ["search_index_latency_999", "999", "absolute", 1, 1000],
+            ["search_index_latency_max", "max", "absolute", 1, 1000],
+        ]
+    },
+
+    # Riak Strong Consistency metrics
+    "consistent.latency.get": {
+        "options": [None, "Strongly consistent read latency", "ms", "latency", "riak.consistent.latency.get", "line"],
+        "lines": [
+            ["consistent_get_time_mean", "mean", "absolute", 1, 1000],
+            ["consistent_get_time_median", "median", "absolute", 1, 1000],
+            ["consistent_get_time_95", "95", "absolute", 1, 1000],
+            ["consistent_get_time_99", "99", "absolute", 1, 1000],
+            ["consistent_get_time_100", "100", "absolute", 1, 1000],
+        ]
+    },
+    "consistent.latency.put": {
+        "options": [None, "Strongly consistent write latency", "ms", "latency", "riak.consistent.latency.put", "line"],
+        "lines": [
+            ["consistent_put_time_mean", "mean", "absolute", 1, 1000],
+            ["consistent_put_time_median", "median", "absolute", 1, 1000],
+            ["consistent_put_time_95", "95", "absolute", 1, 1000],
+            ["consistent_put_time_99", "99", "absolute", 1, 1000],
+            ["consistent_put_time_100", "100", "absolute", 1, 1000],
+        ]
+    },
+
+    # BEAM metrics
+    "vm.processes": {
+        "options": [None, "Total processes running in the Erlang VM", "total", "vm", "riak.vm", "line"],
+        "lines": [
+            ["sys_process_count", "processes", "absolute"],
+        ]
+    },
+    "vm.memory.processes": {
+        "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes", "line"],
+        "lines": [
+            ["memory_processes", "allocated", "absolute", 1, 1024 * 1024],
+            ["memory_processes_used", "used", "absolute", 1, 1024 * 1024]
+        ]
+    },
+
+    # General Riak Load/Health metrics
+    "kv.siblings_encountered.get": {
+        "options": [None, "Number of siblings encountered during GET operations by this node during the past minute", "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
+        "lines": [
+            ["node_get_fsm_siblings_mean", "mean", "absolute"],
+            ["node_get_fsm_siblings_median", "median", "absolute"],
+            ["node_get_fsm_siblings_95", "95", "absolute"],
+            ["node_get_fsm_siblings_99", "99", "absolute"],
+            ["node_get_fsm_siblings_100", "100", "absolute"],
+        ]
+    },
+    "kv.objsize.get": {
+        "options": [None, "Object size encountered by this node during the past minute", "KB", "load", "riak.kv.objsize.get", "line"],
+        "lines": [
+            ["node_get_fsm_objsize_mean", "mean", "absolute", 1, 1024],
+            ["node_get_fsm_objsize_median", "median", "absolute", 1, 1024],
+            ["node_get_fsm_objsize_95", "95", "absolute", 1, 1024],
+            ["node_get_fsm_objsize_99", "99", "absolute", 1, 1024],
+            ["node_get_fsm_objsize_100", "100", "absolute", 1, 1024],
+        ]
+    },
+    "search.vnodeq_size": {
+        "options": [None, "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute", "messages", "load", "riak.search.vnodeq_size", "line"],
+        "lines": [
+            ["riak_search_vnodeq_mean", "mean", "absolute"],
+            ["riak_search_vnodeq_median", "median", "absolute"],
+            ["riak_search_vnodeq_95", "95", "absolute"],
+            ["riak_search_vnodeq_99", "99", "absolute"],
+            ["riak_search_vnodeq_100", "100", "absolute"],
+        ]
+    },
+    "search.index_errors": {
+        "options": [None, "Number of document index errors encountered by Search", "errors", "load", "riak.search.index", "line"],
+        "lines": [
+            ["search_index_fail_count", "errors", "absolute"]
+        ]
+    },
+    "core.pbc": {
+        "options": [None, "Protocol buffer connections by status", "connections", "load", "riak.core.protobuf_connections", "line"],
+        "lines": [
+            ["pbc_active", "active", "absolute"],
+            # ["pbc_connects", "established_pastmin", "absolute"]
+        ]
+    },
+    "core.repairs": {
+        "options": [None, "Number of repair operations this node has coordinated", "repairs", "load", "riak.core.repairs", "line"],
+        "lines": [
+            ["read_repairs", "read", "absolute"]
+        ]
+    },
+    "core.fsm_active": {
+        "options": [None, "Active finite state machines by kind", "fsms", "load", "riak.core.fsm_active", "line"],
+        "lines": [
+            ["node_get_fsm_active", "get", "absolute"],
+            ["node_put_fsm_active", "put", "absolute"],
+            ["index_fsm_active", "secondary index", "absolute"],
+            ["list_fsm_active", "list keys", "absolute"]
+        ]
+    },
+    "core.fsm_rejected": {
+        # Writing "Sidejob's" here seems to cause some weird issues: it results in this chart being rendered in
+        # its own context and additionally, moves the entire Riak graph all the way up to the top of the Netdata
+        # dashboard for some reason.
+        "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load", "riak.core.fsm_rejected", "line"],
+        "lines": [
+            ["node_get_fsm_rejected", "get", "absolute"],
+            ["node_put_fsm_rejected", "put", "absolute"]
+        ]
+    },
+
+    # General Riak Search Load / Health metrics
+    "search.errors": {
+        "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load", "riak.search.index", "line"],
+        "lines": [
+            ["search_index_bad_entry_count", "bad_entry", "absolute"],
+            ["search_index_extract_fail_count", "extract_fail", "absolute"],
+        ]
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+
+    def _get_data(self):
+        """
+        Format data received from http request
+        :return: dict
+        """
+        raw = self._get_raw_data()
+        if not raw:
+            return None
+
+        try:
+            return loads(raw)
+        except (TypeError, ValueError) as err:
+            self.error(err)
+            return None
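
For context on how the definitions above are consumed: python.d matches each dimension ID (the first element of every entry in a chart's `lines`) against a key of the dict that `_get_data()` returns, so a key missing from Riak's `/stats` JSON simply leaves a gap in the corresponding dimension. The following standalone sketch, with the hypothetical helper `covered_dimensions`, illustrates that mapping against a trimmed-down copy of the structures above.

```python
# Illustration only, not part of the module: list which dimension IDs a given
# stats payload would populate, using the same CHARTS layout as above.

def covered_dimensions(charts, stats):
    """Return {chart_name: [dimension IDs found in the stats dict]}."""
    coverage = {}
    for name, chart in charts.items():
        coverage[name] = [line[0] for line in chart["lines"] if line[0] in stats]
    return coverage


if __name__ == "__main__":
    sample_charts = {
        "kv.node_operations": {
            "lines": [
                ["node_gets_total", "gets", "incremental"],
                ["node_puts_total", "puts", "incremental"],
            ]
        }
    }
    sample_stats = {"node_gets_total": 1042, "node_puts_total": 998}
    print(covered_dimensions(sample_charts, sample_stats))
    # {'kv.node_operations': ['node_gets_total', 'node_puts_total']}
```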

+ 68 - 0
collectors/python.d.plugin/riakkv/riakkv.conf

@@ -0,0 +1,68 @@
+# netdata python.d.plugin configuration for riak
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     penalty: yes            # the JOB's penalty
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+local:
+  url  : 'http://localhost:8098/stats'

+ 1 - 0
health/Makefile.am

@@ -70,6 +70,7 @@ dist_healthconfig_DATA = \
     health.d/ram.conf \
     health.d/redis.conf \
     health.d/retroshare.conf \
+    health.d/riakkv.conf \
     health.d/softnet.conf \
     health.d/squid.conf \
     health.d/stiebeleltron.conf \

+ 80 - 0
health/health.d/riakkv.conf

@@ -0,0 +1,80 @@
+# Ensure that Riak is running.
+template: riak_last_collected_secs
+      on: riak.kv.throughput
+    calc: $now - $last_collected_t
+   units: seconds ago
+   every: 10s
+    warn: $this > (($status >= $WARNING)  ? ($update_every) : ( 5 * $update_every))
+    crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
+   delay: down 5m multiplier 1.5 max 1h
+    info: number of seconds since the last successful data collection
+      to: dba
+
+# Warn if a list keys operation is running.
+template: riak_list_keys_active
+      on: riak.core.fsm_active
+    calc: $list_fsm_active
+   units: state machines
+   every: 10s
+    warn: $list_fsm_active > 0
+    info: number of currently running list keys finite state machines
+      to: dba
+
+
+## Timing healthchecks
+# KV GET
+template: 1h_kv_get_mean_latency
+      on: riak.kv.latency.get
+    calc: $node_get_fsm_time_mean
+  lookup: average -1h unaligned of time
+   every: 30s
+   units: ms
+    info: mean average KV GET latency over the last hour
+
+template: riak_kv_get_slow
+      on: riak.kv.latency.get
+    calc: $mean
+  lookup: average -3m unaligned of time
+   units: ms
+   every: 10s
+    warn: ($this > ($1h_kv_get_mean_latency * 2) )
+    crit: ($this > ($1h_kv_get_mean_latency * 3) )
+    info: average KV GET time over the last 3 minutes, compared to the average over the last hour
+   delay: down 5m multiplier 1.5 max 1h
+      to: dba
+
+# KV PUT
+template: 1h_kv_put_mean_latency
+      on: riak.kv.latency.put
+    calc: $node_put_fsm_time_mean
+  lookup: average -1h unaligned of time
+   every: 30s
+   units: ms
+    info: mean average KV PUT latency over the last hour
+
+template: riak_kv_put_slow
+      on: riak.kv.latency.put
+    calc: $mean
+  lookup: average -3m unaligned of time
+   units: ms
+   every: 10s
+    warn: ($this > ($1h_kv_put_mean_latency * 2) )
+    crit: ($this > ($1h_kv_put_mean_latency * 3) )
+    info: average KV PUT time over the last 3 minutes, compared to the average over the last hour
+   delay: down 5m multiplier 1.5 max 1h
+      to: dba
+
+
+## VM healthchecks
+
+# Default Erlang VM process limit: 262144
+# On systems observed, this is < 2000, but may grow depending on load.
+template: riak_vm_high_process_count
+      on: riak.vm
+    calc: $sys_process_count
+   units: processes
+   every: 10s
+    warn: $this > 10000
+    crit: $this > 100000
+    info: number of processes running in the Erlang VM (the default limit on ERTS 10.2.4 is 262144)
+      to: dba
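
To make the threshold arithmetic of `riak_kv_get_slow` and `riak_kv_put_slow` explicit: the 3-minute average latency is compared against the 1-hour baseline, warning above twice the baseline and going critical above three times it. Here is a minimal standalone sketch of the same comparison (not netdata health-engine code):

```python
# Mirrors the warn/crit expressions of riak_kv_get_slow / riak_kv_put_slow:
#   warn: $this > (1-hour mean latency) * 2
#   crit: $this > (1-hour mean latency) * 3

def riak_latency_status(avg_3m_ms, avg_1h_ms):
    """Classify the 3-minute average against the 1-hour baseline."""
    if avg_3m_ms > 3 * avg_1h_ms:
        return "CRITICAL"
    if avg_3m_ms > 2 * avg_1h_ms:
        return "WARNING"
    return "CLEAR"


print(riak_latency_status(avg_3m_ms=12.0, avg_1h_ms=5.0))  # WARNING
print(riak_latency_status(avg_3m_ms=20.0, avg_1h_ms=5.0))  # CRITICAL
```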

+ 6 - 0
web/gui/dashboard_info.js

@@ -267,6 +267,12 @@ netdataDashboard.menu = {
         info: 'Performance metrics for <b>RetroShare</b>. RetroShare is open source software for encrypted filesharing, serverless email, instant messaging, online chat, and BBS, based on a friend-to-friend network built on GNU Privacy Guard (GPG).'
     },
 
+    'riakkv': {
+        title: 'Riak KV',
+        icon: '<i class="fas fa-database"></i>',
+        info: 'Metrics for <b>Riak KV</b>, the distributed key-value store.'
+    },
+
     'ipfs': {
         title: 'IPFS',
         icon: '<i class="fas fa-folder-open"></i>',