
Remove backends subsystem (#12146)

Vladimir Kobal, 3 years ago
commit 52456f5baf

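This commit removes the deprecated `backends/` subsystem in favor of the [exporting engine](/exporting/README.md). For setups still using the old `[backend]` section of `netdata.conf`, the equivalent configuration moves to per-connector sections in `exporting.conf`. A minimal sketch, assuming the exporting engine's section syntax (the instance name is a placeholder):

```conf
[exporting:global]
    enabled = yes

# one section per connector instance, e.g. a graphite connector
[graphite:my_instance]
    enabled = yes
    destination = localhost:2003
    data source = average
    update every = 10
```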
+ 0 - 5
.github/CODEOWNERS

@@ -8,11 +8,6 @@
 .travis/ @Ferroin @iigorkarpov @maneamarius @kaskavel
 .github/ @Ferroin @iigorkarpov @maneamarius @kaskavel
 aclk/ @stelfrag @underhood
-backends/ @thiagoftsm @vlvkobal
-backends/graphite/ @thiagoftsm @vlvkobal
-backends/json/ @thiagoftsm @vlvkobal
-backends/opentsdb/ @thiagoftsm @vlvkobal
-backends/prometheus/ @vlvkobal @thiagoftsm
 build/ @Ferroin @iigorkarpov @maneamarius
 contrib/debian @Ferroin @iigorkarpov @maneamarius
 collectors/ @vlvkobal

+ 0 - 4
.github/labeler.yml

@@ -15,10 +15,6 @@ ACLK:
   - aclk/**/*
   - mqtt_websockets
 
-area/backends:
-  - backends/*
-  - backends/**/*
-
 area/claim:
   - claim/*
 

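With the `area/backends` mapping removed, backend changes no longer receive a label. For reference, this is the mapping style the file uses; the `area/exporting` entry below is illustrative only (an assumption, not shown in this diff):

```yaml
area/exporting:
  - exporting/*
  - exporting/**/*
```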
+ 21 - 52
CMakeLists.txt

@@ -745,19 +745,6 @@ set(STREAMING_PLUGIN_FILES
         streaming/sender.c
         )
 
-set(BACKENDS_PLUGIN_FILES
-        backends/backends.c
-        backends/backends.h
-        backends/graphite/graphite.c
-        backends/graphite/graphite.h
-        backends/json/json.c
-        backends/json/json.h
-        backends/opentsdb/opentsdb.c
-        backends/opentsdb/opentsdb.h
-        backends/prometheus/backend_prometheus.c
-        backends/prometheus/backend_prometheus.h
-        )
-
 set(CLAIM_PLUGIN_FILES
         claim/claim.c
         claim/claim.h
@@ -884,23 +871,6 @@ set(MONGODB_EXPORTING_FILES
         exporting/mongodb/mongodb.h
         )
 
-set(KINESIS_BACKEND_FILES
-        backends/aws_kinesis/aws_kinesis.c
-        backends/aws_kinesis/aws_kinesis.h
-        backends/aws_kinesis/aws_kinesis_put_record.cc
-        backends/aws_kinesis/aws_kinesis_put_record.h
-        )
-
-set(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES
-        backends/prometheus/remote_write/remote_write.cc
-        backends/prometheus/remote_write/remote_write.h
-        )
-
-set(MONGODB_BACKEND_FILES
-        backends/mongodb/mongodb.c
-        backends/mongodb/mongodb.h
-        )
-
 set(DAEMON_FILES
         daemon/buildinfo.c
         daemon/buildinfo.h
@@ -960,7 +930,6 @@ set(NETDATA_FILES
         collectors/all.h
         ${DAEMON_FILES}
         ${API_PLUGIN_FILES}
-        ${BACKENDS_PLUGIN_FILES}
         ${EXPORTING_ENGINE_FILES}
         ${CHECKS_PLUGIN_FILES}
         ${HEALTH_PLUGIN_FILES}
@@ -997,25 +966,25 @@ add_definitions(
 )
 
 # -----------------------------------------------------------------------------
-# kinesis backend
+# kinesis exporting connector
 
 IF(KINESIS_LIBRARIES AND AWS_CORE_LIBRARIES AND HAVE_AWS_EVENT_STREAM AND HAVE_AWS_COMMON AND HAVE_AWS_CHECKSUMS AND
    CRYPTO_LIBRARIES AND SSL_LIBRARIES AND CURL_LIBRARIES)
-    SET(ENABLE_BACKEND_KINESIS True)
+    SET(ENABLE_EXPORTING_KINESIS True)
 ELSE()
-    SET(ENABLE_BACKEND_KINESIS False)
+    SET(ENABLE_EXPORTING_KINESIS False)
 ENDIF()
 
-IF(ENABLE_BACKEND_KINESIS)
-    message(STATUS "kinesis backend: enabled")
-    list(APPEND NETDATA_FILES ${KINESIS_BACKEND_FILES} ${KINESIS_EXPORTING_FILES})
+IF(ENABLE_EXPORTING_KINESIS)
+    message(STATUS "kinesis exporting: enabled")
+    list(APPEND NETDATA_FILES ${KINESIS_EXPORTING_FILES})
     list(APPEND NETDATA_COMMON_LIBRARIES ${KINESIS_LIBRARIES} ${AWS_CORE_LIBRARIES}
                                          ${CRYPTO_LIBRARIES} ${SSL_LIBRARIES} ${CURL_LIBRARIES})
     list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${KINESIS_INCLUDE_DIRS} ${AWS_CORE_INCLUDE_DIRS}
                                             ${CRYPTO_INCLUDE_DIRS} ${SSL_INCLUDE_DIRS} ${CURL_INCLUDE_DIRS})
     list(APPEND NETDATA_COMMON_CFLAGS ${CRYPTO_CFLAGS_OTHER} ${SSL_CFLAGS_OTHER} ${CURL_CFLAGS_OTHER})
 ELSE()
-    message(STATUS "kinesis backend: disabled (requires AWS SDK for C++)")
+    message(STATUS "kinesis exporting: disabled (requires AWS SDK for C++)")
 ENDIF()
 
 # -----------------------------------------------------------------------------
@@ -1038,16 +1007,16 @@ ELSE()
 ENDIF()
 
 # -----------------------------------------------------------------------------
-# prometheus remote write backend
+# prometheus remote write exporting connector
 
 IF(PROTOBUF_LIBRARIES AND SNAPPY_LIBRARIES)
-    SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE True)
+    SET(ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE True)
 ELSE()
-    SET(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE False)
+    SET(ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE False)
 ENDIF()
 
-IF(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
-    message(STATUS "prometheus remote write backend: enabled")
+IF(ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE)
+    message(STATUS "prometheus remote write exporting: enabled")
 
     find_package(Protobuf REQUIRED)
     
@@ -1083,26 +1052,26 @@ IF(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
 
     protobuf_remote_write_generate_cpp(PROTO_SRCS PROTO_HDRS exporting/prometheus/remote_write/remote_write.proto)
 
-    list(APPEND NETDATA_FILES ${PROMETHEUS_REMOTE_WRITE_BACKEND_FILES} ${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
+    list(APPEND NETDATA_FILES ${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
     list(APPEND NETDATA_COMMON_LIBRARIES ${PROTOBUF_LIBRARIES} ${SNAPPY_LIBRARIES})
     list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${PROTOBUF_INCLUDE_DIRS} ${SNAPPY_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR})
     list(APPEND NETDATA_COMMON_CFLAGS ${PROTOBUF_CFLAGS_OTHER} ${SNAPPY_CFLAGS_OTHER})
 ELSE()
-    message(STATUS "prometheus remote write backend: disabled (requires protobuf and snappy libraries)")
+    message(STATUS "prometheus remote write exporting: disabled (requires protobuf and snappy libraries)")
 ENDIF()
 
 # -----------------------------------------------------------------------------
-# mongodb backend
+# mongodb exporting connector
 
 IF(MONGOC_LIBRARIES)
-    message(STATUS "mongodb backend: enabled")
+    message(STATUS "mongodb exporting: enabled")
 
-    list(APPEND NETDATA_FILES ${MONGODB_BACKEND_FILES} ${MONGODB_EXPORTING_FILES})
+    list(APPEND NETDATA_FILES ${MONGODB_EXPORTING_FILES})
     list(APPEND NETDATA_COMMON_LIBRARIES ${MONGOC_LIBRARIES})
     list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${MONGOC_INCLUDE_DIRS})
     list(APPEND NETDATA_COMMON_CFLAGS ${MONGOC_CFLAGS_OTHER})
 ELSE()
-    message(STATUS "mongodb backend: disabled (requires mongoc library)")
+    message(STATUS "mongodb exporting: disabled (requires mongoc library)")
 ENDIF()
 
 set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} m ${CMAKE_THREAD_LIBS_INIT})
@@ -1223,7 +1192,7 @@ ELSEIF(MACOS)
 
 ENDIF()
 
-IF(ENABLE_BACKEND_KINESIS OR ENABLE_EXPORTING_PUBSUB OR ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
+IF(ENABLE_EXPORTING_KINESIS OR ENABLE_EXPORTING_PUBSUB OR ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE)
     set_property(TARGET netdata PROPERTY CXX_STANDARD 11)
     set_property(TARGET netdata PROPERTY CMAKE_CXX_STANDARD_REQUIRED ON)
 ENDIF()
@@ -1397,7 +1366,7 @@ if(BUILD_TESTING)
     set(KINESIS_LINK_OPTIONS)
     set(PUBSUB_LINK_OPTIONS)
     set(MONGODB_LINK_OPTIONS)
-if(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
+if(ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE)
     list(APPEND EXPORTING_ENGINE_FILES ${PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES} ${PROTO_SRCS} ${PROTO_HDRS})
     list(
         APPEND PROMETHEUS_REMOTE_WRITE_LINK_OPTIONS
@@ -1407,7 +1376,7 @@ if(ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE)
         -Wl,--wrap=add_metric
     )
 endif()
-if(ENABLE_BACKEND_KINESIS)
+if(ENABLE_EXPORTING_KINESIS)
     list(APPEND EXPORTING_ENGINE_FILES ${KINESIS_EXPORTING_FILES})
     list(
         APPEND KINESIS_LINK_OPTIONS

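Since these hunks only rename build switches (`ENABLE_BACKEND_*` to `ENABLE_EXPORTING_*`) and drop the backend source lists, the rename can be verified from the configure output. A rough sketch, assuming an out-of-tree build directory:

```sh
# The renamed status lines should now read "... exporting: enabled/disabled"
mkdir -p build && cd build
cmake .. 2>&1 | grep -i "exporting:"

# No ENABLE_BACKEND_* switches should survive in the build files
grep -n "ENABLE_BACKEND" ../CMakeLists.txt || echo "no backend flags left"
```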
+ 13 - 45
Makefile.am

@@ -99,7 +99,6 @@ dist_noinst_SCRIPTS = \
 # Compile netdata binaries
 
 SUBDIRS += \
-    backends \
     collectors \
     daemon \
     database \
@@ -580,19 +579,6 @@ WEB_PLUGIN_FILES = \
     web/server/static/static-threaded.h \
     $(NULL)
 
-BACKENDS_PLUGIN_FILES = \
-    backends/backends.c \
-    backends/backends.h \
-    backends/graphite/graphite.c \
-    backends/graphite/graphite.h \
-    backends/json/json.c \
-    backends/json/json.h \
-    backends/opentsdb/opentsdb.c \
-    backends/opentsdb/opentsdb.h \
-    backends/prometheus/backend_prometheus.c \
-    backends/prometheus/backend_prometheus.h \
-    $(NULL)
-
 CLAIM_FILES = \
     claim/claim.c \
     claim/claim.h \
@@ -831,23 +817,6 @@ MONGODB_EXPORTING_FILES = \
     exporting/mongodb/mongodb.h \
     $(NULL)
 
-KINESIS_BACKEND_FILES = \
-    backends/aws_kinesis/aws_kinesis.c \
-    backends/aws_kinesis/aws_kinesis.h \
-    backends/aws_kinesis/aws_kinesis_put_record.cc \
-    backends/aws_kinesis/aws_kinesis_put_record.h \
-    $(NULL)
-
-PROMETHEUS_REMOTE_WRITE_BACKEND_FILES = \
-    backends/prometheus/remote_write/remote_write.cc \
-    backends/prometheus/remote_write/remote_write.h \
-    $(NULL)
-
-MONGODB_BACKEND_FILES = \
-    backends/mongodb/mongodb.c \
-    backends/mongodb/mongodb.h \
-    $(NULL)
-
 DAEMON_FILES = \
     daemon/buildinfo.c \
     daemon/buildinfo.h \
@@ -877,7 +846,6 @@ NETDATA_FILES = \
     $(DAEMON_FILES) \
     $(LIBNETDATA_FILES) \
     $(API_PLUGIN_FILES) \
-    $(BACKENDS_PLUGIN_FILES) \
     $(EXPORTING_ENGINE_FILES) \
     $(CHECKS_PLUGIN_FILES) \
     $(HEALTH_PLUGIN_FILES) \
@@ -1065,8 +1033,8 @@ if ENABLE_PLUGIN_SLABINFO
         $(NULL)
 endif
 
-if ENABLE_BACKEND_KINESIS
-    netdata_SOURCES += $(KINESIS_BACKEND_FILES) $(KINESIS_EXPORTING_FILES)
+if ENABLE_EXPORTING_KINESIS
+    netdata_SOURCES += $(KINESIS_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
 endif
 
@@ -1075,17 +1043,17 @@ if ENABLE_EXPORTING_PUBSUB
     netdata_LDADD += $(OPTIONAL_PUBSUB_LIBS)
 endif
 
-if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
-    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES) $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
+if ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE
+    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS) \
         $(OPTIONAL_PROTOBUF_LIBS) \
         $(NULL)
-    BACKEND_PROMETHEUS_BUILT_SOURCES = \
+    EXPORTING_PROMETHEUS_BUILT_SOURCES = \
         exporting/prometheus/remote_write/remote_write.pb.cc \
         exporting/prometheus/remote_write/remote_write.pb.h \
         $(NULL)
-    BUILT_SOURCES += $(BACKEND_PROMETHEUS_BUILT_SOURCES)
-    nodist_netdata_SOURCES += $(BACKEND_PROMETHEUS_BUILT_SOURCES)
+    BUILT_SOURCES += $(EXPORTING_PROMETHEUS_BUILT_SOURCES)
+    nodist_netdata_SOURCES += $(EXPORTING_PROMETHEUS_BUILT_SOURCES)
 
 exporting/prometheus/remote_write/remote_write.pb.cc \
 exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote_write/remote_write.proto
@@ -1093,8 +1061,8 @@ exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote
 
 endif
 
-if ENABLE_BACKEND_MONGODB
-    netdata_SOURCES += $(MONGODB_BACKEND_FILES) $(MONGODB_EXPORTING_FILES)
+if ENABLE_EXPORTING_MONGODB
+    netdata_SOURCES += $(MONGODB_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
 endif
 
@@ -1217,7 +1185,7 @@ if ENABLE_UNITTESTS
         $(TEST_LDFLAGS) \
         $(NULL)
     exporting_tests_exporting_engine_testdriver_LDADD = $(NETDATA_COMMON_LIBS) $(TEST_LIBS)
-if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
+if ENABLE_EXPORTING_PROMETHEUS_REMOTE_WRITE
     exporting_tests_exporting_engine_testdriver_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
     exporting_tests_exporting_engine_testdriver_LDADD += \
         $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS) \
@@ -1229,9 +1197,9 @@ if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
         -Wl,--wrap=add_label \
         -Wl,--wrap=add_metric \
         $(NULL)
-    nodist_exporting_tests_exporting_engine_testdriver_SOURCES = $(BACKEND_PROMETHEUS_BUILT_SOURCES)
+    nodist_exporting_tests_exporting_engine_testdriver_SOURCES = $(EXPORTING_PROMETHEUS_BUILT_SOURCES)
 endif
-if ENABLE_BACKEND_KINESIS
+if ENABLE_EXPORTING_KINESIS
     exporting_tests_exporting_engine_testdriver_SOURCES += $(KINESIS_EXPORTING_FILES)
     exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_KINESIS_LIBS)
     exporting_tests_exporting_engine_testdriver_LDFLAGS += \
@@ -1251,7 +1219,7 @@ if ENABLE_EXPORTING_PUBSUB
         -Wl,--wrap=pubsub_get_result \
         $(NULL)
 endif
-if ENABLE_BACKEND_MONGODB
+if ENABLE_EXPORTING_MONGODB
     exporting_tests_exporting_engine_testdriver_SOURCES += $(MONGODB_EXPORTING_FILES)
     exporting_tests_exporting_engine_testdriver_LDADD += $(OPTIONAL_MONGOC_LIBS)
     exporting_tests_exporting_engine_testdriver_LDFLAGS += \

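The automake side mirrors the CMake rename. The `ENABLE_EXPORTING_*` conditionals are presumably defined by matching `AM_CONDITIONAL` calls in `configure.ac`, which is not shown in this diff; after regenerating the build system, the renamed conditionals can be spot-checked in the generated `Makefile`:

```sh
# Regenerate and confirm the renamed conditionals reached the generated Makefile
autoreconf -ivf && ./configure >/dev/null
grep -E "ENABLE_EXPORTING_(KINESIS|MONGODB|PROMETHEUS_REMOTE_WRITE)" Makefile | head
```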
+ 0 - 22
backends/Makefile.am

@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-SUBDIRS = \
-    graphite \
-    json \
-    opentsdb \
-    prometheus \
-    aws_kinesis \
-    mongodb \
-    $(NULL)
-
-dist_noinst_DATA = \
-    README.md \
-    WALKTHROUGH.md \
-    $(NULL)
-
-dist_noinst_SCRIPTS = \
-    nc-backend.sh \
-    $(NULL)

+ 0 - 236
backends/README.md

@@ -1,236 +0,0 @@
-<!--
-title: "Metrics long term archiving"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/README.md
--->
-
-# Metrics long term archiving
-
-> ⚠️ The backends system is now deprecated in favor of the [exporting engine](/exporting/README.md).
-
-Netdata supports backends for archiving the metrics, or providing long term dashboards, using Grafana or other tools,
-like this:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/20649711/29f182ba-b4ce-11e6-97c8-ab2c0ab59833.png)
-
-Since Netdata collects thousands of metrics per server per second, which would easily congest any backend server when
-several Netdata servers are sending data to it, Netdata allows sending metrics at a lower frequency, by resampling them.
-
-So, although Netdata collects metrics every second, it can send averages or sums to the backend servers every X seconds
-(though it can also send them per second if you need it to).
-
-## features
-
-1.  Supported backends
-
-    -   **graphite** (`plaintext interface`, used by **Graphite**, **InfluxDB**, **KairosDB**, **Blueflood**,
-        **ElasticSearch** via logstash tcp input and the graphite codec, etc)
-
-        metrics are sent to the backend server as `prefix.hostname.chart.dimension`. `prefix` is configured below,
-        `hostname` is the hostname of the machine (can also be configured).
-
-    -   **opentsdb** (`telnet or HTTP interfaces`, used by **OpenTSDB**, **InfluxDB**, **KairosDB**, etc)
-
-        metrics are sent to opentsdb as `prefix.chart.dimension` with tag `host=hostname`.
-
-    -   **json** document DBs
-
-        metrics are sent to a document db, `JSON` formatted.
-
-    -   **prometheus** is described at [prometheus page](/backends/prometheus/README.md) since it pulls data from
-        Netdata.
-
-    -   **prometheus remote write** (a binary snappy-compressed protocol buffer encoding over HTTP used by
-        **Elasticsearch**, **Gnocchi**, **Graphite**, **InfluxDB**, **Kafka**, **OpenTSDB**, **PostgreSQL/TimescaleDB**,
-        **Splunk**, **VictoriaMetrics**, and a lot of other [storage
-        providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage))
-
-        metrics are labeled in the format used by Netdata for the [plaintext prometheus
-        protocol](/backends/prometheus/README.md). Notes on using the remote write backend are [here](/backends/prometheus/remote_write/README.md).
-
-    -   **TimescaleDB** via [community-built connector](/backends/TIMESCALE.md) that takes JSON streams from a Netdata
-        client and writes them to a TimescaleDB table.
-
-    -   **AWS Kinesis Data Streams**
-
-        metrics are sent to the service in `JSON` format.
-
-    -   **MongoDB**
-
-        metrics are sent to the database in `JSON` format.
-
-2.  Only one backend may be active at a time.
-
-3.  Netdata can filter metrics (at the chart level) to send only a subset of the collected metrics.
-
-4.  Netdata supports three modes of operation for all backends:
-
-    -   `as-collected` sends to backends the metrics as they are collected, in the units they are collected. So,
-        counters are sent as counters and gauges are sent as gauges, much like all data collectors do. For example, to
-        calculate CPU utilization in this format, you need to know how to convert kernel ticks to percentage.
-
-    -   `average` sends to backends normalized metrics from the Netdata database. In this mode, all metrics are sent as
-        gauges, in the units Netdata uses. This abstracts data collection and simplifies visualization, but you will not
-        be able to copy and paste queries from other sources to convert units. For example, CPU utilization percentage
-        is calculated by Netdata, so Netdata will convert ticks to percentage and send the average percentage to the
-        backend.
-
-    -   `sum` or `volume`: the sum of the interpolated values shown on the Netdata graphs is sent to the backend. So, if
-        Netdata is configured to send data to the backend every 10 seconds, the sum of the 10 values shown on the
-        Netdata charts will be used.
-
-    Time-series databases recommend collecting the raw values (`as-collected`). If you plan to invest in building your
-    monitoring around a time-series database and you already know (or will invest in learning) how to convert units
-    and normalize the metrics in Grafana or other visualization tools, we suggest using `as-collected`.
-
-    If, on the other hand, you just need long term archiving of Netdata metrics and you plan to mainly work with
-    Netdata, we suggest using `average`. It decouples visualization from data collection, so it will generally be a lot
-    simpler. Furthermore, if you use `average`, the charts shown in the backend will match exactly what you see in
-    Netdata, which is not necessarily true for the other modes of operation.
-
-5.  This code is smart enough not to slow down Netdata, regardless of the speed of the backend server.
-
-## configuration
-
-In `/etc/netdata/netdata.conf` you should have something like this (if not, download the latest version of `netdata.conf`
-from your Netdata):
-
-```conf
-[backend]
-    enabled = yes | no
-    type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | prometheus_remote_write | json | kinesis | mongodb
-    host tags = list of TAG=VALUE
-    destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used, or a region for kinesis
-    data source = average | sum | as collected
-    prefix = Netdata
-    hostname = my-name
-    update every = 10
-    buffer on failures = 10
-    timeout ms = 20000
-    send charts matching = *
-    send hosts matching = localhost *
-    send names instead of ids = yes
-```
-
--   `enabled = yes | no`, enables or disables sending data to a backend
-
--   `type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | prometheus_remote_write | json | kinesis | mongodb`,
-    selects the backend type
-
--   `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and
-     ports to connect to. Netdata will use the **first available** to send the metrics.
-
-     The format of each item in this list is: `[PROTOCOL:]IP[:PORT]`.
-
-     `PROTOCOL` can be `udp` or `tcp`. `tcp` is the default and the only one supported by the current backends.
-
-     `IP` can be `XX.XX.XX.XX` (IPv4), or `[XX:XX...XX:XX]` (IPv6). For IPv6 you need to enclose the IP in `[]` to
-     separate it from the port.
-
-     `PORT` can be a number or a service name. If omitted, the default port for the backend will be used
-     (graphite = 2003, opentsdb = 4242).
-
-     Example IPv4:
-
-```conf
-   destination = 10.11.14.2:4242 10.11.14.3:4242 10.11.14.4:4242
-```
-
-   Example IPv6 and IPv4 together:
-
-```conf
-   destination = [ffff:...:0001]:2003 10.11.12.1:2003
-```
-
-   When multiple servers are defined, Netdata will try the next one when the first one fails. This allows you to
-   load-balance different servers: list your backend servers in a different order on each Netdata.
-
-   Netdata also ships `nc-backend.sh`, a script that can be used as a fallback backend to save the
-   metrics to disk and push them to the time-series database when it becomes available again. It can also be used to
-   monitor / trace / debug the metrics Netdata generates.
-
-   For the kinesis backend, `destination` should be set to an AWS region (for example, `us-east-1`).
-
-   The MongoDB backend doesn't use the `destination` option for its configuration. It uses the `mongodb.conf`
-   [configuration file](/backends/mongodb/README.md) instead.
-
--   `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will
-     be sent to the backend.
-
--   `hostname = my-name`, is the hostname to be used for sending data to the backend server. By default this is
-     `[global].hostname`.
-
--   `prefix = Netdata`, is the prefix to add to all metrics.
-
--   `update every = 10`, is the number of seconds between sending data to the backend. Netdata will add some randomness
-     to this number, to prevent stressing the backend server when many Netdata servers send data to the same backend.
-     This randomness does not affect the quality of the data, only the time at which they are sent.
-
--   `buffer on failures = 10`, is the number of iterations (each iteration is `[backend].update every` seconds) to
-     buffer data, when the backend is not available. If the backend fails to receive the data after that many failures,
-     data loss on the backend is expected (Netdata will also log it).
-
--   `timeout ms = 20000`, is the timeout in milliseconds to wait for the backend server to process the data. By default
-     this is `2 * update_every * 1000`.
-
--   `send hosts matching = localhost *` includes one or more space separated patterns, using `*` as wildcard (any number
-     of times within each pattern). The patterns are checked against the hostname (the localhost is always checked as
-     `localhost`), allowing us to filter which hosts will be sent to the backend when this Netdata is a central Netdata
-     aggregating multiple hosts. A pattern starting with `!` gives a negative match. So to match all hosts named `*db*`
-     except hosts containing `*child*`, use `!*child* *db*` (so, the order is important: the first pattern
-     matching the hostname will be used - positive or negative).
-
--   `send charts matching = *` includes one or more space separated patterns, using `*` as wildcard (any number of times
-     within each pattern). The patterns are checked against both chart id and chart name. A pattern starting with `!`
-     gives a negative match. So to match all charts named `apps.*` except charts ending in `*reads`, use `!*reads
-     apps.*` (so, the order is important: the first pattern matching the chart id or the chart name will be used -
-     positive or negative).
-
--   `send names instead of ids = yes | no` controls the metric names Netdata should send to the backend. Netdata supports
-     names and IDs for charts and dimensions. Usually IDs are unique identifiers as read by the system and names are
-     human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several cases they
-     are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-
--   `host tags = list of TAG=VALUE` defines tags that should be appended on all metrics for the given host. These are
-     currently only sent to graphite, json, opentsdb and prometheus. Please use the appropriate format for each
-     time-series db. For example opentsdb likes them like `TAG1=VALUE1 TAG2=VALUE2`, but prometheus likes `tag1="value1",
-     tag2="value2"`. Host tags are mirrored with database replication (streaming of metrics between Netdata servers).
-
-     Starting from Netdata v1.20 the host tags are parsed in accordance with the configured backend type and stored as
-     host labels so that they can be reused in API responses and exporting connectors. The parsing is supported for
-     graphite, json, opentsdb, and prometheus (default) backend types. You can check how the host tags were parsed using
-     the `/api/v1/info` API call.
-
-## monitoring operation
-
-Netdata provides 5 charts:
-
-1.  **Buffered metrics**, the number of metrics Netdata added to the buffer for dispatching them to the
-    backend server.
-
-2.  **Buffered data size**, the amount of data (in KB) Netdata added to the buffer.
-
-3.  ~~**Backend latency**, the time the backend server needed to process the data Netdata sent. If there was a
-    re-connection involved, this includes the connection time.~~ (this chart has been removed, because it only measures
-    the time Netdata needs to give the data to the O/S; since the backend servers do not acknowledge reception, Netdata
-    does not have any means to measure this properly).
-
-4.  **Backend operations**, the number of operations performed by Netdata.
-
-5.  **Backend thread CPU usage**, the CPU resources consumed by the Netdata thread, that is responsible for sending the
-    metrics to the backend server.
-
-![image](https://cloud.githubusercontent.com/assets/2662304/20463536/eb196084-af3d-11e6-8ee5-ddbd3b4d8449.png)
-
-## alarms
-
-Netdata adds 4 alarms:
-
-1.  `backend_last_buffering`, number of seconds since the last successful buffering of backend data
-2.  `backend_metrics_sent`, percentage of metrics sent to the backend server
-3.  `backend_metrics_lost`, number of metrics lost due to repeating failures to contact the backend server
-4.  ~~`backend_slow`, the percentage of time between iterations needed by the backend to process the data sent by
-    Netdata~~ (this was misleading and has been removed).
-
-![image](https://cloud.githubusercontent.com/assets/2662304/20463779/a46ed1c2-af43-11e6-91a5-07ca4533cac3.png)
-
-

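The removed README refers to `nc-backend.sh` as a fallback for capturing what a backend would receive. Any listener on the graphite plaintext port serves the same tracing purpose; a minimal stand-in (the `-l -p` flags assume traditional netcat; BSD netcat uses `nc -l 2003`):

```sh
# Print the "prefix.hostname.chart.dimension value timestamp" lines that a
# graphite-type backend would receive on the default port
nc -l -p 2003
```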
+ 0 - 57
backends/TIMESCALE.md

@@ -1,57 +0,0 @@
-<!--
-title: "Writing metrics to TimescaleDB"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/TIMESCALE.md
--->
-
-# Writing metrics to TimescaleDB
-
-Thanks to Netdata's community of developers and system administrators, and Mahlon Smith
-([GitHub](https://github.com/mahlonsmith)/[Website](http://www.martini.nu/)) in particular, Netdata now supports
-archiving metrics directly to TimescaleDB.
-
-What's TimescaleDB? Here's how their team defines the project on their [GitHub page](https://github.com/timescale/timescaledb):
-
-> TimescaleDB is an open-source database designed to make SQL scalable for time-series data. It is engineered up from
-> PostgreSQL, providing automatic partitioning across time and space (partitioning key), as well as full SQL support.
-
-## Quickstart
-
-To get started archiving metrics to TimescaleDB right away, check out Mahlon's [`netdata-timescale-relay`
-repository](https://github.com/mahlonsmith/netdata-timescale-relay) on GitHub. 
-
-This small program takes JSON streams from a Netdata client and writes them to a PostgreSQL (aka TimescaleDB) table.
-You'll run this program in parallel with Netdata, and after a short [configuration
-process](https://github.com/mahlonsmith/netdata-timescale-relay#configuration), your metrics should start populating
-TimescaleDB.
-
-Finally, another member of Netdata's community has built a project that quickly launches Netdata, TimescaleDB, and
-Grafana in easy-to-manage Docker containers. Rune Juhl Jacobsen's
-[project](https://github.com/runejuhl/grafana-timescaledb) uses a `Makefile` to create everything, which makes it
-perfect for testing and experimentation.
-
-## Netdata&#8596;TimescaleDB in action
-
-Aside from creating incredible contributions to Netdata, Mahlon works at [LAIKA](https://www.laika.com/), an
-Oregon-based animation studio that's helped create acclaimed films like _Coraline_ and _Kubo and the Two Strings_.
-
-As part of his work to maintain the company's infrastructure of render farms, workstations, and virtual machines, he's
-using Netdata, `netdata-timescale-relay`, and TimescaleDB to store Netdata metrics alongside other data from other
-sources.
-
-> LAIKA is a long-time PostgreSQL user and added TimescaleDB to their infrastructure in 2018 to help manage and store
-> their IT metrics and time-series data. So far, the tool has been in production at LAIKA for over a year and helps them
-> with their use case of time-based logging, where they record over 8 million metrics an hour for netdata content alone.
-
-By archiving Netdata metrics to a backend like TimescaleDB, LAIKA can consolidate metrics data from distributed machines
-efficiently. Mahlon can then correlate Netdata metrics with other sources directly in TimescaleDB.
-
-And, because LAIKA will soon be storing years' worth of Netdata metrics data in TimescaleDB, they can analyze long-term
-metrics as their films move from concept to final cut.
-
-Read the full blog post from LAIKA at the [TimescaleDB
-blog](https://blog.timescale.com/blog/writing-it-metrics-from-netdata-to-timescaledb/amp/).
-
-Thank you to Mahlon, Rune, TimescaleDB, and the members of the Netdata community that requested and then built this
-backend connection between Netdata and TimescaleDB!
-
-

+ 0 - 258
backends/WALKTHROUGH.md

@@ -1,258 +0,0 @@
-<!--
-title: "Netdata, Prometheus, Grafana stack"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/WALKTHROUGH.md
--->
-
-# Netdata, Prometheus, Grafana stack
-
-## Intro
-
-In this article I will walk you through the basics of getting Netdata, Prometheus and Grafana all working together and
-monitoring your application servers. This article will be using docker on your local workstation. We will be working
-with docker in an ad-hoc way, launching containers that run ‘/bin/bash’ and attaching a TTY to them. I use docker here
-in a purely academic fashion and do not condone running Netdata in a container. I pick this method so individuals
-without cloud accounts or access to VMs can try this out, and for its speed of deployment.
-
-## Why Netdata, Prometheus, and Grafana
-
-Some time ago I was introduced to Netdata by a coworker. We were attempting to troubleshoot python code which seemed to
-be bottlenecked. I was instantly impressed by the amount of metrics Netdata exposes to you. I quickly added Netdata to
-my set of go-to tools when troubleshooting systems performance.
-
-Some time later, I was introduced to Prometheus. Prometheus is a monitoring application which flips the normal
-architecture around and polls REST endpoints for its metrics. This architectural change greatly simplifies and decreases
-the time necessary to begin monitoring your applications. Compared to current monitoring solutions, the time spent on
-designing the infrastructure is greatly reduced. Running a single Prometheus server per application becomes feasible
-with the help of Grafana.
-
-Grafana has been the go-to graphing tool for… some time now. It’s awesome; anyone that has used it knows it’s awesome.
-We can point Grafana at Prometheus and use Prometheus as a data source. This allows a pretty simple overall monitoring
-architecture: Install Netdata on your application servers, point Prometheus at Netdata, and then point Grafana at
-Prometheus.
-
-I’m omitting an important ingredient in this stack in order to keep this tutorial simple and that is service discovery.
-My personal preference is to use Consul. Prometheus can plug into Consul and automatically begin to scrape new hosts
-that register a Netdata client with Consul.
-
-At the end of this tutorial you will understand how each technology fits together to create a modern monitoring stack.
-This stack will offer you visibility into your application and systems performance.
-
-## Getting Started - Netdata
-
-To begin, let’s create the container which we will install Netdata on. We need to run a container, forward the necessary
-port that Netdata listens on, and attach a tty so we can interact with the bash shell on the container. But before we do
-this we want name resolution between the two containers to work. In order to accomplish this we will create a
-user-defined network and attach both containers to this network. The first command we should run is: 
-
-```sh
-docker network create --driver bridge netdata-tutorial
-```
-
-With this user-defined network created, we can now launch the container we will install Netdata on and point it to this
-network.
-
-```sh
-docker run -it --name netdata --hostname netdata --network=netdata-tutorial -p 19999:19999  centos:latest '/bin/bash'
-```
-
-This command creates an interactive tty session (-it), gives the container both a name in relation to the docker daemon
-and a hostname (this is so you know what container is which when working in the shells and docker maps hostname
-resolution to this container), forwards the local port 19999 to the container’s port 19999 (-p 19999:19999), sets the
-command to run (/bin/bash) and then chooses the base container image (centos:latest). After running this you should be
-sitting inside the shell of the container.
-
-After we have entered the shell we can install Netdata. This process could not be easier. If you take a look at [this
-link](/packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
-any issues with these one-liners and their bootstrapping scripts so far (if you run into anything, do share). Run
-the following command in your container.
-
-```sh
-bash <(curl -Ss https://my-netdata.io/kickstart.sh) --dont-wait
-```
-
-After the install completes you should be able to hit the Netdata dashboard at <http://localhost:19999/> (replace
-localhost if you’re doing this on a VM or have the docker container hosted on a machine not on your local system). If
-this is your first time using Netdata I suggest you take a look around. The amount of time I’ve spent digging through
-/proc and calculating my own metrics has been greatly reduced by this tool. Take it all in.
-
-Next I want to draw your attention to a particular endpoint. Navigate to
-<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> in your browser. This is the endpoint which
-publishes all the metrics in a format which Prometheus understands. Let’s take a look at one of these metrics.
-`netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000` This
-metric represents several things, which I will go into in more detail in the section on Prometheus. For now, understand
-that this metric: `netdata_system_cpu_percentage_average` has several labels: (chart, family, dimension). This
-corresponds with the first cpu chart you see on the Netdata dashboard.
-
-![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%204.00.45%20PM.png)
-
-This CHART is called ‘system.cpu’, the FAMILY is ‘cpu’, and the DIMENSION we are observing is ‘system’. You can begin to
-draw links between the charts in Netdata and the prometheus metrics format in this manner.
-
-## Prometheus
-
-We will be installing prometheus in a container for the purpose of demonstration. While prometheus does have an official
-container I would like to walk through the install process and setup on a fresh container. This will allow anyone
-reading to migrate this tutorial to a VM or Server of any sort.
-
-Let’s start another container in the same fashion as we did the Netdata container. 
-
-```sh
-docker run -it --name prometheus --hostname prometheus --network=netdata-tutorial -p 9090:9090 centos:latest '/bin/bash'
-```
-
-This should drop you into a shell once again. Once there, quickly install your favorite editor, as we will be editing
-files later in this tutorial. 
-
-```sh
-yum install vim -y
-```
-
-Prometheus provides a tarball of their latest stable versions [here](https://prometheus.io/download/).
-
-Let’s download the latest version and install it into your container.
-
-```sh
-cd /tmp && curl -s https://api.github.com/repos/prometheus/prometheus/releases/latest \
-| grep "browser_download_url.*linux-amd64.tar.gz" \
-| cut -d '"' -f 4 \
-| wget -qi -
-
-mkdir /opt/prometheus
-
-sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
-```
-
-This should get prometheus installed into the container. Let’s test that we can run prometheus and connect to its web
-interface.
-
-```sh
-/opt/prometheus/prometheus
-```
-
-Now attempt to go to <http://localhost:9090/>. You should be presented with the prometheus homepage. This is a good
-point to talk about Prometheus’s data model, which can be viewed here: <https://prometheus.io/docs/concepts/data_model/>.
-As explained there, we have two key elements in Prometheus metrics: the ‘metric’ and its ‘labels’. Labels allow for
-granularity between metrics. Let’s use our previous example to further explain.
-
-```conf
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000
-```
-
-Here our metric is ‘netdata_system_cpu_percentage_average’ and our labels are ‘chart’, ‘family’, and ‘dimension’. The
-last two values constitute the actual metric value for the metric type (gauge, counter, etc…). We can begin graphing
-system metrics with this information, but first we need to hook up Prometheus to poll Netdata stats.
-
-Let’s move our attention to Prometheus’s configuration. Prometheus gets its config from the file located (in our example)
-at `/opt/prometheus/prometheus.yml`. I won’t spend an extensive amount of time going over the configuration values
-documented here: <https://prometheus.io/docs/operating/configuration/>. We will be adding a new “job” under
-“scrape_configs”. Let’s make the “scrape_configs” section look like this (we can use the DNS name `netdata` due to the
-custom user-defined network we created in docker beforehand).
-
-```yaml
-scrape_configs:
-  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'prometheus'
-
-    # metrics_path defaults to '/metrics'
-    # scheme defaults to 'http'.
-
-    static_configs:
-      - targets: ['localhost:9090']
-
-  - job_name: 'netdata'
-
-    metrics_path: /api/v1/allmetrics
-    params:
-      format: [ prometheus ]
-
-    static_configs:
-      - targets: ['netdata:19999']
-```
-
-Let’s start prometheus once again by running `/opt/prometheus/prometheus`. If we now navigate to prometheus at
-<http://localhost:9090/targets> we should see our target being successfully scraped. If we now go back to the
-Prometheus homepage and begin to type ‘netdata\_’, Prometheus should autocomplete metrics it is now scraping.
-
-![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.13.43%20PM.png)
-
-Let’s now start exploring how we can graph some metrics. Back in our Netdata container let’s get the CPU spinning with a
-pointless busy loop. On the shell do the following:
-
-```sh
-[root@netdata /]# while true; do echo "HOT HOT HOT CPU"; done
-```
-
-Our Netdata CPU graph should be showing some activity. Let’s represent this in Prometheus. In order to do this, let’s
-keep our metrics page open for reference: <http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes>. We are
-setting out to graph the data in the CPU chart, so let’s search for “system.cpu” in the metrics page above. We come across
-a section of metrics with the first comment `# COMMENT homogeneous chart "system.cpu", context "system.cpu", family
-"cpu", units "percentage"` followed by the metrics. This is a good start; now let us drill down to the specific metric we
-would like to graph.
-
-```conf
-# COMMENT
-netdata_system_cpu_percentage_average: dimension "system", value is percentage, gauge, dt 1501275951 to 1501275951 inclusive
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0000000 1501275951000
-```
-
-Here we learn that the metric name we care about is ‘netdata_system_cpu_percentage_average’, so throw this into Prometheus
-and see what we get. We should see something similar to this (I shut off my busy loop):
-
-![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.47.53%20PM.png)
-
-This is a good step toward what we want. Also make note that Prometheus will tag on an ‘instance’ label for us which
-corresponds to our statically defined job in the configuration file. This allows us to tailor our queries to specific
-instances. Now we need to isolate the dimension we want in our query. To do this let us refine the query slightly. Let’s
-query the dimension also. Place this into our query text box.
-`netdata_system_cpu_percentage_average{dimension="system"}` We now wind up with the following graph.
-
-![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%205.54.40%20PM.png)
-
-Awesome, this is exactly what we wanted. If you haven’t caught on yet, we can emulate entire charts from Netdata by using
-the `chart` label. If you’d like, you can combine the ‘chart’ and ‘instance’ labels to create per-instance charts.
-Let’s give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
-
-This is the basics of using Prometheus to query Netdata. I’d advise everyone at this point to read [this
-page](/backends/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
-its internal DB or can send metrics “as-collected” by specifying the ‘source=as-collected’ URL parameter, like so:
-<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes&types=yes&source=as-collected>. If you choose to use
-this method you will need to use Prometheus's set of functions here: <https://prometheus.io/docs/querying/functions/> to
-obtain useful metrics as you are now dealing with raw counters from the system. For example you will have to use the
-`irate()` function over a counter to get that metric's rate per second. If your graphing needs are met by using the
-metrics returned by Netdata's internal database (not specifying any source= url parameter) then use that. If you find
-limitations then consider re-writing your queries using the raw data and using Prometheus functions to get the desired
-chart.
-
-## Grafana
-
-Finally we make it to Grafana. This is the easiest part, in my opinion. This time we will actually run the official
-grafana docker container, as all the configuration we need to do is done via the GUI. Let’s run the following command:
-
-```sh
-docker run -i -p 3000:3000 --network=netdata-tutorial grafana/grafana
-```
-
-This will get grafana running at <http://localhost:3000/>. Let’s go there and log in using the credentials Admin:Admin.
-
-The first thing we want to do is click ‘Add data source’. Let’s make it look like the following screenshot:
-
-![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%206.36.55%20PM.png)
-
-With this completed, let’s graph! Create a new dashboard by clicking on the top-left Grafana icon and create a new graph
-in that dashboard. Fill in the query like we did above and save.
-
-![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%206.39.38%20PM.png)
-
-## Conclusion
-
-There you have it, a complete systems monitoring stack which is very easy to deploy. From here I would begin to
-understand how Prometheus and a service discovery mechanism such as Consul can play together nicely. My current prod
-deployments automatically register Netdata services into Consul and Prometheus automatically begins to scrape them. Once
-achieved you do not have to think about the monitoring system until Prometheus cannot keep up with your scale. Once this
-happens there are options presented in the Prometheus documentation for solving this. Hope this was helpful, happy
-monitoring.
-
-

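The removed walkthrough leaves its ad-hoc containers and network behind. A small cleanup sketch matching the names used in the text (the grafana container was started unnamed, so remove it by ID):

```sh
# Remove the named tutorial containers and the user-defined bridge network
docker rm -f netdata prometheus
docker network rm netdata-tutorial
```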
+ 0 - 12
backends/aws_kinesis/Makefile.am

@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
-    README.md \
-    $(NULL)
-
-dist_libconfig_DATA = \
-    aws_kinesis.conf \
-    $(NULL)

+ 0 - 53
backends/aws_kinesis/README.md

@@ -1,53 +0,0 @@
-<!--
-title: "Using Netdata with AWS Kinesis Data Streams"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/backends/aws_kinesis/README.md
--->
-
-# Using Netdata with AWS Kinesis Data Streams
-
-## Prerequisites
-
-To use AWS Kinesis as a backend, the AWS SDK for C++ should be
-[installed](https://docs.aws.amazon.com/en_us/sdk-for-cpp/v1/developer-guide/setup.html) first. `libcrypto`, `libssl`,
-and `libcurl` are also required to compile Netdata with Kinesis support enabled. Next, Netdata should be re-installed
-from source. The installer will detect that the required libraries are now available.
-
-If the AWS SDK for C++ is being installed from source, it is useful to set `-DBUILD_ONLY="kinesis"`. Otherwise, the
-build process could take a very long time. Note that the default installation path for the libraries is
-`/usr/local/lib64`. Many Linux distributions don't include this path in the default library search path, so it is
-advisable to use the following options to `cmake` while building the AWS SDK:
-
-```sh
-cmake -DCMAKE_INSTALL_LIBDIR=/usr/lib -DCMAKE_INSTALL_INCLUDEDIR=/usr/include -DBUILD_SHARED_LIBS=OFF -DBUILD_ONLY=kinesis <aws-sdk-cpp sources>
-```
-
-## Configuration
-
-To enable data sending to the kinesis backend set the following options in `netdata.conf`:
-
-```conf
-[backend]
-    enabled = yes
-    type = kinesis
-    destination = us-east-1
-```
-
-Set the `destination` option to an AWS region.
-
-In the Netdata configuration directory run `./edit-config aws_kinesis.conf` and set AWS credentials and stream name:
-
-```conf
-# AWS credentials
-aws_access_key_id = your_access_key_id
-aws_secret_access_key = your_secret_access_key
-
-# destination stream
-stream name = your_stream_name
-```
-
-Alternatively, AWS credentials can be set for the `netdata` user using the AWS SDK for C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
-
-A partition key for every record is computed automatically by Netdata in order to distribute records evenly across
-available shards.
-
-

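To confirm records actually reach the stream configured above, the AWS CLI offers a check independent of Netdata; a sketch, assuming the CLI is configured with credentials that can read the stream:

```sh
# Stream status and shard summary for the configured destination stream
aws kinesis describe-stream-summary --stream-name your_stream_name --region us-east-1
```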
Some files were not shown because too many files changed in this diff