Prometheus web API connector (#8540)

* Fix the Prometheus web API code in the exporting engine

* Rename connector types

* Remove the conditional compilation of the exporting engine

* Use labels instead of tags

* Fix the exporter configuration

* Document functions

* Add unit tests
Vladimir Kobal · 5 years ago · commit ebbce7c777

+ 4 - 26
Makefile.am

@@ -573,6 +573,7 @@ NETDATA_FILES = \
     $(LIBNETDATA_FILES) \
     $(API_PLUGIN_FILES) \
     $(BACKENDS_PLUGIN_FILES) \
+    $(EXPORTING_ENGINE_FILES) \
     $(CHECKS_PLUGIN_FILES) \
     $(HEALTH_PLUGIN_FILES) \
     $(IDLEJITTER_PLUGIN_FILES) \
@@ -608,12 +609,6 @@ if LINUX
 
 endif
 
-if ENABLE_EXPORTING
-    NETDATA_FILES += \
-        $(EXPORTING_ENGINE_FILES) \
-        $(NULL)
-endif
-
 NETDATA_COMMON_LIBS = \
     $(OPTIONAL_MATH_LIBS) \
     $(OPTIONAL_ZLIB_LIBS) \
@@ -745,23 +740,13 @@ if ENABLE_PLUGIN_SLABINFO
         $(NULL)
 endif
 
-if ENABLE_EXPORTING
-if ENABLE_BACKEND_KINESIS
-    netdata_SOURCES += $(KINESIS_EXPORTING_FILES)
-    netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
-endif
-endif
-
 if ENABLE_BACKEND_KINESIS
-    netdata_SOURCES += $(KINESIS_BACKEND_FILES)
+    netdata_SOURCES += $(KINESIS_BACKEND_FILES) $(KINESIS_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_KINESIS_LIBS)
 endif
 
 if ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE
-if ENABLE_EXPORTING
-    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
-endif
-    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES)
+    netdata_SOURCES += $(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES) $(PROMETHEUS_REMOTE_WRITE_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS)
     BUILT_SOURCES = \
         exporting/prometheus/remote_write/remote_write.pb.cc \
@@ -775,15 +760,8 @@ exporting/prometheus/remote_write/remote_write.pb.h: exporting/prometheus/remote
 
 endif
 
-if ENABLE_EXPORTING
-if ENABLE_BACKEND_MONGODB
-    netdata_SOURCES += $(MONGODB_EXPORTING_FILES)
-    netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
-endif
-endif
-
 if ENABLE_BACKEND_MONGODB
-    netdata_SOURCES += $(MONGODB_BACKEND_FILES)
+    netdata_SOURCES += $(MONGODB_BACKEND_FILES) $(MONGODB_EXPORTING_FILES)
     netdata_LDADD += $(OPTIONAL_MONGOC_LIBS)
 endif
 

+ 0 - 4
backends/backends.h

@@ -27,10 +27,6 @@ typedef enum backend_types {
     BACKEND_TYPE_NUM                        // Number of backend types
 } BACKEND_TYPE;
 
-#ifdef ENABLE_EXPORTING
-#include "exporting/exporting_engine.h"
-#endif
-
 typedef int (**backend_response_checker_t)(BUFFER *);
 typedef int (**backend_request_formatter_t)(BUFFER *, const char *, RRDHOST *, const char *, RRDSET *, RRDDIM *, time_t, time_t, BACKEND_OPTIONS);
 

+ 20 - 20
backends/prometheus/backend_prometheus.c

@@ -44,7 +44,7 @@ static inline time_t prometheus_server_last_access(const char *server, RRDHOST *
     return 0;
 }
 
-static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable) {
+static inline size_t backends_prometheus_name_copy(char *d, const char *s, size_t usable) {
     size_t n;
 
     for(n = 0; *s && n < usable ; d++, s++, n++) {
@@ -58,7 +58,7 @@ static inline size_t prometheus_name_copy(char *d, const char *s, size_t usable)
     return n;
 }
 
-static inline size_t prometheus_label_copy(char *d, const char *s, size_t usable) {
+static inline size_t backends_prometheus_label_copy(char *d, const char *s, size_t usable) {
     size_t n;
 
     // make sure we can escape one character without overflowing the buffer
@@ -78,7 +78,7 @@ static inline size_t prometheus_label_copy(char *d, const char *s, size_t usable
     return n;
 }
 
-static inline char *prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits) {
+static inline char *backends_prometheus_units_copy(char *d, const char *s, size_t usable, int showoldunits) {
     const char *sorig = s;
     char *ret = d;
     size_t n;
@@ -194,7 +194,7 @@ static int print_host_variables(RRDVAR *rv, void *data) {
             label_post = "}";
         }
 
-        prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
+        backends_prometheus_name_copy(opts->name, rv->name, sizeof(opts->name));
 
         if(opts->output_options & BACKENDS_PROMETHEUS_OUTPUT_TIMESTAMPS)
             buffer_sprintf(opts->wb
@@ -227,7 +227,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
     rrdhost_rdlock(host);
 
     char hostname[PROMETHEUS_ELEMENT_MAX + 1];
-    prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
+    backends_prometheus_label_copy(hostname, host->hostname, PROMETHEUS_ELEMENT_MAX);
 
     char labels[PROMETHEUS_LABELS_MAX + 1] = "";
     if(allhosts) {
@@ -299,9 +299,9 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
         char family[PROMETHEUS_ELEMENT_MAX + 1];
         char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
 
-        prometheus_label_copy(chart, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
-        prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
-        prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_label_copy(chart, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
 
         if(likely(backends_can_send_rrdset(backend_options, st))) {
             rrdset_rdlock(st);
@@ -317,7 +317,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
             }
             else {
                 if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE && !(output_options & BACKENDS_PROMETHEUS_OUTPUT_HIDEUNITS))
-                    prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS);
+                    backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, output_options & BACKENDS_PROMETHEUS_OUTPUT_OLDUNITS);
             }
 
             if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
@@ -354,7 +354,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
                             // all the dimensions of the chart, has the same algorithm, multiplier and divisor
                             // we add all dimensions as labels
 
-                            prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
 
                             if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
                                 buffer_sprintf(wb
@@ -411,7 +411,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
                             // the dimensions of the chart, do not have the same algorithm, multiplier or divisor
                             // we create a metric per dimension
 
-                            prometheus_name_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_name_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
 
                             if(unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
                                 buffer_sprintf(wb
@@ -480,7 +480,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(RRDHOST *host, BUFFER
                             else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
                                 suffix = "_sum";
 
-                            prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_label_copy(dimension, (output_options & BACKENDS_PROMETHEUS_OUTPUT_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
 
                             if (unlikely(output_options & BACKENDS_PROMETHEUS_OUTPUT_HELP))
                                 buffer_sprintf(wb, "# COMMENT %s_%s%s%s: dimension \"%s\", value is %s, gauge, dt %llu to %llu inclusive\n"
@@ -593,7 +593,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
         , size_t *count_dims_skipped
 ) {
     char hostname[PROMETHEUS_ELEMENT_MAX + 1];
-    prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
+    backends_prometheus_label_copy(hostname, __hostname, PROMETHEUS_ELEMENT_MAX);
 
     backends_add_host_info("netdata_info", hostname, host->program_name, host->program_version, now_realtime_usec() / USEC_PER_MS);
 
@@ -620,9 +620,9 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
         char family[PROMETHEUS_ELEMENT_MAX + 1];
         char units[PROMETHEUS_ELEMENT_MAX + 1] = "";
 
-        prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
-        prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
-        prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_label_copy(chart, (backend_options & BACKEND_OPTION_SEND_NAMES && st->name)?st->name:st->id, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_label_copy(family, st->family, PROMETHEUS_ELEMENT_MAX);
+        backends_prometheus_name_copy(context, st->context, PROMETHEUS_ELEMENT_MAX);
 
         if(likely(backends_can_send_rrdset(backend_options, st))) {
             rrdset_rdlock(st);
@@ -640,7 +640,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
             }
             else {
                 if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_AVERAGE)
-                    prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
+                    backends_prometheus_units_copy(units, st->units, PROMETHEUS_ELEMENT_MAX, 0);
             }
 
             // for each dimension
@@ -664,7 +664,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
                             // all the dimensions of the chart, has the same algorithm, multiplier and divisor
                             // we add all dimensions as labels
 
-                            prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
                             snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s", prefix, context, suffix);
 
                             backends_add_metric(name, chart, family, dimension, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
@@ -674,7 +674,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
                             // the dimensions of the chart, do not have the same algorithm, multiplier or divisor
                             // we create a metric per dimension
 
-                            prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_name_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
                             snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s_%s%s", prefix, context, dimension, suffix);
 
                             backends_add_metric(name, chart, family, NULL, hostname, rd->last_collected_value, timeval_msec(&rd->last_collected_time));
@@ -694,7 +694,7 @@ void backends_rrd_stats_remote_write_allmetrics_prometheus(
                             else if(BACKEND_OPTIONS_DATA_SOURCE(backend_options) == BACKEND_SOURCE_DATA_SUM)
                                 suffix = "_sum";
 
-                            prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
+                            backends_prometheus_label_copy(dimension, (backend_options & BACKEND_OPTION_SEND_NAMES && rd->name) ? rd->name : rd->id, PROMETHEUS_ELEMENT_MAX);
                             snprintf(name, PROMETHEUS_LABELS_MAX, "%s_%s%s%s", prefix, context, units, suffix);
 
                             backends_add_metric(name, chart, family, dimension, hostname, value, last_t * MSEC_PER_SEC);
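
The `backends_` prefix on these helpers matters because, with the ENABLE_EXPORTING conditional gone, the legacy backends code and the new exporting engine are now always compiled into the same binary, and both carry their own Prometheus formatting helpers; renaming the backend copies avoids duplicate symbols. As a rough illustration of what such a name-copy helper does (a minimal sketch, assuming the usual Prometheus restriction of metric names to alphanumerics plus a few separators; `sanitize_prometheus_name` is a hypothetical stand-in, not the actual Netdata implementation):

```c
#include <ctype.h>
#include <stddef.h>
#include <stdio.h>

/* Sketch: copy a string into a Prometheus-safe metric name by replacing
 * every character that is not alphanumeric with '_'. The real
 * backends_prometheus_name_copy() may apply additional rules. */
static size_t sanitize_prometheus_name(char *d, const char *s, size_t usable) {
    size_t n;
    for (n = 0; *s && n < usable; d++, s++, n++)
        *d = isalnum((unsigned char)*s) ? *s : '_';
    *d = '\0';
    return n;
}

int main(void) {
    char out[64];
    sanitize_prometheus_name(out, "disk.io/read latency", sizeof(out) - 1);
    printf("%s\n", out); /* prints: disk_io_read_latency */
    return 0;
}
```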

+ 0 - 14
configure.ac

@@ -433,20 +433,6 @@ fi
 AC_MSG_RESULT([${enable_https}])
 AM_CONDITIONAL([ENABLE_HTTPS], [test "${enable_https}" = "yes"])
 
-# -----------------------------------------------------------------------------
-# Exporting engine
-AC_MSG_CHECKING([if netdata exporting engine should be used])
-if test "${UV_LIBS}"; then
-    enable_exporting_engine="yes"
-    AC_DEFINE([ENABLE_EXPORTING], [1], [netdata exporting engine usability])
-    OPTIONAL_UV_CFLAGS="${UV_CFLAGS}"
-    OPTIONAL_UV_LIBS="${UV_LIBS}"
-else
-    enable_exporting_engine="no"
-fi
-AC_MSG_RESULT([${enable_exporting_engine}])
-AM_CONDITIONAL([ENABLE_EXPORTING], [test "${enable_exporting_engine}" = "yes"])
-
 # -----------------------------------------------------------------------------
 # JSON-C
 test "${enable_jsonc}" = "yes" -a -z "${JSONC_LIBS}" && \

+ 2 - 0
daemon/common.h

@@ -50,6 +50,8 @@
 
 // backends for archiving the metrics
 #include "backends/backends.h"
+// the new exporting engine for archiving the metrics
+#include "exporting/exporting_engine.h"
 
 // the netdata API
 #include "web/api/web_api_v1.h"

+ 0 - 2
daemon/main.c

@@ -80,9 +80,7 @@ struct netdata_static_thread static_threads[] = {
 
         // common plugins for all systems
     {"BACKENDS",             NULL,                    NULL,         1, NULL, NULL, backends_main},
-#ifdef ENABLE_EXPORTING
     {"EXPORTING",            NULL,                    NULL,         1, NULL, NULL, exporting_main},
-#endif
     {"WEB_SERVER[static1]",  NULL,                    NULL,         0, NULL, NULL, socket_listen_main_static_threaded},
     {"STREAM",               NULL,                    NULL,         0, NULL, NULL, rrdpush_sender_thread},
 

+ 20 - 4
exporting/exporting_engine.h

@@ -14,10 +14,10 @@
 extern struct config exporting_config;
 
 #define EXPORTING_UPDATE_EVERY_OPTION_NAME "update every"
-#define EXPORTING_UPDATE_EVERY_DEFAULT     10
+#define EXPORTING_UPDATE_EVERY_DEFAULT 10
 
 typedef enum exporting_options {
-    EXPORTING_OPTION_NONE                   = 0,
+    EXPORTING_OPTION_NON                    = 0,
 
     EXPORTING_SOURCE_DATA_AS_COLLECTED      = (1 << 0),
     EXPORTING_SOURCE_DATA_AVERAGE           = (1 << 1),
@@ -42,10 +42,22 @@ typedef enum exporting_options {
      (instance->config.options & EXPORTING_OPTION_SEND_AUTOMATIC_LABELS &&                                             \
       label->label_source != LABEL_SOURCE_NETDATA_CONF))
 
+typedef enum exporting_connector_types {
+    EXPORTING_CONNECTOR_TYPE_UNKNOWN,                 // Invalid type
+    EXPORTING_CONNECTOR_TYPE_GRAPHITE,                // Send plain text to Graphite
+    EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET,   // Send data to OpenTSDB using telnet API
+    EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP,     // Send data to OpenTSDB using HTTP API
+    EXPORTING_CONNECTOR_TYPE_JSON,                    // Stores the data using JSON.
+    EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE, // The user selected to use Prometheus backend
+    EXPORTING_CONNECTOR_TYPE_KINESIS,                 // Send message to AWS Kinesis
+    EXPORTING_CONNECTOR_TYPE_MONGODB,                 // Send data to MongoDB collection
+    EXPORTING_CONNECTOR_TYPE_NUM                      // Number of backend types
+} EXPORTING_CONNECTOR_TYPE;
+
 struct engine;
 
 struct instance_config {
-    BACKEND_TYPE type;
+    EXPORTING_CONNECTOR_TYPE type;
 
     const char *name;
     const char *destination;
@@ -150,10 +162,12 @@ struct engine {
     struct instance *instance_root;
 };
 
+extern struct instance *prometheus_exporter_instance;
+
 void *exporting_main(void *ptr);
 
 struct engine *read_exporting_config();
-BACKEND_TYPE exporting_select_type(const char *type);
+EXPORTING_CONNECTOR_TYPE exporting_select_type(const char *type);
 
 int init_connectors(struct engine *engine);
 
@@ -187,4 +201,6 @@ void simple_connector_worker(void *instance_p);
 
 int send_internal_metrics(struct engine *engine);
 
+#include "exporting/prometheus/prometheus.h"
+
 #endif /* NETDATA_EXPORTING_ENGINE_H */
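
With the instance configuration switching from BACKEND_TYPE to EXPORTING_CONNECTOR_TYPE, exporting_select_type() now resolves the connector name found in the configuration into the new enum, and init_connectors() (next diff) dispatches on that value. A minimal, self-contained sketch of that kind of lookup follows; the function name `select_connector_type` and the exact option strings are assumptions for illustration, not the actual Netdata implementation:

```c
#include <string.h>

/* Same constants as the enum added in exporting_engine.h above. */
typedef enum exporting_connector_types {
    EXPORTING_CONNECTOR_TYPE_UNKNOWN,
    EXPORTING_CONNECTOR_TYPE_GRAPHITE,
    EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET,
    EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP,
    EXPORTING_CONNECTOR_TYPE_JSON,
    EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE,
    EXPORTING_CONNECTOR_TYPE_KINESIS,
    EXPORTING_CONNECTOR_TYPE_MONGODB,
    EXPORTING_CONNECTOR_TYPE_NUM
} EXPORTING_CONNECTOR_TYPE;

/* Hypothetical mapping from a configured connector name to its type. */
static EXPORTING_CONNECTOR_TYPE select_connector_type(const char *type) {
    if (!strcmp(type, "graphite"))                return EXPORTING_CONNECTOR_TYPE_GRAPHITE;
    if (!strcmp(type, "opentsdb"))                return EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET;
    if (!strcmp(type, "opentsdb:http"))           return EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP;
    if (!strcmp(type, "json"))                    return EXPORTING_CONNECTOR_TYPE_JSON;
    if (!strcmp(type, "prometheus_remote_write")) return EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE;
    if (!strcmp(type, "kinesis"))                 return EXPORTING_CONNECTOR_TYPE_KINESIS;
    if (!strcmp(type, "mongodb"))                 return EXPORTING_CONNECTOR_TYPE_MONGODB;
    return EXPORTING_CONNECTOR_TYPE_UNKNOWN;
}
```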

+ 8 - 8
exporting/init_connectors.c

@@ -32,35 +32,35 @@ int init_connectors(struct engine *engine)
         instance->after = engine->now;
 
         switch (instance->config.type) {
-            case BACKEND_TYPE_GRAPHITE:
+            case EXPORTING_CONNECTOR_TYPE_GRAPHITE:
                 if (init_graphite_instance(instance) != 0)
                     return 1;
                 break;
-            case BACKEND_TYPE_JSON:
+            case EXPORTING_CONNECTOR_TYPE_JSON:
                 if (init_json_instance(instance) != 0)
                     return 1;
                 break;
-            case BACKEND_TYPE_OPENTSDB_USING_TELNET:
+            case EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_TELNET:
                 if (init_opentsdb_telnet_instance(instance) != 0)
                     return 1;
                 break;
-            case BACKEND_TYPE_OPENTSDB_USING_HTTP:
+            case EXPORTING_CONNECTOR_TYPE_OPENTSDB_USING_HTTP:
                 if (init_opentsdb_http_instance(instance) != 0)
                     return 1;
                 break;
-            case BACKEND_TYPE_PROMETHEUS_REMOTE_WRITE:
+            case EXPORTING_CONNECTOR_TYPE_PROMETHEUS_REMOTE_WRITE:
 #if ENABLE_PROMETHEUS_REMOTE_WRITE
                 if (init_prometheus_remote_write_instance(instance) != 0)
                     return 1;
 #endif
                 break;
-            case BACKEND_TYPE_KINESIS:
+            case EXPORTING_CONNECTOR_TYPE_KINESIS:
 #if HAVE_KINESIS
                 if (init_aws_kinesis_instance(instance) != 0)
                     return 1;
 #endif
                 break;
-            case BACKEND_TYPE_MONGODB:
+            case EXPORTING_CONNECTOR_TYPE_MONGODB:
 #if HAVE_MONGOC
                 if (init_mongodb_instance(instance) != 0)
                     return 1;
@@ -77,7 +77,7 @@ int init_connectors(struct engine *engine)
             error("EXPORTING: cannot create tread worker. uv_thread_create(): %s", uv_strerror(error));
             return 1;
         }
-        char threadname[NETDATA_THREAD_NAME_MAX+1];
+        char threadname[NETDATA_THREAD_NAME_MAX + 1];
         snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "EXPORTING-%zu", instance->index);
         uv_thread_set_name_np(instance->thread, threadname);
     }

+ 1 - 1
exporting/mongodb/mongodb.c

@@ -208,7 +208,7 @@ int format_batch_mongodb(struct instance *instance)
         insert[documents_inserted] = bson_new_from_json((const uint8_t *)start, -1, &bson_error);
 
         if (unlikely(!insert[documents_inserted])) {
-            error("EXPORTING: %s", bson_error.message);
+            error("EXPORTING: Failed creating a BSON document from a JSON string \"%s\" : %s", start, bson_error.message);
             free_bson(insert, documents_inserted);
             return 1;
         }

+ 1 - 1
exporting/process_data.c

@@ -206,7 +206,7 @@ int start_host_formatting(struct engine *engine, RRDHOST *host)
  * Start chart formatting for every connector instance's buffer
  *
  * @param engine an engine data structure.
- * @param a chart.
+ * @param st a chart.
  * @return Returns 0 on success, 1 on failure.
  */
 int start_chart_formatting(struct engine *engine, RRDSET *st)

Some files were not shown because too many files changed in this diff