
eBPF modular (#9148)

Convert the monolithic ebpf.plugin into a modular plugin.
thiagoftsm 4 years ago
parent
commit
9f25a1ca2f

+ 5 - 0
CMakeLists.txt

@@ -426,6 +426,11 @@ set(SLABINFO_PLUGIN_FILES
 
 set(EBPF_PROCESS_PLUGIN_FILES
         collectors/ebpf.plugin/ebpf.c
+        collectors/ebpf.plugin/ebpf.h
+        collectors/ebpf.plugin/ebpf_process.c
+        collectors/ebpf.plugin/ebpf_process.h
+        collectors/ebpf.plugin/ebpf_socket.c
+        collectors/ebpf.plugin/ebpf_socket.h
         )
 
 set(PROC_PLUGIN_FILES

+ 4 - 0
Makefile.am

@@ -266,6 +266,10 @@ PERF_PLUGIN_FILES = \
 
 EBPF_PLUGIN_FILES = \
     collectors/ebpf.plugin/ebpf.c \
+    collectors/ebpf.plugin/ebpf_process.c \
+    collectors/ebpf.plugin/ebpf_process.h \
+    collectors/ebpf.plugin/ebpf_socket.c \
+    collectors/ebpf.plugin/ebpf_socket.h \
     collectors/ebpf.plugin/ebpf.h \
     $(LIBNETDATA_FILES) \
     $(NULL)

+ 3 - 3
collectors/ebpf.plugin/README.md

@@ -130,14 +130,14 @@ cd /etc/netdata/   # Replace with your Netdata configuration directory, if not /
 
 The `[global]` section defines settings for the whole eBPF collector.
 
-#### load
+#### ebpf load mode
 
 The collector has two different eBPF programs. These programs monitor the same functions inside the kernel, but they
 monitor, process, and display different kinds of information.
 
 By default, this plugin uses the `entry` mode. Changing this mode can create significant overhead on your operating
-system, but also offer valuable information if you are developing or debugging software. The `load` option accepts the
-following values: ​
+system, but can also offer valuable information if you are developing or debugging software. The `ebpf load mode` option
+accepts the following values:
 
 -   `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described
     in the sections above, and does not show charts related to errors.
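
For example, assuming `return` is the other accepted value (the `MODE_RETURN` entry in `ebpf.h` suggests it attaches probes to function returns), switching modes is a one-line change in `ebpf.conf`:

    [global]
        ebpf load mode = return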

File diff suppressed because it is too large
+ 232 - 783
collectors/ebpf.plugin/ebpf.c


+ 6 - 1
collectors/ebpf.plugin/ebpf.conf

@@ -1,2 +1,7 @@
 [global]
-    load = entry
+    ebpf load mode = entry
+    disable apps = yes
+
+[ebpf programs]
+    process = yes
+    network viewer = yes
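
The new `[ebpf programs]` section maps each key to a module's `config_name`, so the two threads can be toggled independently. A sketch using only the keys shown above, keeping just the socket thread enabled:

    [global]
        ebpf load mode = entry
        disable apps = yes

    [ebpf programs]
        process = no
        network viewer = yes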

+ 62 - 56
collectors/ebpf.plugin/ebpf.h

@@ -16,19 +16,6 @@
 # include <unistd.h>
 # include <dlfcn.h>
 
-# define NETDATA_GLOBAL_VECTOR 24
-# define NETDATA_MAX_MONITOR_VECTOR 9
-# define NETDATA_VFS_ERRORS 3
-# define NETDATA_PROCESS_ERRORS 4
-
-# define NETDATA_DEL_START 2
-# define NETDATA_IN_START_BYTE 3
-# define NETDATA_EXIT_START 5
-# define NETDATA_PROCESS_START 7
-# define NETDATA_PROCESS_RUNNING_COUNT 9
-
-# define NETDATA_EBPF_PROCESS_THREADS (uint32_t)3
-
 # include <fcntl.h>
 # include <ctype.h>
 # include <dirent.h>
@@ -40,6 +27,7 @@
 # include "../../libnetdata/clocks/clocks.h"
 # include "../../libnetdata/config/appconfig.h"
 # include "../../libnetdata/ebpf/ebpf.h"
+# include "../../daemon/main.h"
 
 typedef enum {
    MODE_RETURN = 0,    //This attaches a probe when the function returns
@@ -84,26 +72,21 @@ typedef struct netdata_error_report {
     int err;
 }netdata_error_report_t;
 
+typedef struct ebpf_module {
+    const char *thread_name;
+    const char *config_name;
+    int enabled;
+    void *(*start_routine) (void *);
+    int update_time;
+    int global_charts;
+    int apps_charts;
+    netdata_run_mode_t mode;
+    netdata_ebpf_events_t *probes;
+    uint32_t thread_id;
+} ebpf_module_t;
+
 //Chart definitions
 # define NETDATA_EBPF_FAMILY "ebpf"
-# define NETDATA_FILE_GROUP "File"
-# define NETDATA_VFS_GROUP "VFS"
-# define NETDATA_PROCESS_GROUP "Process"
-
-# define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor"
-# define NETDATA_FILE_OPEN_ERR_COUNT "file_error"
-# define NETDATA_VFS_FILE_CLEAN_COUNT "deleted_objects"
-# define NETDATA_VFS_FILE_IO_COUNT "io"
-# define NETDATA_VFS_FILE_ERR_COUNT "io_error"
-
-# define NETDATA_EXIT_SYSCALL "exit"
-# define NETDATA_PROCESS_SYSCALL "process_thread"
-# define NETDATA_PROCESS_ERROR_NAME "task_error"
-# define NETDATA_PROCESS_STATUS_NAME "process_status"
-
-# define NETDATA_VFS_IO_FILE_BYTES "io_bytes"
-# define NETDATA_VFS_DIM_IN_FILE_BYTES "write"
-# define NETDATA_VFS_DIM_OUT_FILE_BYTES "read"
 
 //Log file
 # define NETDATA_DEVELOPER_LOG_FILE "developer.log"
@@ -116,40 +99,63 @@ typedef struct netdata_error_report {
 # define NETDATA_KERNEL_V5_3 328448
 # define NETDATA_KERNEL_V4_15 265984
 
-//Index from kernel
-# define NETDATA_KEY_CALLS_DO_SYS_OPEN 0
-# define NETDATA_KEY_ERROR_DO_SYS_OPEN 1
 
-# define NETDATA_KEY_CALLS_VFS_WRITE 2
-# define NETDATA_KEY_ERROR_VFS_WRITE 3
-# define NETDATA_KEY_BYTES_VFS_WRITE 4
+# define EBPF_MAX_MAPS 32
+
+
+//Threads
+extern void *ebpf_process_thread(void *ptr);
+extern void *ebpf_socket_thread(void *ptr);
+
+//Common variables
+extern pthread_mutex_t lock;
+extern int close_ebpf_plugin;
+extern int ebpf_nprocs;
+extern int running_on_kernel;
+extern char *ebpf_plugin_dir;
+extern char kernel_string[64];
+extern netdata_ebpf_events_t process_probes[];
+extern netdata_ebpf_events_t socket_probes[];
+
+//Common functions
+extern void ebpf_global_labels(netdata_syscall_stat_t *is,
+                               netdata_publish_syscall_t *pio,
+                               char **dim,
+                               char **name,
+                               int end);
+
+extern void ebpf_write_chart_cmd(char *type
+    , char *id
+    , char *axis
+    , char *web
+    , int order);
+
+extern void ebpf_write_global_dimension(char *n, char *d);
 
-# define NETDATA_KEY_CALLS_VFS_READ 5
-# define NETDATA_KEY_ERROR_VFS_READ 6
-# define NETDATA_KEY_BYTES_VFS_READ 7
+extern void ebpf_create_global_dimension(void *ptr, int end);
 
-# define NETDATA_KEY_CALLS_VFS_UNLINK 8
-# define NETDATA_KEY_ERROR_VFS_UNLINK 9
+extern void ebpf_create_chart(char *family
+    , char *name
+    , char *axis
+    , char *web
+    , int order
+    , void (*ncd)(void *, int)
+    , void *move
+    , int end);
 
-# define NETDATA_KEY_CALLS_DO_EXIT 10
+extern void write_begin_chart(char *family, char *name);
 
-# define NETDATA_KEY_CALLS_RELEASE_TASK 11
+extern void write_chart_dimension(char *dim, long long value);
 
-# define NETDATA_KEY_CALLS_DO_FORK 12
-# define NETDATA_KEY_ERROR_DO_FORK 13
+extern void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, int end);
 
-# define NETDATA_KEY_CALLS_CLOSE_FD 14
-# define NETDATA_KEY_ERROR_CLOSE_FD 15
+extern void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end);
 
-# define NETDATA_KEY_CALLS_SYS_CLONE 16
-# define NETDATA_KEY_ERROR_SYS_CLONE 17
+extern void write_io_chart(char *chart, char *family, char *dwrite, char *dread, netdata_publish_vfs_common_t *pvc);
 
-# define NETDATA_KEY_CALLS_VFS_WRITEV 18
-# define NETDATA_KEY_ERROR_VFS_WRITEV 19
-# define NETDATA_KEY_BYTES_VFS_WRITEV 20
+extern void fill_ebpf_functions(ebpf_functions_t *ef);
 
-# define NETDATA_KEY_CALLS_VFS_READV 21
-# define NETDATA_KEY_ERROR_VFS_READV 22
-# define NETDATA_KEY_BYTES_VFS_READV 23
+# define EBPF_GLOBAL_SECTION "global"
+# define EBPF_PROGRAMS_SECTION "ebpf programs"
 
 #endif
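
The `ebpf_module_t` descriptor is the heart of the modularization: each collector becomes a thread described by one entry. A minimal sketch of how `ebpf.c` can drive the modules declared above (the initializer values are illustrative, not the commit's actual defaults; `netdata_thread_create()` is libnetdata's pthread wrapper):

    static ebpf_module_t ebpf_modules[] = {
        { .thread_name = "process", .config_name = "process", .enabled = 1,
          .start_routine = ebpf_process_thread, .update_time = 1, .global_charts = 1,
          .apps_charts = 0, .mode = MODE_ENTRY, .probes = process_probes, .thread_id = 0 },
        { .thread_name = "socket", .config_name = "network viewer", .enabled = 1,
          .start_routine = ebpf_socket_thread, .update_time = 1, .global_charts = 1,
          .apps_charts = 0, .mode = MODE_ENTRY, .probes = socket_probes, .thread_id = 1 },
    };

    // Every module runs on its own thread; the descriptor is the argument
    // that ebpf_process_thread()/ebpf_socket_thread() receive as `ptr`.
    for (size_t i = 0; i < sizeof(ebpf_modules) / sizeof(ebpf_modules[0]); i++) {
        netdata_thread_t thread;
        netdata_thread_create(&thread, ebpf_modules[i].thread_name,
                              NETDATA_THREAD_OPTION_JOINABLE,
                              ebpf_modules[i].start_routine, &ebpf_modules[i]);
    }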

+ 514 - 0
collectors/ebpf.plugin/ebpf_process.c

@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+
+#include "ebpf.h"
+#include "ebpf_process.h"
+
+/*****************************************************************
+ *
+ *  GLOBAL VARIABLES
+ *
+ *****************************************************************/
+
+static char *process_dimension_names[NETDATA_MAX_MONITOR_VECTOR] = { "open", "close", "delete", "read", "write",
+                                                             "process", "task", "process", "thread" };
+static char *process_id_names[NETDATA_MAX_MONITOR_VECTOR] = { "do_sys_open", "__close_fd", "vfs_unlink", "vfs_read", "vfs_write",
+                                                      "do_exit", "release_task", "_do_fork", "sys_clone" };
+static char *status[] = { "process", "zombie" };
+
+static netdata_idx_t *process_hash_values = NULL;
+static netdata_syscall_stat_t *process_aggregated_data = NULL;
+static netdata_publish_syscall_t *process_publish_aggregated = NULL;
+
+static ebpf_functions_t process_functions;
+
+#ifndef STATIC
+/**
+ * Pointers used when collector is dynamically linked
+ */
+
+//libbpf (requires at least kernel 4.10)
+static int (*bpf_map_lookup_elem)(int, const void *, void *);
+
+static int *map_fd = NULL;
+/**
+ * End of the pointers
+ */
+#endif
+
+/*****************************************************************
+ *
+ *  PROCESS DATA AND SEND TO NETDATA
+ *
+ *****************************************************************/
+
+/**
+ * Update the publish structure before sending data to Netdata.
+ *
+ * @param publish  the first output structure with independent dimensions
+ * @param pvc      the second output structure with correlated dimensions
+ * @param input    the structure with the input data.
+ */
+static void ebpf_update_publish(netdata_publish_syscall_t *publish,
+                                   netdata_publish_vfs_common_t *pvc,
+                                   netdata_syscall_stat_t *input) {
+
+    netdata_publish_syscall_t *move = publish;
+    while(move) {
+        if(input->call != move->pcall) {
+            //Skip the first sample, so the initial values do not create dimensions higher than normal ones.
+            if(move->pcall) {
+                move->ncall = (input->call > move->pcall)?input->call - move->pcall: move->pcall - input->call;
+                move->nbyte = (input->bytes > move->pbyte)?input->bytes - move->pbyte: move->pbyte - input->bytes;
+                move->nerr = (input->ecall > move->perr)?input->ecall - move->perr: move->perr - input->ecall;
+            } else {
+                move->ncall = 0;
+                move->nbyte = 0;
+                move->nerr = 0;
+            }
+
+            move->pcall = input->call;
+            move->pbyte = input->bytes;
+            move->perr = input->ecall;
+        } else {
+            move->ncall = 0;
+            move->nbyte = 0;
+            move->nerr = 0;
+        }
+
+        input = input->next;
+        move = move->next;
+    }
+
+    pvc->write = -((long)publish[2].nbyte);
+    pvc->read = (long)publish[3].nbyte;
+
+    pvc->running = (long)publish[7].ncall - (long)publish[8].ncall;
+    publish[6].ncall = -publish[6].ncall; // release
+    pvc->zombie = (long)publish[5].ncall + (long)publish[6].ncall;
+}
+
+
+/**
+ * Call the necessary functions to create a chart.
+ *
+ * @param family  the chart family
+ * @param pvc     the structure with the values that will be published
+ */
+static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc) {
+    write_begin_chart(family, NETDATA_PROCESS_STATUS_NAME);
+
+    write_chart_dimension(status[0], (long long) pvc->running);
+    write_chart_dimension(status[1], (long long) pvc->zombie);
+
+    printf("END\n");
+}
+
+/**
+ * Send data to Netdata, calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_process_send_data(ebpf_module_t *em) {
+    netdata_publish_vfs_common_t pvc;
+    ebpf_update_publish(process_publish_aggregated, &pvc, process_aggregated_data);
+
+    write_count_chart(NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
+    write_count_chart(NETDATA_VFS_FILE_CLEAN_COUNT,
+                             NETDATA_EBPF_FAMILY,
+                             &process_publish_aggregated[NETDATA_DEL_START],
+                             1);
+    write_count_chart(NETDATA_VFS_FILE_IO_COUNT,
+                             NETDATA_EBPF_FAMILY,
+                             &process_publish_aggregated[NETDATA_IN_START_BYTE],
+                             2);
+    write_count_chart(NETDATA_EXIT_SYSCALL,
+                             NETDATA_EBPF_FAMILY,
+                             &process_publish_aggregated[NETDATA_EXIT_START],
+                             2);
+    write_count_chart(NETDATA_PROCESS_SYSCALL,
+                             NETDATA_EBPF_FAMILY,
+                             &process_publish_aggregated[NETDATA_PROCESS_START],
+                             2);
+
+    write_status_chart(NETDATA_EBPF_FAMILY, &pvc);
+    if(em->mode < MODE_ENTRY) {
+        write_err_chart(NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
+        write_err_chart(NETDATA_VFS_FILE_ERR_COUNT,
+                               NETDATA_EBPF_FAMILY,
+                               &process_publish_aggregated[2],
+                               NETDATA_VFS_ERRORS);
+        write_err_chart(NETDATA_PROCESS_ERROR_NAME,
+                               NETDATA_EBPF_FAMILY,
+                               &process_publish_aggregated[NETDATA_PROCESS_START],
+                               2);
+
+        write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_EBPF_FAMILY, process_id_names[3],
+                       process_id_names[4], &pvc);
+    }
+}
+
+/*****************************************************************
+ *
+ *  READ INFORMATION FROM KERNEL RING
+ *
+ *****************************************************************/
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void read_hash_global_tables()
+{
+    uint64_t idx;
+    netdata_idx_t res[NETDATA_GLOBAL_VECTOR];
+
+    netdata_idx_t *val = process_hash_values;
+    for (idx = 0; idx < NETDATA_GLOBAL_VECTOR; idx++) {
+        if(!bpf_map_lookup_elem(map_fd[1], &idx, val)) {
+            uint64_t total = 0;
+            int i;
+            int end = (running_on_kernel < NETDATA_KERNEL_V4_15)?1:ebpf_nprocs;
+            for (i = 0; i < end; i++)
+                total += val[i];
+
+            res[idx] = total;
+        } else {
+            res[idx] = 0;
+        }
+    }
+
+    process_aggregated_data[0].call = res[NETDATA_KEY_CALLS_DO_SYS_OPEN];
+    process_aggregated_data[1].call = res[NETDATA_KEY_CALLS_CLOSE_FD];
+    process_aggregated_data[2].call = res[NETDATA_KEY_CALLS_VFS_UNLINK];
+    process_aggregated_data[3].call = res[NETDATA_KEY_CALLS_VFS_READ] + res[NETDATA_KEY_CALLS_VFS_READV];
+    process_aggregated_data[4].call = res[NETDATA_KEY_CALLS_VFS_WRITE] + res[NETDATA_KEY_CALLS_VFS_WRITEV];
+    process_aggregated_data[5].call = res[NETDATA_KEY_CALLS_DO_EXIT];
+    process_aggregated_data[6].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
+    process_aggregated_data[7].call = res[NETDATA_KEY_CALLS_DO_FORK];
+    process_aggregated_data[8].call = res[NETDATA_KEY_CALLS_SYS_CLONE];
+
+    process_aggregated_data[0].ecall = res[NETDATA_KEY_ERROR_DO_SYS_OPEN];
+    process_aggregated_data[1].ecall = res[NETDATA_KEY_ERROR_CLOSE_FD];
+    process_aggregated_data[2].ecall = res[NETDATA_KEY_ERROR_VFS_UNLINK];
+    process_aggregated_data[3].ecall = res[NETDATA_KEY_ERROR_VFS_READ] + res[NETDATA_KEY_ERROR_VFS_READV];
+    process_aggregated_data[4].ecall = res[NETDATA_KEY_ERROR_VFS_WRITE] + res[NETDATA_KEY_ERROR_VFS_WRITEV];
+    process_aggregated_data[7].ecall = res[NETDATA_KEY_ERROR_DO_FORK];
+    process_aggregated_data[8].ecall = res[NETDATA_KEY_ERROR_SYS_CLONE];
+
+    process_aggregated_data[2].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
+                                       (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
+    process_aggregated_data[3].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
+                                       (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS WITH THE MAIN LOOP
+ *
+ *****************************************************************/
+
+
+/**
+ * Main loop for this collector.
+ *
+ * @param step the number of microseconds between heartbeats
+ * @param em   the structure with thread information
+ */
+static void process_collector(usec_t step, ebpf_module_t *em)
+{
+    heartbeat_t hb;
+    heartbeat_init(&hb);
+    while(!close_ebpf_plugin) {
+        usec_t dt = heartbeat_next(&hb, step);
+        (void)dt;
+
+        read_hash_global_tables();
+
+        pthread_mutex_lock(&lock);
+        ebpf_process_send_data(em);
+        pthread_mutex_unlock(&lock);
+
+        fflush(stdout);
+    }
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create IO chart
+ *
+ * @param family the chart family
+ * @param name   the chart name
+ * @param axis   the axis label
+ * @param web    the group name used to attach the chart on the dashboard
+ * @param order  the order number of the specified chart
+ */
+static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, int order) {
+    printf("CHART %s.%s '' '' '%s' '%s' '' line %d 1 ''\n"
+        , family
+        , name
+        , axis
+        , web
+        , order);
+
+    printf("DIMENSION %s %s absolute 1 1\n", process_id_names[3], NETDATA_VFS_DIM_OUT_FILE_BYTES);
+    printf("DIMENSION %s %s absolute 1 1\n", process_id_names[4], NETDATA_VFS_DIM_IN_FILE_BYTES);
+}
+
+/**
+ * Create process status chart
+ *
+ * @param family the chart family
+ * @param name   the chart name
+ * @param axis   the axis label
+ * @param web    the group name used to attach the chart on the dashboard
+ * @param order  the order number of the specified chart
+ */
+static void ebpf_process_status_chart(char *family, char *name, char *axis, char *web, int order) {
+    printf("CHART %s.%s '' '' '%s' '%s' '' line %d 1 ''\n"
+        , family
+        , name
+        , axis
+        , web
+        , order);
+
+    printf("DIMENSION %s '' absolute 1 1\n", status[0]);
+    printf("DIMENSION %s '' absolute 1 1\n", status[1]);
+}
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_global_charts(ebpf_module_t *em) {
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_FILE_OPEN_CLOSE_COUNT
+        , "Calls"
+        , NETDATA_FILE_GROUP
+        , 970
+        , ebpf_create_global_dimension
+        , process_publish_aggregated
+        , 2);
+
+    if (em->mode < MODE_ENTRY) {
+        ebpf_create_chart(NETDATA_EBPF_FAMILY
+            , NETDATA_FILE_OPEN_ERR_COUNT
+            , "Calls"
+            , NETDATA_FILE_GROUP
+            , 971
+            , ebpf_create_global_dimension
+            , process_publish_aggregated
+            , 2);
+    }
+
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_VFS_FILE_CLEAN_COUNT
+        , "Calls"
+        , NETDATA_VFS_GROUP
+        , 972
+        , ebpf_create_global_dimension
+        , &process_publish_aggregated[NETDATA_DEL_START]
+        , 1);
+
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_VFS_FILE_IO_COUNT
+        , "Calls"
+        , NETDATA_VFS_GROUP
+        , 973
+        , ebpf_create_global_dimension
+        , &process_publish_aggregated[NETDATA_IN_START_BYTE]
+        , 2);
+
+    if (em->mode < MODE_ENTRY) {
+        ebpf_create_io_chart(NETDATA_EBPF_FAMILY
+            , NETDATA_VFS_IO_FILE_BYTES
+            , "bytes/s"
+            , NETDATA_VFS_GROUP
+            , 974);
+
+        ebpf_create_chart(NETDATA_EBPF_FAMILY
+            , NETDATA_VFS_FILE_ERR_COUNT
+            , "Calls"
+            , NETDATA_VFS_GROUP
+            , 975
+            , ebpf_create_global_dimension
+            , &process_publish_aggregated[2]
+            , NETDATA_VFS_ERRORS);
+
+    }
+
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_PROCESS_SYSCALL
+        , "Calls"
+        , NETDATA_PROCESS_GROUP
+        , 976
+        , ebpf_create_global_dimension
+        , &process_publish_aggregated[NETDATA_PROCESS_START]
+        , 2);
+
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_EXIT_SYSCALL
+        , "Calls"
+        , NETDATA_PROCESS_GROUP
+        , 977
+        , ebpf_create_global_dimension
+        , &process_publish_aggregated[NETDATA_EXIT_START]
+        , 2);
+
+    ebpf_process_status_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_PROCESS_STATUS_NAME
+        , "Total"
+        , NETDATA_PROCESS_GROUP
+        , 978);
+
+    if (em->mode < MODE_ENTRY) {
+        ebpf_create_chart(NETDATA_EBPF_FAMILY
+            , NETDATA_PROCESS_ERROR_NAME
+            , "Calls"
+            , NETDATA_PROCESS_GROUP
+            , 979
+            , ebpf_create_global_dimension
+            , &process_publish_aggregated[NETDATA_PROCESS_START]
+            , 2);
+    }
+
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_process_cleanup(void *ptr)
+{
+    (void)ptr;
+    freez(process_aggregated_data);
+    freez(process_publish_aggregated);
+    freez(process_hash_values);
+
+    if (process_functions.libnetdata) {
+        dlclose(process_functions.libnetdata);
+    }
+
+    freez(process_functions.map_fd);
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS TO START THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate vectors used with this thread.
+ * We do not test the return value, because callocz already does this and shuts
+ * down the plugin when an allocation fails.
+ *
+ *  @param length is the length for the vectors used inside the collector.
+ */
+static void ebpf_process_allocate_global_vectors(size_t length) {
+    process_aggregated_data = callocz(length, sizeof(netdata_syscall_stat_t));
+    process_publish_aggregated = callocz(length, sizeof(netdata_publish_syscall_t));
+    process_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+}
+
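+/**
+ * Force every probe to type 'p' (kprobe) and, on kernels older than 5.3,
+ * end the NULL-terminated probe list at index 10, so events that do not
+ * exist there are never loaded.
+ */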
+static void change_collector_event() {
+    int i;
+    if (running_on_kernel < NETDATA_KERNEL_V5_3)
+        process_probes[10].name = NULL;
+
+    for (i = 0; process_probes[i].name ; i++ ) {
+        process_probes[i].type = 'p';
+    }
+}
+
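+/**
+ * Replace _do_fork with do_fork in the dimension and probe names; Red Hat
+ * kernels older than RHEL 8 still expose the old symbol.
+ */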
+static void change_syscalls() {
+    static char *lfork = "do_fork";
+    process_id_names[7] = lfork;
+    process_probes[8].name = lfork;
+}
+
+/**
+ * Set local function pointers; the libbpf assignments are compiled out when the collector is statically linked
+ */
+static void set_local_pointers(ebpf_module_t *em) {
+#ifndef STATIC
+    bpf_map_lookup_elem = process_functions.bpf_map_lookup_elem;
+
+#endif
+
+    map_fd = process_functions.map_fd;
+
+    if (em->mode == MODE_ENTRY) {
+        change_collector_event();
+    }
+
+    if (process_functions.isrh >= NETDATA_MINIMUM_RH_VERSION && process_functions.isrh < NETDATA_RH_8)
+        change_syscalls();
+}
+
+/*****************************************************************
+ *
+ *  EBPF PROCESS THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Process thread
+ *
+ * Thread used to generate process charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_process_thread(void *ptr)
+{
+    netdata_thread_cleanup_push(ebpf_process_cleanup, ptr);
+
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
+    fill_ebpf_functions(&process_functions);
+
+    if (!em->enabled)
+        goto endprocess;
+
+    pthread_mutex_lock(&lock);
+    ebpf_process_allocate_global_vectors(NETDATA_MAX_MONITOR_VECTOR);
+
+    if (ebpf_load_libraries(&process_functions, "libnetdata_ebpf.so", ebpf_plugin_dir)) {
+        pthread_mutex_unlock(&lock);
+        goto endprocess;
+    }
+
+    set_local_pointers(em);
+    if (ebpf_load_program(ebpf_plugin_dir, em->thread_id, em->mode, kernel_string,
+                      em->thread_name, process_functions.map_fd, process_functions.load_bpf_file) ) {
+        pthread_mutex_unlock(&lock);
+        goto endprocess;
+    }
+
+    ebpf_global_labels(process_aggregated_data, process_publish_aggregated, process_dimension_names,
+                       process_id_names, NETDATA_MAX_MONITOR_VECTOR);
+
+    ebpf_create_global_charts(em);
+    pthread_mutex_unlock(&lock);
+    process_collector((usec_t)(em->update_time*USEC_PER_SEC), em);
+
+endprocess:
+    netdata_thread_cleanup_pop(1);
+    return NULL;
+}
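
Everything the thread publishes goes through Netdata's external-plugin text protocol on stdout: ebpf_create_global_charts() prints the CHART/DIMENSION definitions once, and on each heartbeat process_collector() emits one BEGIN/SET/END block per chart. One iteration of the status chart above would look roughly like this (the values are made up):

    BEGIN ebpf.process_status
    SET process = 142
    SET zombie = 3
    END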

+ 71 - 0
collectors/ebpf.plugin/ebpf_process.h

@@ -0,0 +1,71 @@
+#ifndef _NETDATA_EBPF_PROCESS_H_
+# define _NETDATA_EBPF_PROCESS_H_ 1
+
+# define NETDATA_FILE_GROUP "File"
+# define NETDATA_VFS_GROUP "VFS"
+# define NETDATA_PROCESS_GROUP "Process"
+
+# define NETDATA_GLOBAL_VECTOR 24
+# define NETDATA_MAX_MONITOR_VECTOR 9
+# define NETDATA_VFS_ERRORS 3
+
+# define NETDATA_DEL_START 2
+# define NETDATA_IN_START_BYTE 3
+# define NETDATA_EXIT_START 5
+# define NETDATA_PROCESS_START 7
+
+# define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor"
+# define NETDATA_FILE_OPEN_ERR_COUNT "file_error"
+# define NETDATA_VFS_FILE_CLEAN_COUNT "deleted_objects"
+# define NETDATA_VFS_FILE_IO_COUNT "io"
+# define NETDATA_VFS_FILE_ERR_COUNT "io_error"
+
+# define NETDATA_EXIT_SYSCALL "exit"
+# define NETDATA_PROCESS_SYSCALL "process_thread"
+# define NETDATA_PROCESS_ERROR_NAME "task_error"
+# define NETDATA_PROCESS_STATUS_NAME "process_status"
+
+# define NETDATA_VFS_IO_FILE_BYTES "io_bytes"
+# define NETDATA_VFS_DIM_IN_FILE_BYTES "write"
+# define NETDATA_VFS_DIM_OUT_FILE_BYTES "read"
+
+//Index from kernel
+typedef enum ebpf_process_index {
+    NETDATA_KEY_CALLS_DO_SYS_OPEN,
+    NETDATA_KEY_ERROR_DO_SYS_OPEN,
+
+    NETDATA_KEY_CALLS_VFS_WRITE,
+    NETDATA_KEY_ERROR_VFS_WRITE,
+    NETDATA_KEY_BYTES_VFS_WRITE,
+
+    NETDATA_KEY_CALLS_VFS_READ,
+    NETDATA_KEY_ERROR_VFS_READ,
+    NETDATA_KEY_BYTES_VFS_READ,
+
+    NETDATA_KEY_CALLS_VFS_UNLINK,
+    NETDATA_KEY_ERROR_VFS_UNLINK,
+
+    NETDATA_KEY_CALLS_DO_EXIT,
+
+    NETDATA_KEY_CALLS_RELEASE_TASK,
+
+    NETDATA_KEY_CALLS_DO_FORK,
+    NETDATA_KEY_ERROR_DO_FORK,
+
+    NETDATA_KEY_CALLS_CLOSE_FD,
+    NETDATA_KEY_ERROR_CLOSE_FD,
+
+    NETDATA_KEY_CALLS_SYS_CLONE,
+    NETDATA_KEY_ERROR_SYS_CLONE,
+
+    NETDATA_KEY_CALLS_VFS_WRITEV,
+    NETDATA_KEY_ERROR_VFS_WRITEV,
+    NETDATA_KEY_BYTES_VFS_WRITEV,
+
+    NETDATA_KEY_CALLS_VFS_READV,
+    NETDATA_KEY_ERROR_VFS_READV,
+    NETDATA_KEY_BYTES_VFS_READV
+
+} ebpf_process_index_t;
+
+# endif
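
Because the enum replaces the old hand-numbered defines, the vector-length macros must track the last enum member. A hypothetical guard, not part of the commit (the same check would apply to NETDATA_SOCKET_COUNTER in ebpf_socket.h):

    // C11 guard: the global vector must cover every key filled by the kernel program.
    _Static_assert(NETDATA_KEY_BYTES_VFS_READV + 1 == NETDATA_GLOBAL_VECTOR,
                   "NETDATA_GLOBAL_VECTOR must cover ebpf_process_index_t");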

+ 387 - 0
collectors/ebpf.plugin/ebpf_socket.c

@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+
+#include "ebpf.h"
+#include "ebpf_socket.h"
+
+/*****************************************************************
+ *
+ *  GLOBAL VARIABLES
+ *
+ *****************************************************************/
+
+static ebpf_functions_t socket_functions;
+
+static netdata_idx_t *socket_hash_values = NULL;
+static netdata_syscall_stat_t *socket_aggregated_data = NULL;
+static netdata_publish_syscall_t *socket_publish_aggregated = NULL;
+
+static char *socket_dimension_names[NETDATA_MAX_SOCKET_VECTOR] = { "sent", "received", "close", "sent", "received" };
+static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_sendmsg", "tcp_cleanup_rbuf", "tcp_close", "udp_sendmsg",
+                                                     "udp_recvmsg" };
+
+#ifndef STATIC
+/**
+ * Pointers used when collector is dynamically linked
+ */
+
+//libbpf (requires at least kernel 4.10)
+static int (*bpf_map_lookup_elem)(int, const void *, void *);
+static int (*bpf_map_delete_elem)(int fd, const void *key);
+
+static int *map_fd = NULL;
+/**
+ * End of the pointers
+ */
+#endif
+
+/*****************************************************************
+ *
+ *  PROCESS DATA AND SEND TO NETDATA
+ *
+ *****************************************************************/
+
+/**
+ * Update the publish structure before sending data to Netdata.
+ *
+ * @param publish  the first output structure with independent dimensions
+ * @param tcp      structure to store IO from tcp sockets
+ * @param udp      structure to store IO from udp sockets
+ * @param input    the structure with the input data.
+ */
+static void ebpf_update_publish(netdata_publish_syscall_t *publish,
+                                netdata_publish_vfs_common_t *tcp,
+                                netdata_publish_vfs_common_t *udp,
+                                netdata_syscall_stat_t *input) {
+
+    netdata_publish_syscall_t *move = publish;
+    while(move) {
+        if(input->call != move->pcall) {
+            //Skip the first sample, so the initial values do not create dimensions higher than normal ones.
+            if(move->pcall) {
+                move->ncall = (input->call > move->pcall)?input->call - move->pcall: move->pcall - input->call;
+                move->nbyte = (input->bytes > move->pbyte)?input->bytes - move->pbyte: move->pbyte - input->bytes;
+                move->nerr = (input->ecall > move->perr)?input->ecall - move->perr: move->perr - input->ecall;
+            } else {
+                move->ncall = 0;
+                move->nbyte = 0;
+                move->nerr = 0;
+            }
+
+            move->pcall = input->call;
+            move->pbyte = input->bytes;
+            move->perr = input->ecall;
+        } else {
+            move->ncall = 0;
+            move->nbyte = 0;
+            move->nerr = 0;
+        }
+
+        input = input->next;
+        move = move->next;
+    }
+
+    tcp->write = -((long)publish[0].nbyte);
+    tcp->read = (long)publish[1].nbyte;
+
+    udp->write = -((long)publish[3].nbyte);
+    udp->read = (long)publish[4].nbyte;
+}
+
+/**
+ * Send data to Netdata, calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_socket_send_data(ebpf_module_t *em) {
+    netdata_publish_vfs_common_t common_tcp;
+    netdata_publish_vfs_common_t common_udp;
+    ebpf_update_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data);
+
+    write_count_chart(NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 3);
+    write_io_chart(NETDATA_TCP_FUNCTION_BYTES, NETDATA_EBPF_FAMILY, socket_id_names[0], socket_id_names[1], &common_tcp);
+    if (em->mode < MODE_ENTRY) {
+        write_err_chart(NETDATA_TCP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 2);
+    }
+
+    write_count_chart(NETDATA_UDP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY,
+                             &socket_publish_aggregated[NETDATA_UDP_START], 2);
+    write_io_chart(NETDATA_UDP_FUNCTION_BYTES, NETDATA_EBPF_FAMILY, socket_id_names[3], socket_id_names[4], &common_udp);
+    if (em->mode < MODE_ENTRY) {
+        write_err_chart(NETDATA_UDP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY,
+                               &socket_publish_aggregated[NETDATA_UDP_START], 2);
+    }
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_global_charts(ebpf_module_t *em) {
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_TCP_FUNCTION_COUNT
+        , "Calls"
+        , NETDATA_SOCKET_GROUP
+        , 950
+        , ebpf_create_global_dimension
+        , socket_publish_aggregated
+        , 3);
+
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_TCP_FUNCTION_BYTES
+        , "bytes/s"
+        , NETDATA_SOCKET_GROUP
+        , 951
+        , ebpf_create_global_dimension
+        , socket_publish_aggregated
+        , 3);
+
+    if (em->mode < MODE_ENTRY) {
+        ebpf_create_chart(NETDATA_EBPF_FAMILY
+            , NETDATA_TCP_FUNCTION_ERROR
+            , "Calls"
+            , NETDATA_SOCKET_GROUP
+            , 952
+            , ebpf_create_global_dimension
+            , socket_publish_aggregated
+            , 2);
+    }
+
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_UDP_FUNCTION_COUNT
+        , "Calls"
+        , NETDATA_SOCKET_GROUP
+        , 953
+        , ebpf_create_global_dimension
+        , &socket_publish_aggregated[NETDATA_UDP_START]
+        , 2);
+
+    ebpf_create_chart(NETDATA_EBPF_FAMILY
+        , NETDATA_UDP_FUNCTION_BYTES
+        , "bytes/s"
+        , NETDATA_SOCKET_GROUP
+        , 954
+        , ebpf_create_global_dimension
+        , &socket_publish_aggregated[NETDATA_UDP_START]
+        , 2);
+
+    if (em->mode < MODE_ENTRY) {
+        ebpf_create_chart(NETDATA_EBPF_FAMILY
+            , NETDATA_UDP_FUNCTION_ERROR
+            , "Calls"
+            , NETDATA_SOCKET_GROUP
+            , 955
+            , ebpf_create_global_dimension
+            , &socket_publish_aggregated[NETDATA_UDP_START]
+            , 2);
+    }
+}
+
+/*****************************************************************
+ *
+ *  READ INFORMATION FROM KERNEL RING
+ *
+ *****************************************************************/
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void read_hash_global_tables()
+{
+    uint64_t idx;
+    netdata_idx_t res[NETDATA_SOCKET_COUNTER];
+
+    netdata_idx_t *val = socket_hash_values;
+    for (idx = 0; idx < NETDATA_SOCKET_COUNTER ; idx++) {
+        if (!bpf_map_lookup_elem(map_fd[4], &idx, val)) {
+            uint64_t total = 0;
+            int i;
+            int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+            for (i = 0; i < end; i++)
+                total += val[i];
+
+            res[idx] = total;
+        } else {
+            res[idx] = 0;
+        }
+    }
+
+    socket_aggregated_data[0].call = res[NETDATA_KEY_CALLS_TCP_SENDMSG];
+    socket_aggregated_data[1].call = res[NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF];
+    socket_aggregated_data[2].call = res[NETDATA_KEY_CALLS_TCP_CLOSE];
+    socket_aggregated_data[3].call = res[NETDATA_KEY_CALLS_UDP_SENDMSG];
+    socket_aggregated_data[4].call = res[NETDATA_KEY_CALLS_UDP_RECVMSG];
+
+    socket_aggregated_data[0].ecall = res[NETDATA_KEY_ERROR_TCP_SENDMSG];
+    socket_aggregated_data[1].ecall = res[NETDATA_KEY_ERROR_TCP_CLEANUP_RBUF];
+    socket_aggregated_data[3].ecall = res[NETDATA_KEY_ERROR_UDP_SENDMSG];
+    socket_aggregated_data[4].ecall = res[NETDATA_KEY_ERROR_UDP_RECVMSG];
+
+    socket_aggregated_data[0].bytes = res[NETDATA_KEY_BYTES_TCP_SENDMSG];
+    socket_aggregated_data[1].bytes = res[NETDATA_KEY_BYTES_TCP_CLEANUP_RBUF];
+    socket_aggregated_data[3].bytes = res[NETDATA_KEY_BYTES_UDP_SENDMSG];
+    socket_aggregated_data[4].bytes = res[NETDATA_KEY_BYTES_UDP_RECVMSG];
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS WITH THE MAIN LOOP
+ *
+ *****************************************************************/
+
+
+/**
+ * Main loop for this collector.
+ *
+ * @param step the number of microseconds between heartbeats
+ * @param em   the structure with thread information
+ */
+static void socket_collector(usec_t step, ebpf_module_t *em)
+{
+    heartbeat_t hb;
+    heartbeat_init(&hb);
+    while(!close_ebpf_plugin) {
+        usec_t dt = heartbeat_next(&hb, step);
+        (void)dt;
+
+        read_hash_global_tables();
+
+        pthread_mutex_lock(&lock);
+        ebpf_socket_send_data(em);
+        pthread_mutex_unlock(&lock);
+
+        fflush(stdout);
+    }
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_socket_cleanup(void *ptr)
+{
+    (void)ptr;
+
+    freez(socket_aggregated_data);
+    freez(socket_publish_aggregated);
+    freez(socket_hash_values);
+
+    if (socket_functions.libnetdata) {
+        dlclose(socket_functions.libnetdata);
+    }
+
+    freez(socket_functions.map_fd);
+}
+
+/*****************************************************************
+ *
+ *  FUNCTIONS TO START THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate vectors used with this thread.
+ * We do not test the return value, because callocz already does this and shuts
+ * down the plugin when an allocation fails.
+ *
+ * @param length is the length for the vectors used inside the collector.
+ */
+static void ebpf_socket_allocate_global_vectors(size_t length) {
+    socket_aggregated_data = callocz(length, sizeof(netdata_syscall_stat_t));
+    socket_publish_aggregated = callocz(length, sizeof(netdata_publish_syscall_t));
+    socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+}
+
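+/**
+ * Attach socket_probes[0] and socket_probes[5] as plain kprobes ('p')
+ * when the collector runs in entry mode.
+ */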
+static void change_collector_event() {
+    socket_probes[0].type = 'p';
+    socket_probes[5].type = 'p';
+}
+
+/**
+ * Set local function pointers; the libbpf assignments are compiled out when the collector is statically linked
+ */
+static void set_local_pointers(ebpf_module_t *em) {
+#ifndef STATIC
+    bpf_map_lookup_elem = socket_functions.bpf_map_lookup_elem;
+    bpf_map_delete_elem = socket_functions.bpf_map_delete_elem;
+    (void) bpf_map_delete_elem;
+#endif
+    map_fd = socket_functions.map_fd;
+
+    if (em->mode == MODE_ENTRY) {
+        change_collector_event();
+    }
+}
+
+/*****************************************************************
+ *
+ *  EBPF SOCKET THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Socket thread
+ *
+ * Thread used to generate socket charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_socket_thread(void *ptr)
+{
+    netdata_thread_cleanup_push(ebpf_socket_cleanup, ptr);
+
+    ebpf_module_t *em = (ebpf_module_t *)ptr;
+    fill_ebpf_functions(&socket_functions);
+
+    if (!em->enabled)
+        goto endsocket;
+
+    pthread_mutex_lock(&lock);
+
+    ebpf_socket_allocate_global_vectors(NETDATA_MAX_SOCKET_VECTOR);
+
+    if (ebpf_load_libraries(&socket_functions, "libnetdata_ebpf.so", ebpf_plugin_dir)) {
+        pthread_mutex_unlock(&lock);
+        goto endsocket;
+    }
+
+    set_local_pointers(em);
+    if (ebpf_load_program(ebpf_plugin_dir, em->thread_id, em->mode, kernel_string,
+                          em->thread_name, socket_functions.map_fd, socket_functions.load_bpf_file) ) {
+        pthread_mutex_unlock(&lock);
+        goto endsocket;
+    }
+
+    ebpf_global_labels(socket_aggregated_data, socket_publish_aggregated, socket_dimension_names,
+                       socket_id_names, NETDATA_MAX_SOCKET_VECTOR);
+
+    ebpf_create_global_charts(em);
+    pthread_mutex_unlock(&lock);
+
+    socket_collector((usec_t)(em->update_time*USEC_PER_SEC), em);
+
+endsocket:
+    netdata_thread_cleanup_pop(1);
+    return NULL;
+}
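
As with the VFS chart in ebpf_process.c, the bandwidth charts negate the "write" side so sent and received bytes plot on opposite sides of the zero axis. One collected iteration is therefore streamed roughly as follows (hypothetical values; the dimension ids come from socket_id_names above):

    BEGIN ebpf.tcp_bandwidth
    SET tcp_sendmsg = -48128
    SET tcp_cleanup_rbuf = 73216
    END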

+ 40 - 0
collectors/ebpf.plugin/ebpf_socket.h

@@ -0,0 +1,40 @@
+#ifndef _NETDATA_EBPF_SOCKET_H_
+# define _NETDATA_EBPF_SOCKET_H_ 1
+
+# define NETDATA_SOCKET_COUNTER 13
+
+# define NETDATA_MAX_SOCKET_VECTOR 5
+
+# define NETDATA_UDP_START 3
+
+typedef enum ebpf_socket_idx {
+    NETDATA_KEY_CALLS_TCP_SENDMSG,
+    NETDATA_KEY_ERROR_TCP_SENDMSG,
+    NETDATA_KEY_BYTES_TCP_SENDMSG,
+
+    NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF,
+    NETDATA_KEY_ERROR_TCP_CLEANUP_RBUF,
+    NETDATA_KEY_BYTES_TCP_CLEANUP_RBUF,
+
+    NETDATA_KEY_CALLS_TCP_CLOSE,
+
+    NETDATA_KEY_CALLS_UDP_RECVMSG,
+    NETDATA_KEY_ERROR_UDP_RECVMSG,
+    NETDATA_KEY_BYTES_UDP_RECVMSG,
+
+    NETDATA_KEY_CALLS_UDP_SENDMSG,
+    NETDATA_KEY_ERROR_UDP_SENDMSG,
+    NETDATA_KEY_BYTES_UDP_SENDMSG
+} ebpf_socket_index_t;
+
+# define NETDATA_SOCKET_GROUP "Socket"
+
+# define NETDATA_TCP_FUNCTION_COUNT "tcp_functions"
+# define NETDATA_TCP_FUNCTION_BYTES "tcp_bandwidth"
+# define NETDATA_TCP_FUNCTION_ERROR "tcp_error"
+# define NETDATA_UDP_FUNCTION_COUNT "udp_functions"
+# define NETDATA_UDP_FUNCTION_BYTES "udp_bandwidth"
+# define NETDATA_UDP_FUNCTION_ERROR "udp_error"
+
+
+#endif

Some files were not shown because too many files changed in this diff