
Address issues on `EC2` (eBPF). (#14902)

thiagoftsm committed 1 year ago · commit 24880f912a

collectors/cgroups.plugin/sys_fs_cgroup.c  (+1, -0)

@@ -4790,6 +4790,7 @@ static void cgroup_main_cleanup(void *ptr) {
     }
 
     if (shm_cgroup_ebpf.header) {
+        shm_cgroup_ebpf.header->cgroup_root_count = 0;
         munmap(shm_cgroup_ebpf.header, shm_cgroup_ebpf.header->body_length);
     }
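
The one-line change above zeroes the published entry count before the cgroups plugin tears down the shared region, so the eBPF plugin, which reads the same memory from another process, stops iterating stale entries. A minimal sketch of the pattern, with field names taken from the diff and the rest of the struct assumed:

    #include <stddef.h>
    #include <sys/mman.h>

    /* Field names follow the diff above; netdata's real header has more members. */
    typedef struct shm_header {
        int cgroup_root_count;     /* number of valid body entries readers may use */
        size_t body_length;        /* total length of the mapped region            */
    } shm_header_t;

    /* Publisher-side teardown: tell readers "nothing here" before releasing the mapping. */
    static void shm_publisher_cleanup(shm_header_t *header)
    {
        if (!header)
            return;

        header->cgroup_root_count = 0;         /* readers now see zero entries */
        munmap(header, header->body_length);   /* unmap only after the reset   */
    }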
 

collectors/ebpf.plugin/ebpf.c  (+246, -63)

@@ -28,11 +28,22 @@ int running_on_kernel = 0;
 int ebpf_nprocs;
 int isrh = 0;
 int main_thread_id = 0;
+int process_pid_fd = -1;
 
 pthread_mutex_t lock;
 pthread_mutex_t ebpf_exit_cleanup;
 pthread_mutex_t collect_data_mutex;
-pthread_cond_t collect_data_cond_var;
+
+struct netdata_static_thread cgroup_integration_thread = {
+    .name = "EBPF CGROUP INT",
+    .config_section = NULL,
+    .config_name = NULL,
+    .env_name = NULL,
+    .enabled = 1,
+    .thread = NULL,
+    .init_routine = NULL,
+    .start_routine = NULL
+};
 
 ebpf_module_t ebpf_modules[] = {
     { .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
@@ -451,6 +462,14 @@ ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, .
 
 #ifdef LIBBPF_MAJOR_VERSION
 struct btf *default_btf = NULL;
+struct cachestat_bpf *cachestat_bpf_obj = NULL;
+struct dc_bpf *dc_bpf_obj = NULL;
+struct fd_bpf *fd_bpf_obj = NULL;
+struct mount_bpf *mount_bpf_obj = NULL;
+struct shm_bpf *shm_bpf_obj = NULL;
+struct socket_bpf *socket_bpf_obj = NULL;
+struct swap_bpf *bpf_obj = NULL;
+struct vfs_bpf *vfs_bpf_obj = NULL;
 #else
 void *default_btf = NULL;
 #endif
@@ -515,10 +534,12 @@ static void ebpf_exit()
 #endif
     printf("DISABLE\n");
 
+    pthread_mutex_lock(&mutex_cgroup_shm);
     if (shm_ebpf_cgroup.header) {
-        munmap(shm_ebpf_cgroup.header, shm_ebpf_cgroup.header->body_length);
+        ebpf_unmap_cgroup_shared_memory();
         shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
     }
+    pthread_mutex_unlock(&mutex_cgroup_shm);
 
     exit(0);
 }
@@ -545,6 +566,126 @@ static void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link
         bpf_object__close(objects);
 }
 
+/**
+ * Unload Unique maps
+ *
+ * This function unload all BPF maps from threads using one unique BPF object.
+ */
+static void ebpf_unload_unique_maps()
+{
+    int i;
+    for (i = 0; ebpf_modules[i].thread_name; i++) {
+        if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) {
+            if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING)
+                error("Cannot unload maps for thread %s, because it is not stopped.", ebpf_modules[i].thread_name);
+
+            continue;
+        }
+
+        ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
+        switch (i) {
+            case EBPF_MODULE_CACHESTAT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+                if (cachestat_bpf_obj)
+                    cachestat_bpf__destroy(cachestat_bpf_obj);
+#endif
+                break;
+            }
+            case EBPF_MODULE_DCSTAT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+                if (dc_bpf_obj)
+                    dc_bpf__destroy(dc_bpf_obj);
+#endif
+                break;
+            }
+            case EBPF_MODULE_FD_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+                if (fd_bpf_obj)
+                    fd_bpf__destroy(fd_bpf_obj);
+#endif
+                break;
+            }
+            case EBPF_MODULE_MOUNT_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+                if (mount_bpf_obj)
+                    mount_bpf__destroy(mount_bpf_obj);
+#endif
+                break;
+            }
+            case EBPF_MODULE_SHM_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+                if (shm_bpf_obj)
+                    shm_bpf__destroy(shm_bpf_obj);
+#endif
+                break;
+            }
+            case EBPF_MODULE_SOCKET_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+                if (socket_bpf_obj)
+                    socket_bpf__destroy(socket_bpf_obj);
+#endif
+                break;
+            }
+            case EBPF_MODULE_SWAP_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+                if (bpf_obj)
+                    swap_bpf__destroy(bpf_obj);
+#endif
+                break;
+            }
+            case EBPF_MODULE_VFS_IDX: {
+#ifdef LIBBPF_MAJOR_VERSION
+                if (vfs_bpf_obj)
+                    vfs_bpf__destroy(vfs_bpf_obj);
+#endif
+                break;
+            }
+            case EBPF_MODULE_PROCESS_IDX:
+            case EBPF_MODULE_DISK_IDX:
+            case EBPF_MODULE_HARDIRQ_IDX:
+            case EBPF_MODULE_SOFTIRQ_IDX:
+            case EBPF_MODULE_OOMKILL_IDX:
+            case EBPF_MODULE_MDFLUSH_IDX:
+            default:
+                continue;
+        }
+    }
+}
+
+/**
+ * Unload filesystem maps
+ *
+ * This function unload all BPF maps from filesystem thread.
+ */
+static void ebpf_unload_filesystems()
+{
+    if (ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
+        ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING)
+        return;
+
+    int i;
+    for (i = 0; localfs[i].filesystem != NULL; i++) {
+        ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
+    }
+}
+
+/**
+ * Unload sync maps
+ *
+ * This function unload all BPF maps from sync thread.
+ */
+static void ebpf_unload_sync()
+{
+    if (ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
+        ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_RUNNING)
+        return;
+
+    int i;
+    for (i = 0; local_syscalls[i].syscall != NULL; i++) {
+        ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
+    }
+}
+
 int ebpf_exit_plugin = 0;
 /**
  * Close the collector gracefully
@@ -556,7 +697,6 @@ static void ebpf_stop_threads(int sig)
     UNUSED(sig);
     static int only_one = 0;
 
-    int i;
     // Child thread should be closed by itself.
     pthread_mutex_lock(&ebpf_exit_cleanup);
     if (main_thread_id != gettid() || only_one) {
@@ -564,13 +704,26 @@ static void ebpf_stop_threads(int sig)
         return;
     }
     only_one = 1;
-    for (i = 0; ebpf_threads[i].name != NULL; i++) {
-        if (ebpf_threads[i].enabled != NETDATA_THREAD_EBPF_STOPPED)
-            netdata_thread_cancel(*ebpf_threads[i].thread);
+    int i;
+    for (i = 0; ebpf_modules[i].thread_name != NULL; i++) {
+        if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING) {
+            netdata_thread_cancel(*ebpf_modules[i].thread->thread);
+#ifdef NETDATA_DEV_MODE
+            info("Sending cancel for thread %s", ebpf_modules[i].thread_name);
+#endif
+        }
     }
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 
+    pthread_mutex_lock(&mutex_cgroup_shm);
+    netdata_thread_cancel(*cgroup_integration_thread.thread);
+#ifdef NETDATA_DEV_MODE
+    info("Sending cancel for thread %s", cgroup_integration_thread.name);
+#endif
+    pthread_mutex_unlock(&mutex_cgroup_shm);
+
     ebpf_exit_plugin = 1;
+
     usec_t max = USEC_PER_SEC, step = 100000;
     while (i && max) {
         max -= step;
@@ -578,42 +731,18 @@ static void ebpf_stop_threads(int sig)
         i = 0;
         int j;
         pthread_mutex_lock(&ebpf_exit_cleanup);
-        for (j = 0; ebpf_threads[j].name != NULL; j++) {
-            if (ebpf_threads[j].enabled != NETDATA_THREAD_EBPF_STOPPED)
+        for (j = 0; ebpf_modules[j].thread_name != NULL; j++) {
+            if (ebpf_modules[j].enabled == NETDATA_THREAD_EBPF_RUNNING)
                 i++;
         }
         pthread_mutex_unlock(&ebpf_exit_cleanup);
     }
 
-    if (!i)  {
-        //Unload threads(except sync and filesystem)
-        pthread_mutex_lock(&ebpf_exit_cleanup);
-        for (i = 0; ebpf_threads[i].name != NULL; i++) {
-            if (ebpf_threads[i].enabled == NETDATA_THREAD_EBPF_STOPPED && i != EBPF_MODULE_FILESYSTEM_IDX &&
-                i != EBPF_MODULE_SYNC_IDX)
-                ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
-        }
-        pthread_mutex_unlock(&ebpf_exit_cleanup);
-
-        //Unload filesystem
-        pthread_mutex_lock(&ebpf_exit_cleanup);
-        if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled  == NETDATA_THREAD_EBPF_STOPPED) {
-            for (i = 0; localfs[i].filesystem != NULL; i++) {
-                ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
-            }
-        }
-        pthread_mutex_unlock(&ebpf_exit_cleanup);
-
-        //Unload Sync
-        pthread_mutex_lock(&ebpf_exit_cleanup);
-        if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled  == NETDATA_THREAD_EBPF_STOPPED) {
-            for (i = 0; local_syscalls[i].syscall != NULL; i++) {
-                ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
-            }
-        }
-        pthread_mutex_unlock(&ebpf_exit_cleanup);
-
-    }
+    pthread_mutex_lock(&ebpf_exit_cleanup);
+    ebpf_unload_unique_maps();
+    ebpf_unload_filesystems();
+    ebpf_unload_sync();
+    pthread_mutex_unlock(&ebpf_exit_cleanup);
 
     ebpf_exit();
 }
@@ -624,6 +753,58 @@ static void ebpf_stop_threads(int sig)
  *
  *****************************************************************/
 
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param root a pointer for the targets.
+ */
+static void ebpf_create_apps_charts(struct ebpf_target *root)
+{
+    if (unlikely(!ebpf_all_pids))
+        return;
+
+    struct ebpf_target *w;
+    int newly_added = 0;
+
+    for (w = root; w; w = w->next) {
+        if (w->target)
+            continue;
+
+        if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
+            struct ebpf_pid_on_target *pid_on_target;
+
+            fprintf(
+                stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
+                (w->processes == 1) ? "" : "es");
+
+            for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
+                fprintf(stderr, " %d", pid_on_target->pid);
+            }
+
+            fputc('\n', stderr);
+        }
+
+        if (!w->exposed && w->processes) {
+            newly_added++;
+            w->exposed = 1;
+            if (debug_enabled || w->debug_enabled)
+                debug_log_int("%s just added - regenerating charts.", w->name);
+        }
+    }
+
+    if (!newly_added)
+        return;
+
+    int counter;
+    for (counter = 0; ebpf_modules[counter].thread_name; counter++) {
+        ebpf_module_t *current = &ebpf_modules[counter];
+        if (current->enabled == NETDATA_THREAD_EBPF_RUNNING && current->apps_charts && current->apps_routine)
+            current->apps_routine(current, root);
+    }
+}
+
 /**
  * Get a value from a structure.
  *
@@ -1044,7 +1225,7 @@ void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *p
 
         pio[i].dimension = dim[i];
         pio[i].name = name[i];
-        pio[i].algorithm = strdupz(ebpf_algorithms[algorithm[i]]);
+        pio[i].algorithm = ebpf_algorithms[algorithm[i]];
         if (publish_prev) {
             publish_prev->next = &pio[i];
         }
@@ -1442,21 +1623,13 @@ static void read_local_addresses()
  * Start Pthread Variable
  *
  * This function starts all pthread variables.
- *
- * @return It returns 0 on success and -1.
  */
-int ebpf_start_pthread_variables()
+void ebpf_start_pthread_variables()
 {
     pthread_mutex_init(&lock, NULL);
     pthread_mutex_init(&ebpf_exit_cleanup, NULL);
     pthread_mutex_init(&collect_data_mutex, NULL);
-
-    if (pthread_cond_init(&collect_data_cond_var, NULL)) {
-        error("Cannot start conditional variable to control Apps charts.");
-        return -1;
-    }
-
-    return 0;
+    pthread_mutex_init(&mutex_cgroup_shm, NULL);
 }
 
 /**
@@ -2320,10 +2493,7 @@ int main(int argc, char **argv)
     signal(SIGTERM, ebpf_stop_threads);
     signal(SIGPIPE, ebpf_stop_threads);
 
-    if (ebpf_start_pthread_variables()) {
-        error("Cannot start mutex to control overall charts.");
-        ebpf_exit();
-    }
+    ebpf_start_pthread_variables();
 
     netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
     if(verify_netdata_host_prefix() == -1) ebpf_exit(6);
@@ -2342,6 +2512,12 @@ int main(int argc, char **argv)
 
     ebpf_set_static_routine();
 
+    cgroup_integration_thread.thread = mallocz(sizeof(netdata_thread_t));
+    cgroup_integration_thread.start_routine = ebpf_cgroup_integration;
+
+    netdata_thread_create(cgroup_integration_thread.thread, cgroup_integration_thread.name,
+                          NETDATA_THREAD_OPTION_DEFAULT, ebpf_cgroup_integration, NULL);
+
     int i;
     for (i = 0; ebpf_threads[i].name != NULL; i++) {
         struct netdata_static_thread *st = &ebpf_threads[i];
@@ -2352,30 +2528,37 @@ int main(int argc, char **argv)
         if (em->enabled || !i) {
             st->thread = mallocz(sizeof(netdata_thread_t));
             em->thread_id = i;
-            st->enabled = NETDATA_THREAD_EBPF_RUNNING;
+            em->enabled = NETDATA_THREAD_EBPF_RUNNING;
             netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em);
         } else {
-            st->enabled = NETDATA_THREAD_EBPF_STOPPED;
+            em->enabled = NETDATA_THREAD_EBPF_NOT_RUNNING;
         }
     }
 
     usec_t step = USEC_PER_SEC;
-    int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
     heartbeat_t hb;
     heartbeat_init(&hb);
+    int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
+    int update_apps_list = update_apps_every - 1;
     //Plugin will be killed when it receives a signal
     while (!ebpf_exit_plugin) {
         (void)heartbeat_next(&hb, step);
 
-        // We are using a small heartbeat time to wake up thread,
-        // but we should not update so frequently the shared memory data
-        if (++counter >=  NETDATA_EBPF_CGROUP_UPDATE) {
-            counter = 0;
-            if (!shm_ebpf_cgroup.header)
-                ebpf_map_cgroup_shared_memory();
-
-            ebpf_parse_cgroup_shm_data();
+        pthread_mutex_lock(&ebpf_exit_cleanup);
+        if (ebpf_modules[i].enabled == NETDATA_THREAD_EBPF_RUNNING && process_pid_fd != -1) {
+            pthread_mutex_lock(&collect_data_mutex);
+            if (++update_apps_list == update_apps_every) {
+                update_apps_list = 0;
+                cleanup_exited_pids();
+                collect_data_for_all_processes(process_pid_fd);
+
+                pthread_mutex_lock(&lock);
+                ebpf_create_apps_charts(apps_groups_root_target);
+                pthread_mutex_unlock(&lock);
+            }
+            pthread_mutex_unlock(&collect_data_mutex);
         }
+        pthread_mutex_unlock(&ebpf_exit_cleanup);
     }
 
     ebpf_stop_threads(0);
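
Summarizing the ebpf.c changes: shutdown now cancels threads by walking ebpf_modules[] and unloads maps through the new ebpf_unload_* helpers, cgroup shared-memory polling moves to the dedicated ebpf_cgroup_integration thread, and the main loop keeps a once-per-second heartbeat that refreshes the apps PID table only every few ticks. A stripped-down sketch of that tick/counter pattern, with netdata's heartbeat helpers replaced by a plain sleep():

    #include <signal.h>
    #include <unistd.h>

    /* exit flag flipped by a signal handler, analogous to ebpf_exit_plugin */
    static volatile sig_atomic_t exit_plugin = 0;

    /* Wake every second, run the heavy work only every `update_every` ticks. */
    static void periodic_loop(void (*heavy_work)(void), int update_every)
    {
        int counter = update_every - 1;    /* first tick already triggers the work */

        while (!exit_plugin) {
            sleep(1);                      /* stands in for heartbeat_next(&hb, USEC_PER_SEC) */

            if (++counter >= update_every) {
                counter = 0;
                heavy_work();              /* e.g. refresh the apps PID table */
            }
        }
    }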

collectors/ebpf.plugin/ebpf.d.conf  (+1, -1)

@@ -62,7 +62,7 @@
     process = yes
     shm = yes
     socket = no
-    softirq = no
+    softirq = yes
     sync = yes
     swap = yes
     vfs = no

collectors/ebpf.plugin/ebpf.h  (+21, -9)

@@ -36,6 +36,26 @@
 #define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf"
 #define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf"
 
+#ifdef LIBBPF_MAJOR_VERSION // BTF code
+#include "includes/cachestat.skel.h"
+#include "includes/dc.skel.h"
+#include "includes/fd.skel.h"
+#include "includes/mount.skel.h"
+#include "includes/shm.skel.h"
+#include "includes/socket.skel.h"
+#include "includes/swap.skel.h"
+#include "includes/vfs.skel.h"
+
+extern struct cachestat_bpf *cachestat_bpf_obj;
+extern struct dc_bpf *dc_bpf_obj;
+extern struct fd_bpf *fd_bpf_obj;
+extern struct mount_bpf *mount_bpf_obj;
+extern struct shm_bpf *shm_bpf_obj;
+extern struct socket_bpf *socket_bpf_obj;
+extern struct swap_bpf *bpf_obj;
+extern struct vfs_bpf *vfs_bpf_obj;
+#endif
+
 typedef struct netdata_syscall_stat {
     unsigned long bytes;               // total number of bytes
     uint64_t call;                     // total number of calls
@@ -108,12 +128,6 @@ typedef struct ebpf_tracepoint {
     char *event;
 } ebpf_tracepoint_t;
 
-enum ebpf_threads_status {
-    NETDATA_THREAD_EBPF_RUNNING,
-    NETDATA_THREAD_EBPF_STOPPING,
-    NETDATA_THREAD_EBPF_STOPPED
-};
-
 // Copied from musl header
 #ifndef offsetof
 #if __GNUC__ > 3
@@ -178,9 +192,9 @@ extern int ebpf_nprocs;
 extern int running_on_kernel;
 extern int isrh;
 extern char *ebpf_plugin_dir;
+extern int process_pid_fd;
 
 extern pthread_mutex_t collect_data_mutex;
-extern pthread_cond_t collect_data_cond_var;
 
 // Common functions
 void ebpf_global_labels(netdata_syscall_stat_t *is,
@@ -243,8 +257,6 @@ void ebpf_create_charts_on_apps(char *name,
 
 void write_end_chart();
 
-void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps);
-
 int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp);
 int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp);
 uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps);
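
The skeleton headers and extern object pointers added above let ebpf.c destroy every CO-RE object from one place (ebpf_unload_unique_maps). For context, a skeleton generated by bpftool always exposes the same open/load/attach/destroy calls; a hedged sketch for a hypothetical skeleton named example:

    /* Lifecycle of a libbpf skeleton generated with
     *   bpftool gen skeleton example.bpf.o > example.skel.h
     * "example" is a hypothetical name; the real skeletons included above are
     * cachestat, dc, fd, mount, shm, socket, swap and vfs. */
    #include "example.skel.h"

    static struct example_bpf *example_bpf_obj = NULL;

    static int example_load_and_attach(void)
    {
        example_bpf_obj = example_bpf__open();         /* allocate and parse the object  */
        if (!example_bpf_obj)
            return -1;

        if (example_bpf__load(example_bpf_obj) ||      /* create maps, verify programs   */
            example_bpf__attach(example_bpf_obj)) {    /* attach programs to their hooks */
            example_bpf__destroy(example_bpf_obj);
            example_bpf_obj = NULL;
            return -1;
        }

        return 0;
    }

    static void example_unload(void)
    {
        if (example_bpf_obj)
            example_bpf__destroy(example_bpf_obj);     /* detach and free maps/programs  */
    }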

collectors/ebpf.plugin/ebpf_apps.c  (+263, -0)

@@ -8,6 +8,23 @@
 // ARAL vectors used to speed up processing
 ARAL *ebpf_aral_apps_pid_stat = NULL;
 ARAL *ebpf_aral_process_stat = NULL;
+ARAL *ebpf_aral_socket_pid = NULL;
+ARAL *ebpf_aral_cachestat_pid = NULL;
+ARAL *ebpf_aral_dcstat_pid = NULL;
+ARAL *ebpf_aral_vfs_pid = NULL;
+ARAL *ebpf_aral_fd_pid = NULL;
+ARAL *ebpf_aral_shm_pid = NULL;
+
+// ----------------------------------------------------------------------------
+// Global vectors used with apps
+ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
+netdata_publish_cachestat_t **cachestat_pid = NULL;
+netdata_publish_dcstat_t **dcstat_pid = NULL;
+netdata_publish_swap_t **swap_pid = NULL;
+netdata_publish_vfs_t **vfs_pid = NULL;
+netdata_fd_stat_t **fd_pid = NULL;
+netdata_publish_shm_t **shm_pid = NULL;
+ebpf_process_stat_t **global_process_stats = NULL;
 
 /**
  * eBPF ARAL Init
@@ -55,6 +72,12 @@ void ebpf_pid_stat_release(struct ebpf_pid_stat *stat)
     aral_freez(ebpf_aral_apps_pid_stat, stat);
 }
 
+/*****************************************************************
+ *
+ *  PROCESS ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
 /**
  * eBPF process stat get
  *
@@ -79,6 +102,246 @@ void ebpf_process_stat_release(ebpf_process_stat_t *stat)
     aral_freez(ebpf_aral_process_stat, stat);
 }
 
+/*****************************************************************
+ *
+ *  SOCKET ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF socket Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_socket_aral_init()
+{
+    ebpf_aral_socket_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_NAME, sizeof(ebpf_socket_publish_apps_t));
+}
+
+/**
+ * eBPF socket get
+ *
+ * Get a ebpf_socket_publish_apps_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void)
+{
+    ebpf_socket_publish_apps_t *target = aral_mallocz(ebpf_aral_socket_pid);
+    memset(target, 0, sizeof(ebpf_socket_publish_apps_t));
+    return target;
+}
+
+/**
+ * eBPF socket release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_socket_release(ebpf_socket_publish_apps_t *stat)
+{
+    aral_freez(ebpf_aral_socket_pid, stat);
+}
+
+/*****************************************************************
+ *
+ *  CACHESTAT ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF Cachestat Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_cachestat_aral_init()
+{
+    ebpf_aral_cachestat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_CACHESTAT_ARAL_NAME, sizeof(netdata_publish_cachestat_t));
+}
+
+/**
+ * eBPF publish cachestat get
+ *
+ * Get a netdata_publish_cachestat_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void)
+{
+    netdata_publish_cachestat_t *target = aral_mallocz(ebpf_aral_cachestat_pid);
+    memset(target, 0, sizeof(netdata_publish_cachestat_t));
+    return target;
+}
+
+/**
+ * eBPF cachestat release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_cachestat_release(netdata_publish_cachestat_t *stat)
+{
+    aral_freez(ebpf_aral_cachestat_pid, stat);
+}
+
+/*****************************************************************
+ *
+ *  DCSTAT ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF directory cache Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_dcstat_aral_init()
+{
+    ebpf_aral_dcstat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_DCSTAT_ARAL_NAME, sizeof(netdata_publish_dcstat_t));
+}
+
+/**
+ * eBPF publish dcstat get
+ *
+ * Get a netdata_publish_dcstat_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void)
+{
+    netdata_publish_dcstat_t *target = aral_mallocz(ebpf_aral_dcstat_pid);
+    memset(target, 0, sizeof(netdata_publish_dcstat_t));
+    return target;
+}
+
+/**
+ * eBPF dcstat release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_dcstat_release(netdata_publish_dcstat_t *stat)
+{
+    aral_freez(ebpf_aral_dcstat_pid, stat);
+}
+
+/*****************************************************************
+ *
+ *  VFS ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF VFS Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_vfs_aral_init()
+{
+    ebpf_aral_vfs_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_VFS_ARAL_NAME, sizeof(netdata_publish_vfs_t));
+}
+
+/**
+ * eBPF publish VFS get
+ *
+ * Get a netdata_publish_vfs_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_publish_vfs_t *ebpf_vfs_get(void)
+{
+    netdata_publish_vfs_t *target = aral_mallocz(ebpf_aral_vfs_pid);
+    memset(target, 0, sizeof(netdata_publish_vfs_t));
+    return target;
+}
+
+/**
+ * eBPF VFS release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_vfs_release(netdata_publish_vfs_t *stat)
+{
+    aral_freez(ebpf_aral_vfs_pid, stat);
+}
+
+/*****************************************************************
+ *
+ *  FD ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF file descriptor Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_fd_aral_init()
+{
+    ebpf_aral_fd_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_FD_ARAL_NAME, sizeof(netdata_fd_stat_t));
+}
+
+/**
+ * eBPF publish file descriptor get
+ *
+ * Get a netdata_fd_stat_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_fd_stat_t *ebpf_fd_stat_get(void)
+{
+    netdata_fd_stat_t *target = aral_mallocz(ebpf_aral_fd_pid);
+    memset(target, 0, sizeof(netdata_fd_stat_t));
+    return target;
+}
+
+/**
+ * eBPF file descriptor release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_fd_release(netdata_fd_stat_t *stat)
+{
+    aral_freez(ebpf_aral_fd_pid, stat);
+}
+
+/*****************************************************************
+ *
+ *  SHM ARAL FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * eBPF shared memory Aral init
+ *
+ * Initiallize array allocator that will be used when integration with apps is enabled.
+ */
+void ebpf_shm_aral_init()
+{
+    ebpf_aral_shm_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SHM_ARAL_NAME, sizeof(netdata_publish_shm_t));
+}
+
+/**
+ * eBPF shared memory get
+ *
+ * Get a netdata_publish_shm_t entry to be used with a specific PID.
+ *
+ * @return it returns the address on success.
+ */
+netdata_publish_shm_t *ebpf_shm_stat_get(void)
+{
+    netdata_publish_shm_t *target = aral_mallocz(ebpf_aral_shm_pid);
+    memset(target, 0, sizeof(netdata_publish_shm_t));
+    return target;
+}
+
+/**
+ * eBPF shared memory release
+ *
+ * @param stat Release a target after usage.
+ */
+void ebpf_shm_release(netdata_publish_shm_t *stat)
+{
+    aral_freez(ebpf_aral_shm_pid, stat);
+}
+
 // ----------------------------------------------------------------------------
 // internal flags
 // handled in code (automatically set)
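
Each init/get/release trio added above wraps netdata's array allocator (ARAL) the same way: create one pool per structure type, hand out zeroed slots, and return them when a PID disappears. A condensed sketch of that pattern for a hypothetical my_stat_t; ARAL, aral_mallocz(), aral_freez() and ebpf_allocate_pid_aral() are assumed from libnetdata:

    #include <stdint.h>
    #include <string.h>
    #include "ebpf_apps.h"   /* assumed to pull in the ARAL API */

    typedef struct my_stat {
        uint64_t calls;      /* placeholder payload */
    } my_stat_t;

    static ARAL *ebpf_aral_my_pid = NULL;

    void ebpf_my_aral_init(void)
    {
        /* one pool per structure type, sized for per-PID allocations */
        ebpf_aral_my_pid = ebpf_allocate_pid_aral("ebpf-mystat", sizeof(my_stat_t));
    }

    my_stat_t *ebpf_my_stat_get(void)
    {
        my_stat_t *target = aral_mallocz(ebpf_aral_my_pid);   /* reuse a slot from the pool */
        memset(target, 0, sizeof(*target));                   /* callers expect zeroed data */
        return target;
    }

    void ebpf_my_release(my_stat_t *stat)
    {
        aral_freez(ebpf_aral_my_pid, stat);                   /* slot goes back to the pool */
    }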

collectors/ebpf.plugin/ebpf_apps.h  (+41, -0)

@@ -218,6 +218,10 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd);
 extern ebpf_process_stat_t **global_process_stats;
 extern netdata_publish_cachestat_t **cachestat_pid;
 extern netdata_publish_dcstat_t **dcstat_pid;
+extern netdata_publish_swap_t **swap_pid;
+extern netdata_publish_vfs_t **vfs_pid;
+extern netdata_fd_stat_t **fd_pid;
+extern netdata_publish_shm_t **shm_pid;
 
 // The default value is at least 32 times smaller than maximum number of PIDs allowed on system,
 // this is only possible because we are using ARAL (https://github.com/netdata/netdata/tree/master/libnetdata/aral).
@@ -226,11 +230,48 @@ extern netdata_publish_dcstat_t **dcstat_pid;
 #endif
 #define NETDATA_EBPF_ALLOC_MIN_ELEMENTS 256
 
+// ARAL Sectiion
 extern void ebpf_aral_init(void);
 
 extern ebpf_process_stat_t *ebpf_process_stat_get(void);
 extern void ebpf_process_stat_release(ebpf_process_stat_t *stat);
 
+extern ARAL *ebpf_aral_socket_pid;
+void ebpf_socket_aral_init();
+ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void);
+void ebpf_socket_release(ebpf_socket_publish_apps_t *stat);
+
+extern ARAL *ebpf_aral_cachestat_pid;
+void ebpf_cachestat_aral_init();
+netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void);
+void ebpf_cachestat_release(netdata_publish_cachestat_t *stat);
+
+extern ARAL *ebpf_aral_dcstat_pid;
+void ebpf_dcstat_aral_init();
+netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void);
+void ebpf_dcstat_release(netdata_publish_dcstat_t *stat);
+
+extern ARAL *ebpf_aral_vfs_pid;
+void ebpf_vfs_aral_init();
+netdata_publish_vfs_t *ebpf_vfs_get(void);
+void ebpf_vfs_release(netdata_publish_vfs_t *stat);
+
+extern ARAL *ebpf_aral_fd_pid;
+void ebpf_fd_aral_init();
+netdata_fd_stat_t *ebpf_fd_stat_get(void);
+void ebpf_fd_release(netdata_fd_stat_t *stat);
+
+extern ARAL *ebpf_aral_shm_pid;
+void ebpf_shm_aral_init();
+netdata_publish_shm_t *ebpf_shm_stat_get(void);
+void ebpf_shm_release(netdata_publish_shm_t *stat);
+
+// ARAL Section end
+
+// Threads integrated with apps
+extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
+// Threads integrated with apps
+
 #include "libnetdata/threads/threads.h"
 
 // ARAL variables
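
The per-PID pointer arrays declared here (fd_pid, shm_pid, vfs_pid, and so on) are indexed by PID and filled lazily by each thread. A hedged usage sketch built only on the declarations above; the pid_max bound and the open_call field are assumptions:

    /* Lazy per-PID allocation, as a thread would do when apps integration is on. */
    #include "ebpf_apps.h"

    void ebpf_fd_account_open(uint32_t pid, uint32_t pid_max)
    {
        if (pid >= pid_max)               /* arrays are assumed to hold pid_max entries */
            return;

        netdata_fd_stat_t *entry = fd_pid[pid];
        if (!entry) {
            entry = ebpf_fd_stat_get();   /* zeroed slot from the fd ARAL pool */
            fd_pid[pid] = entry;
        }

        entry->open_call++;               /* field name assumed for illustration */
    }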

collectors/ebpf.plugin/ebpf_cachestat.c  (+5, -63)

@@ -3,12 +3,6 @@
 #include "ebpf.h"
 #include "ebpf_cachestat.h"
 
-// ----------------------------------------------------------------------------
-// ARAL vectors used to speed up processing
-ARAL *ebpf_aral_cachestat_pid = NULL;
-
-netdata_publish_cachestat_t **cachestat_pid;
-
 static char *cachestat_counter_dimension_name[NETDATA_CACHESTAT_END] = { "ratio", "dirty", "hit",
                                                                          "miss" };
 static netdata_syscall_stat_t cachestat_counter_aggregated_data[NETDATA_CACHESTAT_END];
@@ -50,10 +44,6 @@ static char *account_page[NETDATA_CACHESTAT_ACCOUNT_DIRTY_END] ={ "account_page_
                                                                   "__set_page_dirty", "__folio_mark_dirty"  };
 
 #ifdef LIBBPF_MAJOR_VERSION
-#include "includes/cachestat.skel.h" // BTF code
-
-static struct cachestat_bpf *bpf_obj = NULL;
-
 /**
  * Disable probe
  *
@@ -337,20 +327,14 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf
 static void ebpf_cachestat_free(ebpf_module_t *em)
 {
     pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
+    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 
-    ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated);
-
     freez(cachestat_vector);
     freez(cachestat_values);
 
-#ifdef LIBBPF_MAJOR_VERSION
-    if (bpf_obj)
-        cachestat_bpf__destroy(bpf_obj);
-#endif
     pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
@@ -368,46 +352,6 @@ static void ebpf_cachestat_exit(void *ptr)
     ebpf_cachestat_free(em);
 }
 
-/*****************************************************************
- *
- *  ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF Cachestat Aral init
- *
- * Initiallize array allocator that will be used when integration with apps is enabled.
- */
-static inline void ebpf_cachestat_aral_init()
-{
-    ebpf_aral_cachestat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_CACHESTAT_ARAL_NAME, sizeof(netdata_publish_cachestat_t));
-}
-
-/**
- * eBPF publish cachestat get
- *
- * Get a netdata_publish_cachestat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void)
-{
-    netdata_publish_cachestat_t *target = aral_mallocz(ebpf_aral_cachestat_pid);
-    memset(target, 0, sizeof(netdata_publish_cachestat_t));
-    return target;
-}
-
-/**
- * eBPF cachestat release
- *
- * @param stat Release a target after usage.
- */
-void ebpf_cachestat_release(netdata_publish_cachestat_t *stat)
-{
-    aral_freez(ebpf_aral_cachestat_pid, stat);
-}
-
 /*****************************************************************
  *
  *  COMMON FUNCTIONS
@@ -1282,11 +1226,11 @@ static int ebpf_cachestat_load_bpf(ebpf_module_t *em)
     }
 #ifdef LIBBPF_MAJOR_VERSION
     else {
-        bpf_obj = cachestat_bpf__open();
-        if (!bpf_obj)
+        cachestat_bpf_obj = cachestat_bpf__open();
+        if (!cachestat_bpf_obj)
             ret = -1;
         else
-            ret = ebpf_cachestat_load_and_attach(bpf_obj, em);
+            ret = ebpf_cachestat_load_and_attach(cachestat_bpf_obj, em);
     }
 #endif
 
@@ -1315,7 +1259,6 @@ void *ebpf_cachestat_thread(void *ptr)
     ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em);
 
     if (ebpf_cachestat_set_internal_value()) {
-        em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
         goto endcachestat;
     }
 
@@ -1323,7 +1266,6 @@ void *ebpf_cachestat_thread(void *ptr)
     ebpf_adjust_thread_load(em, default_btf);
 #endif
     if (ebpf_cachestat_load_bpf(em)) {
-        em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
         goto endcachestat;
     }
 

collectors/ebpf.plugin/ebpf_cgroup.c  (+110, -35)

@@ -6,6 +6,7 @@
 #include "ebpf_cgroup.h"
 
 ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL;
+static void *ebpf_mapped_memory = NULL;
 int send_cgroup_chart = 0;
 
 // --------------------------------------------------------------------------------------------------------------------
@@ -19,7 +20,7 @@ int send_cgroup_chart = 0;
  * @param fd       file descriptor returned after shm_open was called.
  * @param length   length of the shared memory
  *
- * @return It returns a pointer to the region mapped.
+ * @return It returns a pointer to the region mapped on success and MAP_FAILED otherwise.
  */
 static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
 {
@@ -36,6 +37,16 @@ static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
     return value;
 }
 
+/**
+ * Unmap Shared Memory
+ *
+ * Unmap shared memory used to integrate eBPF and cgroup plugin
+ */
+void ebpf_unmap_cgroup_shared_memory()
+{
+    munmap(ebpf_mapped_memory, shm_ebpf_cgroup.header->body_length);
+}
+
 /**
  * Map cgroup shared memory
  *
@@ -56,40 +67,47 @@ void ebpf_map_cgroup_shared_memory()
     limit_try++;
     next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC;
 
-    shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
     if (shm_fd_ebpf_cgroup < 0) {
-        if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
-            error("Shared memory was not initialized, integration between processes won't happen.");
+        shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
+        if (shm_fd_ebpf_cgroup < 0) {
+            if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
+                error("Shared memory was not initialized, integration between processes won't happen.");
 
-        return;
+            return;
+        }
     }
 
     // Map only header
-    shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
-                                                                                             sizeof(netdata_ebpf_cgroup_shm_header_t));
-    if (!shm_ebpf_cgroup.header) {
-        limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
+    void *mapped = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
+                                                                                   sizeof(netdata_ebpf_cgroup_shm_header_t));
+    if (unlikely(mapped == SEM_FAILED)) {
         return;
     }
+    netdata_ebpf_cgroup_shm_header_t *header = mapped;
 
-    size_t length =  shm_ebpf_cgroup.header->body_length;
+    size_t length =  header->body_length;
 
-    munmap(shm_ebpf_cgroup.header, sizeof(netdata_ebpf_cgroup_shm_header_t));
+    munmap(header, sizeof(netdata_ebpf_cgroup_shm_header_t));
 
-    shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
-    if (!shm_ebpf_cgroup.header) {
-        limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
+    if (length <= ((sizeof(netdata_ebpf_cgroup_shm_header_t) + sizeof(netdata_ebpf_cgroup_shm_body_t)))) {
         return;
     }
-    shm_ebpf_cgroup.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_ebpf_cgroup.header +
-                                                              sizeof(netdata_ebpf_cgroup_shm_header_t));
+
+    ebpf_mapped_memory = (void *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
+    if (unlikely(ebpf_mapped_memory == MAP_FAILED)) {
+        return;
+    }
+    shm_ebpf_cgroup.header = ebpf_mapped_memory;
+    shm_ebpf_cgroup.body = ebpf_mapped_memory + sizeof(netdata_ebpf_cgroup_shm_header_t);
 
     shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1);
 
     if (shm_sem_ebpf_cgroup == SEM_FAILED) {
         error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
-        munmap(shm_ebpf_cgroup.header, length);
+        limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
+        munmap(ebpf_mapped_memory, length);
         shm_ebpf_cgroup.header = NULL;
+        shm_ebpf_cgroup.body = NULL;
         close(shm_fd_ebpf_cgroup);
         shm_fd_ebpf_cgroup = -1;
         shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
@@ -258,32 +276,38 @@ void ebpf_reset_updated_var()
 void ebpf_parse_cgroup_shm_data()
 {
     static int previous = 0;
-    if (shm_ebpf_cgroup.header) {
-        sem_wait(shm_sem_ebpf_cgroup);
-        int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
+    if (!shm_ebpf_cgroup.header || shm_sem_ebpf_cgroup == SEM_FAILED)
+        return;
 
-        pthread_mutex_lock(&mutex_cgroup_shm);
+    sem_wait(shm_sem_ebpf_cgroup);
+    int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
+    if (end <= 0) {
+        sem_post(shm_sem_ebpf_cgroup);
+        return;
+    }
 
-        ebpf_remove_cgroup_target_update_list();
+    pthread_mutex_lock(&mutex_cgroup_shm);
+    ebpf_remove_cgroup_target_update_list();
 
-        ebpf_reset_updated_var();
+    ebpf_reset_updated_var();
 
-        for (i = 0; i < end; i++) {
-            netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
-            if (ptr->enabled) {
-                ebpf_cgroup_target_t *ect =  ebpf_cgroup_find_or_create(ptr);
-                ebpf_update_pid_link_list(ect, ptr->path);
-            }
+    for (i = 0; i < end; i++) {
+        netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
+        if (ptr->enabled) {
+            ebpf_cgroup_target_t *ect =  ebpf_cgroup_find_or_create(ptr);
+            ebpf_update_pid_link_list(ect, ptr->path);
         }
-        send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count;
-        previous = shm_ebpf_cgroup.header->cgroup_root_count;
+    }
+    send_cgroup_chart = previous != shm_ebpf_cgroup.header->cgroup_root_count;
+    previous = shm_ebpf_cgroup.header->cgroup_root_count;
+    sem_post(shm_sem_ebpf_cgroup);
+    pthread_mutex_unlock(&mutex_cgroup_shm);
 #ifdef NETDATA_DEV_MODE
-        error("Updating cgroup %d (Previous: %d, Current: %d)", send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count);
+    info("Updating cgroup %d (Previous: %d, Current: %d)",
+         send_cgroup_chart, previous, shm_ebpf_cgroup.header->cgroup_root_count);
 #endif
-        pthread_mutex_unlock(&mutex_cgroup_shm);
 
-        sem_post(shm_sem_ebpf_cgroup);
-    }
+    sem_post(shm_sem_ebpf_cgroup);
 }
 
 // --------------------------------------------------------------------------------------------------------------------
@@ -315,3 +339,54 @@ void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *fam
             fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
     }
 }
+
+// --------------------------------------------------------------------------------------------------------------------
+// Cgroup main thread
+
+/**
+ * CGROUP exit
+ *
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_cgroup_exit(void *ptr)
+{
+    UNUSED(ptr);
+}
+
+/**
+ * Cgroup integratin
+ *
+ * Thread responsible to call functions responsible to sync data between plugins.
+ *
+ * @param ptr It is a NULL value for this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_cgroup_integration(void *ptr)
+{
+    netdata_thread_cleanup_push(ebpf_cgroup_exit, ptr);
+
+    usec_t step = USEC_PER_SEC;
+    int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
+    heartbeat_t hb;
+    heartbeat_init(&hb);
+    //Plugin will be killed when it receives a signal
+    while (!ebpf_exit_plugin) {
+        (void)heartbeat_next(&hb, step);
+
+        // We are using a small heartbeat time to wake up thread,
+        // but we should not update so frequently the shared memory data
+        if (++counter >=  NETDATA_EBPF_CGROUP_UPDATE) {
+            counter = 0;
+            if (!shm_ebpf_cgroup.header)
+                ebpf_map_cgroup_shared_memory();
+            else
+                ebpf_parse_cgroup_shm_data();
+        }
+    }
+
+    netdata_thread_cleanup_pop(1);
+    return NULL;
+}
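
The rewritten ebpf_map_cgroup_shared_memory() keeps the shm file descriptor across retries, maps only the fixed-size header to learn body_length, then remaps the full region and validates every mmap() result before publishing the pointers. A minimal sketch of that two-step mapping, with the header reduced to the fields the code reads and a hypothetical shm name:

    #include <fcntl.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    typedef struct shm_header {
        int cgroup_root_count;
        size_t body_length;           /* header + body, as written by the publisher */
    } shm_header_t;

    /* Returns the full mapping on success, NULL otherwise. "/my_shm" is illustrative. */
    static void *map_shared_region(int *fd_keep)
    {
        if (*fd_keep < 0) {
            *fd_keep = shm_open("/my_shm", O_RDWR, 0660);   /* reuse the fd on later retries */
            if (*fd_keep < 0)
                return NULL;
        }

        /* Step 1: map only the header to discover the total length. */
        shm_header_t *hdr = mmap(NULL, sizeof(*hdr), PROT_READ | PROT_WRITE, MAP_SHARED, *fd_keep, 0);
        if (hdr == MAP_FAILED)
            return NULL;

        size_t length = hdr->body_length;
        munmap(hdr, sizeof(*hdr));

        if (length <= sizeof(shm_header_t))                 /* publisher has not filled it yet */
            return NULL;

        /* Step 2: map the whole region; header and body share one mapping. */
        void *full = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, *fd_keep, 0);
        return (full == MAP_FAILED) ? NULL : full;
    }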

collectors/ebpf.plugin/ebpf_cgroup.h  (+2, -0)

@@ -64,6 +64,8 @@ void ebpf_map_cgroup_shared_memory();
 void ebpf_parse_cgroup_shm_data();
 void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
                                           char *algorithm, char *context, char *module, int update_every);
+void *ebpf_cgroup_integration(void *ptr);
+void ebpf_unmap_cgroup_shared_memory();
 extern int send_cgroup_chart;
 
 #endif /* NETDATA_EBPF_CGROUP_H */
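
These declarations are consumed by ebpf.c: ebpf_cgroup_integration() becomes the body of the dedicated cgroup thread, and ebpf_unmap_cgroup_shared_memory() runs under mutex_cgroup_shm during ebpf_exit(). A bare pthread sketch of the spawn/cancel pairing the plugin uses (netdata wraps it in netdata_thread_create()/netdata_thread_cancel()):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
        (void) arg;
        for (;;)
            sleep(1);                    /* sleep() is a cancellation point */
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;
        if (pthread_create(&tid, NULL, worker, NULL)) {   /* ~ netdata_thread_create() */
            perror("pthread_create");
            return 1;
        }

        pthread_cancel(tid);                              /* ~ netdata_thread_cancel() */
        pthread_join(tid, NULL);                          /* wait for cleanup handlers */
        return 0;
    }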

collectors/ebpf.plugin/ebpf_dcstat.c  (+5, -62)

@@ -3,16 +3,11 @@
 #include "ebpf.h"
 #include "ebpf_dcstat.h"
 
-// ----------------------------------------------------------------------------
-// ARAL vectors used to speed up processing
-ARAL *ebpf_aral_dcstat_pid = NULL;
-
 static char *dcstat_counter_dimension_name[NETDATA_DCSTAT_IDX_END] = { "ratio", "reference", "slow", "miss" };
 static netdata_syscall_stat_t dcstat_counter_aggregated_data[NETDATA_DCSTAT_IDX_END];
 static netdata_publish_syscall_t dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_END];
 
 netdata_dcstat_pid_t *dcstat_vector = NULL;
-netdata_publish_dcstat_t **dcstat_pid = NULL;
 
 static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END];
 static netdata_idx_t *dcstat_values = NULL;
@@ -49,10 +44,6 @@ netdata_ebpf_targets_t dc_targets[] = { {.name = "lookup_fast", .mode = EBPF_LOA
                                         {.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
 
 #ifdef LIBBPF_MAJOR_VERSION
-#include "includes/dc.skel.h" // BTF code
-
-static struct dc_bpf *bpf_obj = NULL;
-
 /**
  * Disable probe
  *
@@ -298,23 +289,16 @@ void ebpf_dcstat_clean_names()
 static void ebpf_dcstat_free(ebpf_module_t *em )
 {
     pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
+    em->enabled = NETDATA_THREAD_EBPF_STOPPING;
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 
     freez(dcstat_vector);
     freez(dcstat_values);
 
-    ebpf_cleanup_publish_syscall(dcstat_counter_publish_aggregated);
-
     ebpf_dcstat_clean_names();
 
-#ifdef LIBBPF_MAJOR_VERSION
-    if (bpf_obj)
-        dc_bpf__destroy(bpf_obj);
-#endif
-
     pthread_mutex_lock(&ebpf_exit_cleanup);
-    em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+    em->enabled = NETDATA_THREAD_EBPF_STOPPED;
     pthread_mutex_unlock(&ebpf_exit_cleanup);
 }
 
@@ -331,46 +315,6 @@ static void ebpf_dcstat_exit(void *ptr)
     ebpf_dcstat_free(em);
 }
 
-/*****************************************************************
- *
- *  ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF directory cache Aral init
- *
- * Initiallize array allocator that will be used when integration with apps is enabled.
- */
-static inline void ebpf_dcstat_aral_init()
-{
-    ebpf_aral_dcstat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_DCSTAT_ARAL_NAME, sizeof(netdata_publish_dcstat_t));
-}
-
-/**
- * eBPF publish dcstat get
- *
- * Get a netdata_publish_dcstat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void)
-{
-    netdata_publish_dcstat_t *target = aral_mallocz(ebpf_aral_dcstat_pid);
-    memset(target, 0, sizeof(netdata_publish_dcstat_t));
-    return target;
-}
-
-/**
- * eBPF dcstat release
- *
- * @param stat Release a target after usage.
- */
-void ebpf_dcstat_release(netdata_publish_dcstat_t *stat)
-{
-    aral_freez(ebpf_aral_dcstat_pid, stat);
-}
-
 /*****************************************************************
  *
  *  APPS
@@ -1150,11 +1094,11 @@ static int ebpf_dcstat_load_bpf(ebpf_module_t *em)
     }
 #ifdef LIBBPF_MAJOR_VERSION
     else {
-        bpf_obj = dc_bpf__open();
-        if (!bpf_obj)
+        dc_bpf_obj = dc_bpf__open();
+        if (!dc_bpf_obj)
             ret = -1;
         else
-            ret = ebpf_dc_load_and_attach(bpf_obj, em);
+            ret = ebpf_dc_load_and_attach(dc_bpf_obj, em);
     }
 #endif
 
@@ -1188,7 +1132,6 @@ void *ebpf_dcstat_thread(void *ptr)
     ebpf_adjust_thread_load(em, default_btf);
 #endif
     if (ebpf_dcstat_load_bpf(em)) {
-        em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
         goto enddcstat;
     }
 

Some files were not shown because too many files changed in this diff