diff --git a/co-re/Makefile b/co-re/Makefile
index ae075e78..8457e18d 100644
--- a/co-re/Makefile
+++ b/co-re/Makefile
@@ -18,12 +18,16 @@ _LIBC ?= glibc
 APPS = cachestat \
        dc \
+       disk \
        fd \
        filesystem \
+       hardirq \
        mdflush \
        mount \
+       oomkill \
        process \
        shm \
+       softirq \
        swap \
        sync \
        #
diff --git a/co-re/disk.bpf.c b/co-re/disk.bpf.c
new file mode 100644
index 00000000..d506ef22
--- /dev/null
+++ b/co-re/disk.bpf.c
@@ -0,0 +1,98 @@
+#include "vmlinux.h"
+#include "bpf_tracing.h"
+#include "bpf_helpers.h"
+
+#include "netdata_core.h"
+#include "netdata_disk.h"
+
+/************************************************************************************
+ *
+ *                                   MAPS
+ *
+ ***********************************************************************************/
+
+// Hardware
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+    __type(key, block_key_t);
+    __type(value, __u64);
+    __uint(max_entries, NETDATA_DISK_HISTOGRAM_LENGTH);
+} tbl_disk_iocall SEC(".maps");
+
+// Temporary use only
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+    __type(key, netdata_disk_key_t);
+    __type(value, __u64);
+    __uint(max_entries, 8192);
+} tmp_disk_tp_stat SEC(".maps");
+
+
+/************************************************************************************
+ *
+ *                                 DISK SECTION
+ *
+ ***********************************************************************************/
+
+SEC("tracepoint/block/block_rq_issue")
+int netdata_block_rq_issue(struct netdata_block_rq_issue *ptr)
+{
+    // blkid generates these and we're not interested in them
+    if (!ptr->dev)
+        return 0;
+
+    netdata_disk_key_t key = {};
+    key.dev = ptr->dev;
+    key.sector = ptr->sector;
+
+    if (key.sector < 0)
+        key.sector = 0;
+
+    __u64 value = bpf_ktime_get_ns();
+
+    bpf_map_update_elem(&tmp_disk_tp_stat, &key, &value, BPF_ANY);
+
+    return 0;
+}
+
+SEC("tracepoint/block/block_rq_complete")
+int netdata_block_rq_complete(struct netdata_block_rq_complete *ptr)
+{
+    __u64 *fill;
+    netdata_disk_key_t key = {};
+    block_key_t blk = {};
+    key.dev = ptr->dev;
+    key.sector = ptr->sector;
+
+    if (key.sector < 0)
+        key.sector = 0;
+
+    fill = bpf_map_lookup_elem(&tmp_disk_tp_stat, &key);
+    if (!fill)
+        return 0;
+
+    // calculate latency and convert to microseconds
+    u64 curr = bpf_ktime_get_ns();
+    __u64 data, *update;
+    curr -= *fill;
+    curr /= 1000;
+
+    blk.bin = libnetdata_select_idx(curr, NETDATA_FS_MAX_BINS_POS);
+    blk.dev = netdata_new_encode_dev(ptr->dev);
+
+    // Update IOPS
+    update = bpf_map_lookup_elem(&tbl_disk_iocall, &blk);
+    if (update) {
+        libnetdata_update_u64(update, 1);
+    } else {
+        data = 1;
+        bpf_map_update_elem(&tbl_disk_iocall, &blk, &data, BPF_ANY);
+    }
+
+    bpf_map_delete_elem(&tmp_disk_tp_stat, &key);
+
+    return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+
diff --git a/co-re/disk.c b/co-re/disk.c
new file mode 100644
index 00000000..b95954af
--- /dev/null
+++ b/co-re/disk.c
@@ -0,0 +1,131 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#define __USE_GNU
+#include <errno.h>
+#include <getopt.h>
+
+#include "netdata_tests.h"
+
+#include "disk.skel.h"
+
+// Copied and redefined from ../includes/netdata_disk.h
+typedef struct block_key {
+    uint32_t bin;
+    uint32_t dev;
+} block_key_t;
+
+static inline int ebpf_load_and_attach(struct disk_bpf *obj)
+{
+    int ret = disk_bpf__load(obj);
+    if (ret) {
+        fprintf(stderr, "failed to load BPF object: %d\n", ret);
+        return -1;
+    }
+
+    ret = disk_bpf__attach(obj);
+    if (!ret) {
+        fprintf(stdout, "Disk loaded successfully\n");
+    }
+
+    return ret;
+}
+
+static void ebpf_update_table(int global)
+{
+    block_key_t idx = { .bin = 0, .dev = 0};
+    unsigned char value = 'a';
+    int ret = bpf_map_update_elem(global, &idx, &value, 0);
+    if (ret)
+        fprintf(stderr, "Cannot insert value into global table.\n");
+}
+
+static int disk_read_array(int fd, int ebpf_nprocs)
+{
+    uint64_t *stored = calloc((size_t)ebpf_nprocs, sizeof(uint64_t));
+    if (!stored)
+        return 2;
+
+    uint64_t counter = 0;
+    block_key_t idx = { .bin = 0, .dev = 0};
+    if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+        int j;
+        for (j = 0; j < ebpf_nprocs; j++) {
+            counter += stored[j];
+        }
+    }
+
+    free(stored);
+
+    if (counter) {
+        fprintf(stdout, "Data stored successfully\n");
+        return 0;
+    }
+
+    return 2;
+}
+
+static int ebpf_disk_tests()
+{
+    struct disk_bpf *obj = NULL;
+    int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
+
+    obj = disk_bpf__open();
+    if (!obj) {
+        fprintf(stderr, "Cannot open or load BPF object\n");
+
+        return 2;
+    }
+
+    int ret = ebpf_load_and_attach(obj);
+    if (!ret) {
+        int fd = bpf_map__fd(obj->maps.tbl_disk_iocall);
+        ebpf_update_table(fd);
+
+        ret = disk_read_array(fd, ebpf_nprocs);
+        if (ret)
+            fprintf(stderr, "Cannot read global table\n");
+    } else
+        fprintf(stderr, "%s", NETDATA_CORE_DEFAULT_ERROR);
+
+    disk_bpf__destroy(obj);
+
+    return ret;
+}
+
+int main(int argc, char **argv)
+{
+    static struct option long_options[] = {
+        {"help", no_argument, 0, 'h' },
+        {0, 0, 0, 0}
+    };
+
+    int option_index = 0;
+    while (1) {
+        int c = getopt_long(argc, argv, "", long_options, &option_index);
+        if (c == -1)
+            break;
+
+        switch (c) {
+            case 'h': {
+                ebpf_tracepoint_help("Disk");
+                exit(0);
+            }
+            default: {
+                break;
+            }
+        }
+    }
+
+    int ret = netdata_ebf_memlock_limit();
+    if (ret) {
+        fprintf(stderr, "Cannot increase memory: error = %d\n", ret);
+        return 1;
+    }
+
+    return ebpf_disk_tests();
+}
+
diff --git a/co-re/hardirq.bpf.c b/co-re/hardirq.bpf.c
new file mode 100644
index 00000000..31348e3b
--- /dev/null
+++ b/co-re/hardirq.bpf.c
@@ -0,0 +1,240 @@
+#include "vmlinux.h"
+#include "bpf_tracing.h"
+#include "bpf_helpers.h"
+
+#include "netdata_core.h"
+#include "netdata_hardirq.h"
+
+/************************************************************************************
+ *
+ *                                   MAPS
+ *
+ ***********************************************************************************/
+
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+    __type(key, hardirq_key_t);
+    __type(value, hardirq_val_t);
+    __uint(max_entries, NETDATA_HARDIRQ_MAX_IRQS);
+} tbl_hardirq SEC(".maps");
+
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __type(key, __u32);
+    __type(value, hardirq_static_val_t);
+    __uint(max_entries, NETDATA_HARDIRQ_STATIC_END);
+} tbl_hardirq_static SEC(".maps");
+
+
+/************************************************************************************
+ *
+ *                               HARDIRQ SECTION
+ *
+ ***********************************************************************************/
+
+SEC("tracepoint/irq/irq_handler_entry")
+int netdata_irq_handler_entry(struct netdata_irq_handler_entry *ptr)
+{
+    hardirq_key_t key = {};
+    hardirq_val_t *valp, val = {};
+
+    key.irq = ptr->irq;
+    valp = bpf_map_lookup_elem(&tbl_hardirq, &key);
+    if (valp) {
+        valp->ts = bpf_ktime_get_ns();
+    } else {
+        val.latency = 0;
+        val.ts = bpf_ktime_get_ns();
+        TP_DATA_LOC_READ_CONST(val.name, ptr, ptr->data_loc_name, NETDATA_HARDIRQ_NAME_LEN);
+        bpf_map_update_elem(&tbl_hardirq, &key, &val, BPF_ANY);
+    }
+
+    return 0;
+}
+
+SEC("tracepoint/irq/irq_handler_exit")
+int netdata_irq_handler_exit(struct netdata_irq_handler_exit *ptr)
+{
+    hardirq_key_t key = {};
+    hardirq_val_t *valp;
+
+    key.irq = ptr->irq;
+    valp = bpf_map_lookup_elem(&tbl_hardirq, &key);
+    if (!valp) {
+        return 0;
+    }
+
+    // get time diff and convert to microseconds.
+    u64 latency = (bpf_ktime_get_ns() - valp->ts) / 1000;
+    libnetdata_update_u64(&valp->latency, latency);
+
+    return 0;
+}
+
+/************************************************************************************
+ *
+ *                               HARDIRQ STATIC
+ *
+ ***********************************************************************************/
+
+#define HARDIRQ_STATIC_GEN_ENTRY(__type, __enum_idx)                    \
+int netdata_irq_##__type(struct netdata_irq_vectors_entry *ptr)         \
+{                                                                       \
+    u32 idx;                                                            \
+    hardirq_static_val_t *valp, val = {};                               \
+                                                                        \
+    idx = __enum_idx;                                                   \
+    valp = bpf_map_lookup_elem(&tbl_hardirq_static, &idx);              \
+    if (valp) {                                                         \
+        valp->ts = bpf_ktime_get_ns();                                  \
+    } else {                                                            \
+        val.latency = 0;                                                \
+        val.ts = bpf_ktime_get_ns();                                    \
+        bpf_map_update_elem(&tbl_hardirq_static, &idx, &val, BPF_ANY);  \
+    }                                                                   \
+                                                                        \
+    return 0;                                                           \
+}
+
+#define HARDIRQ_STATIC_GEN_EXIT(__type, __enum_idx)                     \
+int netdata_irq_##__type(struct netdata_irq_vectors_exit *ptr)          \
+{                                                                       \
+    u32 idx;                                                            \
+    hardirq_static_val_t *valp;                                         \
+                                                                        \
+    idx = __enum_idx;                                                   \
+    valp = bpf_map_lookup_elem(&tbl_hardirq_static, &idx);              \
+    if (!valp) {                                                        \
+        return 0;                                                       \
+    }                                                                   \
+                                                                        \
+    /* get time diff and convert to microseconds. */                    \
+    u64 latency = (bpf_ktime_get_ns() - valp->ts) / 1000;               \
+    libnetdata_update_u64(&valp->latency, latency);                     \
+                                                                        \
+    return 0;                                                           \
+}
+
+SEC("tracepoint/irq_vectors/thermal_apic_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    thermal_apic_entry,
+    NETDATA_HARDIRQ_STATIC_APIC_THERMAL
+)
+SEC("tracepoint/irq_vectors/thermal_apic_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    thermal_apic_exit,
+    NETDATA_HARDIRQ_STATIC_APIC_THERMAL
+)
+
+SEC("tracepoint/irq_vectors/threshold_apic_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    threshold_apic_entry,
+    NETDATA_HARDIRQ_STATIC_APIC_THRESHOLD
+)
+SEC("tracepoint/irq_vectors/threshold_apic_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    threshold_apic_exit,
+    NETDATA_HARDIRQ_STATIC_APIC_THRESHOLD
+)
+
+SEC("tracepoint/irq_vectors/error_apic_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    error_apic_entry,
+    NETDATA_HARDIRQ_STATIC_APIC_ERROR
+)
+SEC("tracepoint/irq_vectors/error_apic_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    error_apic_exit,
+    NETDATA_HARDIRQ_STATIC_APIC_ERROR
+)
+
+SEC("tracepoint/irq_vectors/deferred_error_apic_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    deferred_error_apic_entry,
+    NETDATA_HARDIRQ_STATIC_APIC_DEFERRED_ERROR
+)
+SEC("tracepoint/irq_vectors/deferred_error_apic_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    deferred_error_apic_exit,
+    NETDATA_HARDIRQ_STATIC_APIC_DEFERRED_ERROR
+)
+
+SEC("tracepoint/irq_vectors/spurious_apic_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    spurious_apic_entry,
+    NETDATA_HARDIRQ_STATIC_APIC_SPURIOUS
+)
+SEC("tracepoint/irq_vectors/spurious_apic_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    spurious_apic_exit,
+    NETDATA_HARDIRQ_STATIC_APIC_SPURIOUS
+)
+
+SEC("tracepoint/irq_vectors/call_function_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    call_function_entry,
+    NETDATA_HARDIRQ_STATIC_FUNC_CALL
+)
+SEC("tracepoint/irq_vectors/call_function_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    call_function_exit,
+    NETDATA_HARDIRQ_STATIC_FUNC_CALL
+)
+
+SEC("tracepoint/irq_vectors/call_function_single_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    call_function_single_entry,
+    NETDATA_HARDIRQ_STATIC_FUNC_CALL_SINGLE
+)
+SEC("tracepoint/irq_vectors/call_function_single_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    call_function_single_exit,
+    NETDATA_HARDIRQ_STATIC_FUNC_CALL_SINGLE
+)
+
+SEC("tracepoint/irq_vectors/reschedule_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    reschedule_entry,
+    NETDATA_HARDIRQ_STATIC_RESCHEDULE
+)
+SEC("tracepoint/irq_vectors/reschedule_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    reschedule_exit,
+    NETDATA_HARDIRQ_STATIC_RESCHEDULE
+)
+
+SEC("tracepoint/irq_vectors/local_timer_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    local_timer_entry,
+    NETDATA_HARDIRQ_STATIC_LOCAL_TIMER
+)
+SEC("tracepoint/irq_vectors/local_timer_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    local_timer_exit,
+    NETDATA_HARDIRQ_STATIC_LOCAL_TIMER
+)
+
+SEC("tracepoint/irq_vectors/irq_work_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    irq_work_entry,
+    NETDATA_HARDIRQ_STATIC_IRQ_WORK
+)
+SEC("tracepoint/irq_vectors/irq_work_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    irq_work_exit,
+    NETDATA_HARDIRQ_STATIC_IRQ_WORK
+)
+
+SEC("tracepoint/irq_vectors/x86_platform_ipi_entry")
+HARDIRQ_STATIC_GEN_ENTRY(
+    x86_platform_ipi_entry,
+    NETDATA_HARDIRQ_STATIC_X86_PLATFORM_IPI
+)
+SEC("tracepoint/irq_vectors/x86_platform_ipi_exit")
+HARDIRQ_STATIC_GEN_EXIT(
+    x86_platform_ipi_exit,
+    NETDATA_HARDIRQ_STATIC_X86_PLATFORM_IPI
+)
+
+char _license[] SEC("license") = "GPL";
+
diff --git a/co-re/hardirq.c b/co-re/hardirq.c
new file mode 100644
index 00000000..3baf3761
--- /dev/null
+++ b/co-re/hardirq.c
@@ -0,0 +1,140 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#define __USE_GNU
+#include <errno.h>
+#include <getopt.h>
+
+#include "netdata_tests.h"
+
+#include "hardirq.skel.h"
+
+// Copied and redefined from ../includes/netdata_hardirq.h
+#define NETDATA_HARDIRQ_NAME_LEN 32
+typedef struct hardirq_val {
+    // incremental counter storing the total latency so far.
+    uint64_t latency;
+
+    // temporary timestamp stored at the IRQ entry handler, to be diff'd with a
+    // timestamp at the IRQ exit handler, to get the latency to add to the
+    // `latency` field.
+    uint64_t ts;
+
+    // identifies the IRQ with a human-readable string.
+    char name[NETDATA_HARDIRQ_NAME_LEN];
+} hardirq_val_t;
+
+static inline int ebpf_load_and_attach(struct hardirq_bpf *obj)
+{
+    int ret = hardirq_bpf__load(obj);
+    if (ret) {
+        fprintf(stderr, "failed to load BPF object: %d\n", ret);
+        return -1;
+    }
+
+    ret = hardirq_bpf__attach(obj);
+    if (!ret) {
+        fprintf(stdout, "Hardirq loaded successfully\n");
+    }
+
+    return ret;
+}
+
+static void ebpf_update_table(int global)
+{
+    uint32_t idx = 0;
+    hardirq_val_t value = { .ts = 1, .latency = 1, .name = "netdata_testing" };
+    int ret = bpf_map_update_elem(global, &idx, &value, 0);
+    if (ret)
+        fprintf(stderr, "Cannot insert value into global table.\n");
+}
+
+static int hardirq_read_array(int fd, int ebpf_nprocs)
+{
+    hardirq_val_t *stored = calloc((size_t)ebpf_nprocs, sizeof(hardirq_val_t));
+    if (!stored)
+        return 2;
+
+    uint64_t counter = 0;
+    int idx = 0;
+    if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+        int j;
+        for (j = 0; j < ebpf_nprocs; j++) {
+            counter += stored[j].ts + stored[j].latency;
+        }
+    }
+
+    free(stored);
+
+    if (counter) {
+        fprintf(stdout, "Data stored successfully\n");
+        return 0;
+    }
+
+    return 2;
+}
+
+static int ebpf_hardirq_tests()
+{
+    struct hardirq_bpf *obj = NULL;
+    int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
+
+    obj = hardirq_bpf__open();
+    if (!obj) {
+        fprintf(stderr, "Cannot open or load BPF object\n");
+
+        return 2;
+    }
+
+    int ret = ebpf_load_and_attach(obj);
+    if (!ret) {
+        int fd = bpf_map__fd(obj->maps.tbl_hardirq);
+        ebpf_update_table(fd);
+
+        ret = hardirq_read_array(fd, ebpf_nprocs);
+        if (ret)
+            fprintf(stderr, "Cannot read global table\n");
+    } else
+        fprintf(stderr, "%s", NETDATA_CORE_DEFAULT_ERROR);
+
+    hardirq_bpf__destroy(obj);
+
+    return ret;
+}
+
+int main(int argc, char **argv)
+{
+    static struct option long_options[] = {
+        {"help", no_argument, 0, 'h' },
+        {0, 0, 0, 0}
+    };
+
+    int option_index = 0;
+    while (1) {
+        int c = getopt_long(argc, argv, "", long_options, &option_index);
+        if (c == -1)
+            break;
+
+        switch (c) {
+            case 'h': {
+                ebpf_tracepoint_help("hardirq");
+                exit(0);
+            }
+            default: {
+                break;
+            }
+        }
+    }
+
+    int ret = netdata_ebf_memlock_limit();
+    if (ret) {
+        fprintf(stderr, "Cannot increase memory: error = %d\n", ret);
+        return 1;
+    }
+
+    return ebpf_hardirq_tests();
+}
+
diff --git a/co-re/oomkill.bpf.c b/co-re/oomkill.bpf.c
new file mode 100644
index 00000000..3ebc16a1
--- /dev/null
+++ b/co-re/oomkill.bpf.c
@@ -0,0 +1,36 @@
+#include "vmlinux.h"
+#include "bpf_tracing.h"
+#include "bpf_helpers.h"
+
+#include "netdata_core.h"
+#include "netdata_oomkill.h"
+
+/************************************************************************************
+ *
+ *                                   MAPS
+ *
+ ***********************************************************************************/
+
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+    __type(key, int);
+    __type(value, __u8);
+    __uint(max_entries, NETDATA_OOMKILL_MAX_ENTRIES);
+} tbl_oomkill SEC(".maps");
+
+/***********************************************************************************
+ *
+ *                        OOMKILL SECTION(tracepoint)
+ *
+ ***********************************************************************************/
+
+SEC("tracepoint/oom/mark_victim")
+int netdata_oom_mark_victim(struct netdata_oom_mark_victim_entry *ptr) {
+    int key = ptr->pid;
+    u8 val = 0;
+    bpf_map_update_elem(&tbl_oomkill, &key, &val, BPF_ANY);
+    return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+
diff --git a/co-re/oomkill.c b/co-re/oomkill.c
new file mode 100644
index 00000000..fa8b1295
--- /dev/null
+++ b/co-re/oomkill.c
@@ -0,0 +1,125 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#define __USE_GNU
+#include <errno.h>
+#include <getopt.h>
+
+#include "netdata_tests.h"
+
+#include "oomkill.skel.h"
+
+static inline int ebpf_load_and_attach(struct oomkill_bpf *obj)
+{
+    int ret = oomkill_bpf__load(obj);
+    if (ret) {
+        fprintf(stderr, "failed to load BPF object: %d\n", ret);
+        return -1;
+    }
+
+    ret = oomkill_bpf__attach(obj);
+    if (!ret) {
+        fprintf(stdout, "OOMkill loaded successfully\n");
+    }
+
+    return ret;
+}
+
+static void ebpf_update_table(int global)
+{
+    int idx = 0;
+    unsigned char value = 'a';
+    int ret = bpf_map_update_elem(global, &idx, &value, 0);
+    if (ret)
+        fprintf(stderr, "Cannot insert value into global table.\n");
+}
+
+static int oomkill_read_array(int fd, int ebpf_nprocs)
+{
+    unsigned char *stored = calloc((size_t)ebpf_nprocs, sizeof(unsigned char));
+    if (!stored)
+        return 2;
+
+    unsigned char counter = 0;
+    int idx = 0;
+    if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+        int j;
+        for (j = 0; j < ebpf_nprocs; j++) {
+            counter += stored[j];
+        }
+    }
+
+    free(stored);
+
+    if (counter) {
+        fprintf(stdout, "Data stored successfully\n");
+        return 0;
+    }
+
+    return 2;
+}
+
+static int ebpf_oomkill_tests()
+{
+    struct oomkill_bpf *obj = NULL;
+    int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
+
+    obj = oomkill_bpf__open();
+    if (!obj) {
+        fprintf(stderr, "Cannot open or load BPF object\n");
+
+        return 2;
+    }
+
+    int ret = ebpf_load_and_attach(obj);
+    if (!ret) {
+        int fd = bpf_map__fd(obj->maps.tbl_oomkill);
+        ebpf_update_table(fd);
+
+        ret = oomkill_read_array(fd, ebpf_nprocs);
+        if (ret)
+            fprintf(stderr, "Cannot read global table\n");
+    } else
+        fprintf(stderr, "%s", NETDATA_CORE_DEFAULT_ERROR);
+
+    oomkill_bpf__destroy(obj);
+
+    return ret;
+}
+
+int main(int argc, char **argv)
+{
+    static struct option long_options[] = {
+        {"help", no_argument, 0, 'h' },
+        {0, 0, 0, 0}
+    };
+
+    int option_index = 0;
+    while (1) {
+        int c = getopt_long(argc, argv, "", long_options, &option_index);
+        if (c == -1)
+            break;
+
+        switch (c) {
+            case 'h': {
+                ebpf_tracepoint_help("OOMkill");
+                exit(0);
+            }
+            default: {
+                break;
+            }
+        }
+    }
+
+    int ret = netdata_ebf_memlock_limit();
+    if (ret) {
+        fprintf(stderr, "Cannot increase memory: error = %d\n", ret);
+        return 1;
+    }
+
+    return ebpf_oomkill_tests();
+}
+
diff --git a/co-re/softirq.bpf.c b/co-re/softirq.bpf.c
new file mode 100644
index 00000000..b8939aa3
--- /dev/null
+++ b/co-re/softirq.bpf.c
@@ -0,0 +1,74 @@
+#include "vmlinux.h"
+#include "bpf_tracing.h"
+#include "bpf_helpers.h"
+
+#include "netdata_core.h"
+#include "netdata_softirq.h"
+
+/************************************************************************************
+ *
+ *                                   MAPS
+ *
+ ***********************************************************************************/
+
+struct {
+    __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+    __type(key, __u32);
+    __type(value, softirq_val_t);
+    __uint(max_entries, NETDATA_SOFTIRQ_MAX_IRQS);
+} tbl_softirq SEC(".maps");
+
+/***********************************************************************************
+ *
+ *                        SOFTIRQ SECTION(tracepoint)
+ *
+ ***********************************************************************************/
+
+SEC("tracepoint/irq/softirq_entry")
+int netdata_softirq_entry(struct netdata_softirq_entry *ptr)
+{
+    softirq_val_t *valp, val = {};
+    u32 vec = ptr->vec;
+
+    // out-of-range index.
+    if (vec > NETDATA_SOFTIRQ_MAX_IRQS - 1) {
+        return 0;
+    }
+
+    valp = bpf_map_lookup_elem(&tbl_softirq, &vec);
+    if (valp) {
+        valp->ts = bpf_ktime_get_ns();
+    } else {
+        val.latency = 0;
+        val.ts = bpf_ktime_get_ns();
+        bpf_map_update_elem(&tbl_softirq, &vec, &val, BPF_ANY);
+    }
+
+    return 0;
+}
+
+SEC("tracepoint/irq/softirq_exit")
+int netdata_softirq_exit(struct netdata_softirq_exit *ptr)
+{
+    softirq_val_t *valp;
+    u32 vec = ptr->vec;
+
+    // out-of-range index.
+    if (vec > NETDATA_SOFTIRQ_MAX_IRQS - 1) {
+        return 0;
+    }
+
+    valp = bpf_map_lookup_elem(&tbl_softirq, &vec);
+    if (!valp) {
+        return 0;
+    }
+
+    // get time diff and convert to microseconds.
+    u64 latency = (bpf_ktime_get_ns() - valp->ts) / 1000;
+    libnetdata_update_u64(&valp->latency, latency);
+
+    return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+
diff --git a/co-re/softirq.c b/co-re/softirq.c
new file mode 100644
index 00000000..b9190bf0
--- /dev/null
+++ b/co-re/softirq.c
@@ -0,0 +1,136 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define _GNU_SOURCE /* See feature_test_macros(7) */
+#define __USE_GNU
+#include <errno.h>
+#include <getopt.h>
+
+#include "netdata_tests.h"
+
+#include "softirq.skel.h"
+
+// Copied and redefined from ../includes/netdata_softirq.h
+typedef struct softirq_val {
+    // incremental counter storing the total latency so far.
+    uint64_t latency;
+
+    // temporary timestamp stored at the entry handler, to be diff'd with a
+    // timestamp at the exit handler, to get the latency to add to the
+    // `latency` field.
+    uint64_t ts;
+} softirq_val_t;
+
+static inline int ebpf_load_and_attach(struct softirq_bpf *obj)
+{
+    int ret = softirq_bpf__load(obj);
+    if (ret) {
+        fprintf(stderr, "failed to load BPF object: %d\n", ret);
+        return -1;
+    }
+
+    ret = softirq_bpf__attach(obj);
+    if (!ret) {
+        fprintf(stdout, "Softirq loaded successfully\n");
+    }
+
+    return ret;
+}
+
+static void ebpf_update_table(int global)
+{
+    uint32_t idx = 0;
+    softirq_val_t value = { .ts = 1, .latency = 1 };
+    int ret = bpf_map_update_elem(global, &idx, &value, 0);
+    if (ret)
+        fprintf(stderr, "Cannot insert value into global table.\n");
+}
+
+static int softirq_read_array(int fd, int ebpf_nprocs)
+{
+    softirq_val_t *stored = calloc((size_t)ebpf_nprocs, sizeof(softirq_val_t));
+    if (!stored)
+        return 2;
+
+    uint64_t counter = 0;
+    int idx = 0;
+    if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+        int j;
+        for (j = 0; j < ebpf_nprocs; j++) {
+            counter += stored[j].ts + stored[j].latency;
+        }
+    }
+
+    free(stored);
+
+    if (counter) {
+        fprintf(stdout, "Data stored successfully\n");
+        return 0;
+    }
+
+    return 2;
+}
+
+static int ebpf_softirq_tests()
+{
+    struct softirq_bpf *obj = NULL;
+    int ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
+
+    obj = softirq_bpf__open();
+    if (!obj) {
+        fprintf(stderr, "Cannot open or load BPF object\n");
+
+        return 2;
+    }
+
+    int ret = ebpf_load_and_attach(obj);
+    if (!ret) {
+        int fd = bpf_map__fd(obj->maps.tbl_softirq);
+        ebpf_update_table(fd);
+
+        ret = softirq_read_array(fd, ebpf_nprocs);
+        if (ret)
+            fprintf(stderr, "Cannot read global table\n");
+    } else
+        fprintf(stderr, "%s", NETDATA_CORE_DEFAULT_ERROR);
+
+    softirq_bpf__destroy(obj);
+
+    return ret;
+}
+
+int main(int argc, char **argv)
+{
+    static struct option long_options[] = {
+        {"help", no_argument, 0, 'h' },
+        {0, 0, 0, 0}
+    };
+
+    int option_index = 0;
+    while (1) {
+        int c = getopt_long(argc, argv, "", long_options, &option_index);
+        if (c == -1)
+            break;
+
+        switch (c) {
+            case 'h': {
+                ebpf_tracepoint_help("softirq");
+                exit(0);
+            }
+            default: {
+                break;
+            }
+        }
+    }
+
+    int ret = netdata_ebf_memlock_limit();
+    if (ret) {
+        fprintf(stderr, "Cannot increase memory: error = %d\n", ret);
+        return 1;
+    }
+
+    return ebpf_softirq_tests();
+}
+
diff --git a/includes/netdata_disk.h b/includes/netdata_disk.h
index 230c33fa..8b9d9d19 100644
--- a/includes/netdata_disk.h
+++ b/includes/netdata_disk.h
@@ -8,6 +8,21 @@
 #define NETDATA_DISK_MAX_HD 256L
 #define NETDATA_DISK_HISTOGRAM_LENGTH (NETDATA_FS_MAX_BINS * NETDATA_DISK_MAX_HD)
 
+// Device number decode/encode macros extracted from: https://elixir.bootlin.com/linux/v5.10.8/source/include/linux/kdev_t.h#L7
+#define NETDATA_MINORBITS 20
+#define NETDATA_MINORMASK ((1U << NETDATA_MINORBITS) - 1)
+
+#define NETDATA_MAJOR(dev) ((unsigned int) ((dev) >> NETDATA_MINORBITS))
+#define NETDATA_MINOR(dev) ((unsigned int) ((dev) & NETDATA_MINORMASK))
+#define NETDATA_MKDEV(ma,mi) (((ma) << NETDATA_MINORBITS) | (mi))
+
+static __always_inline u32 netdata_new_encode_dev(dev_t dev)
+{
+    unsigned major = NETDATA_MAJOR(dev);
+    unsigned minor = NETDATA_MINOR(dev);
+    return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
+}
+
 // /sys/kernel/debug/tracing/events/block/block_rq_issue/
 struct netdata_block_rq_issue {
     u64 pad; // This is not used with eBPF
diff --git a/includes/netdata_tests.h b/includes/netdata_tests.h
index 5ceeb897..7e20fe07 100644
--- a/includes/netdata_tests.h
+++ b/includes/netdata_tests.h
@@ -80,6 +80,12 @@ static inline void ebpf_print_help(char *name, char *info, int has_trampoline) {
                     " probes will be used.\n");
 }
 
+static inline void ebpf_tracepoint_help(char *name) {
+    fprintf(stdout, "%s tests whether it is possible to use tracepoints on the host.\n\n"
+                    "--help (-h): Prints this help.\n", name);
+}
+
+
 static inline int ebpf_find_function_id(struct btf *bf, char *name) {
     const struct btf_type *type = netdata_find_bpf_attach_type(bf);