author		Daniel Borkmann <daniel@iogearbox.net>	2018-05-11 01:48:19 +0200
committer	Daniel Borkmann <daniel@iogearbox.net>	2018-05-11 01:48:32 +0200
commit		a84880ef4352c61896028448d809e32f8646628d (patch)
tree		880940e6af0c8352acc8c5f2158ec1457a2dc08c /tools/lib/bpf/libbpf.c
parent		cb9c28ef579debbc51e48b37f2534390f5da8467 (diff)
parent		be5bca44aa6b37b88e900d5f5f155911d6984d86 (diff)
Merge branch 'bpf-perf-rb-libbpf'
Jakub Kicinski says:
====================
This series started out as a follow-up to the bpftool perf event
dumping patches.

As suggested by Daniel, patch 1 makes use of PERF_SAMPLE_TIME to
simplify the code and improve the accuracy of timestamps.

The next patches move the perf event reading loop into libbpf, as
suggested by Alexei. One user of this new function is bpftool, which
links with libbpf nicely; the other, unfortunately, is in samples/bpf,
so the remaining patches make samples/bpf link against the full
libbpf.a (not just a handful of objects). Once we have the full power
of libbpf at our disposal, we can convert some of the XDP samples to
use the libbpf loader instead of bpf_load.c. My understanding is that
this is the desired direction, at least for networking code.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
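
To make the new interface concrete, here is a minimal caller-side sketch of
the event loop as moved into libbpf. The struct layout and the helper names
(event_sample, print_event, drain_ring) as well as the perf_event_open() +
mmap() setup are illustrative assumptions, not part of this series; only
bpf_perf_event_read_simple(), bpf_perf_event_print_t and the
LIBBPF_PERF_EVENT_* values come from the patches. The 'time' field is
available precisely because patch 1 switches the samples to PERF_SAMPLE_TIME:

#include <stdio.h>
#include <stdlib.h>
#include <linux/perf_event.h>
#include "libbpf.h"	/* tools/lib/bpf */

/* The ring is assumed to have been set up elsewhere with
 * perf_event_open() + mmap() (one control page plus 2^n data pages),
 * using something like:
 *
 *	struct perf_event_attr attr = {
 *		.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
 *		.type	     = PERF_TYPE_SOFTWARE,
 *		.config	     = PERF_COUNT_SW_BPF_OUTPUT,
 *	};
 */

/* Layout of one PERF_RECORD_SAMPLE under the sample_type above; the
 * struct name is illustrative. 'time' exists because of
 * PERF_SAMPLE_TIME, 'size'/'data' because of PERF_SAMPLE_RAW.
 */
struct event_sample {
	struct perf_event_header header;
	__u64 time;
	__u32 size;
	unsigned char data[];
};

static enum bpf_perf_event_ret print_event(void *event, void *priv)
{
	struct event_sample *e = event;

	if (e->header.type == PERF_RECORD_SAMPLE)
		printf("[%llu] %u bytes of raw data\n",
		       (unsigned long long)e->time, e->size);

	return LIBBPF_PERF_EVENT_CONT;	/* keep consuming records */
}

/* ring: base of the mmap()ed area (control page first);
 * data_size: mmap length minus the control page.
 */
static int drain_ring(void *ring, size_t data_size, size_t page_size)
{
	void *copy_mem = NULL;	/* scratch space for wrapped records */
	size_t copy_len = 0;
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(ring, data_size, page_size,
					 &copy_mem, &copy_len,
					 print_event, NULL);
	free(copy_mem);
	return ret == LIBBPF_PERF_EVENT_ERROR ? -1 : 0;
}

A consumer would typically poll() the event fd and call drain_ring() on each
wakeup, once per per-CPU ring.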
Diffstat (limited to 'tools/lib/bpf/libbpf.c')
-rw-r--r--	tools/lib/bpf/libbpf.c	107
1 file changed, 100 insertions(+), 7 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7bcdca13083a..df54c4c9e48a 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -31,6 +31,7 @@
 #include <unistd.h>
 #include <fcntl.h>
 #include <errno.h>
+#include <perf-sys.h>
 #include <asm/unistd.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
@@ -1437,9 +1438,37 @@ bpf_object__load_progs(struct bpf_object *obj)
 	return 0;
 }
 
-static int bpf_object__validate(struct bpf_object *obj)
+static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
+{
+	switch (type) {
+	case BPF_PROG_TYPE_SOCKET_FILTER:
+	case BPF_PROG_TYPE_SCHED_CLS:
+	case BPF_PROG_TYPE_SCHED_ACT:
+	case BPF_PROG_TYPE_XDP:
+	case BPF_PROG_TYPE_CGROUP_SKB:
+	case BPF_PROG_TYPE_CGROUP_SOCK:
+	case BPF_PROG_TYPE_LWT_IN:
+	case BPF_PROG_TYPE_LWT_OUT:
+	case BPF_PROG_TYPE_LWT_XMIT:
+	case BPF_PROG_TYPE_SOCK_OPS:
+	case BPF_PROG_TYPE_SK_SKB:
+	case BPF_PROG_TYPE_CGROUP_DEVICE:
+	case BPF_PROG_TYPE_SK_MSG:
+	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+		return false;
+	case BPF_PROG_TYPE_UNSPEC:
+	case BPF_PROG_TYPE_KPROBE:
+	case BPF_PROG_TYPE_TRACEPOINT:
+	case BPF_PROG_TYPE_PERF_EVENT:
+	case BPF_PROG_TYPE_RAW_TRACEPOINT:
+	default:
+		return true;
+	}
+}
+
+static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
 {
-	if (obj->kern_version == 0) {
+	if (needs_kver && obj->kern_version == 0) {
 		pr_warning("%s doesn't provide kernel version\n",
 			   obj->path);
 		return -LIBBPF_ERRNO__KVERSION;
@@ -1448,7 +1477,8 @@ static int bpf_object__validate(struct bpf_object *obj)
 }
 
 static struct bpf_object *
-__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
+__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
+		   bool needs_kver)
 {
 	struct bpf_object *obj;
 	int err;
@@ -1466,7 +1496,7 @@ __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
 	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
 	CHECK_ERR(bpf_object__elf_collect(obj), err, out);
 	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
-	CHECK_ERR(bpf_object__validate(obj), err, out);
+	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
 	bpf_object__elf_finish(obj);
 
 	return obj;
@@ -1483,7 +1513,7 @@ struct bpf_object *bpf_object__open(const char *path)
 
 	pr_debug("loading %s\n", path);
 
-	return __bpf_object__open(path, NULL, 0);
+	return __bpf_object__open(path, NULL, 0, true);
 }
 
 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
@@ -1506,7 +1536,7 @@ struct bpf_object *bpf_object__open_buffer(void *obj_buf,
 	pr_debug("loading object '%s' from buffer\n",
 		 name);
 
-	return __bpf_object__open(name, obj_buf, obj_buf_sz);
+	return __bpf_object__open(name, obj_buf, obj_buf_sz, true);
 }
 
 int bpf_object__unload(struct bpf_object *obj)
@@ -2163,8 +2193,11 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
 
 	if (!attr)
 		return -EINVAL;
+	if (!attr->file)
+		return -EINVAL;
 
-	obj = bpf_object__open(attr->file);
+	obj = __bpf_object__open(attr->file, NULL, 0,
+				 bpf_prog_type__needs_kver(attr->prog_type));
 	if (IS_ERR(obj))
 		return -ENOENT;
 
@@ -2210,3 +2243,63 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
 	*prog_fd = bpf_program__fd(first_prog);
 	return 0;
 }
+
+enum bpf_perf_event_ret
+bpf_perf_event_read_simple(void *mem, unsigned long size,
+			   unsigned long page_size, void **buf, size_t *buf_len,
+			   bpf_perf_event_print_t fn, void *priv)
+{
+	volatile struct perf_event_mmap_page *header = mem;
+	__u64 data_tail = header->data_tail;
+	__u64 data_head = header->data_head;
+	void *base, *begin, *end;
+	int ret;
+
+	asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
+	if (data_head == data_tail)
+		return LIBBPF_PERF_EVENT_CONT;
+
+	base = ((char *)header) + page_size;
+
+	begin = base + data_tail % size;
+	end = base + data_head % size;
+
+	while (begin != end) {
+		struct perf_event_header *ehdr;
+
+		ehdr = begin;
+		if (begin + ehdr->size > base + size) {
+			long len = base + size - begin;
+
+			if (*buf_len < ehdr->size) {
+				free(*buf);
+				*buf = malloc(ehdr->size);
+				if (!*buf) {
+					ret = LIBBPF_PERF_EVENT_ERROR;
+					break;
+				}
+				*buf_len = ehdr->size;
+			}
+
+			memcpy(*buf, begin, len);
+			memcpy(*buf + len, base, ehdr->size - len);
+			ehdr = (void *)*buf;
+			begin = base + ehdr->size - len;
+		} else if (begin + ehdr->size == base + size) {
+			begin = base;
+		} else {
+			begin += ehdr->size;
+		}
+
+		ret = fn(ehdr, priv);
+		if (ret != LIBBPF_PERF_EVENT_CONT)
+			break;
+
+		data_tail += ehdr->size;
+	}
+
+	__sync_synchronize(); /* smp_mb() */
+	header->data_tail = data_tail;
+
+	return ret;
+}