Diffstat (limited to 'tools/lib/bpf/ringbuf.c')
-rw-r--r--  tools/lib/bpf/ringbuf.c | 167
1 file changed, 132 insertions(+), 35 deletions(-)
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 02199364db13..00ec4837a06d 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -34,7 +34,7 @@ struct ring {
struct ring_buffer {
struct epoll_event *events;
- struct ring *rings;
+ struct ring **rings;
size_t page_size;
int epoll_fd;
int ring_cnt;
@@ -57,7 +57,7 @@ struct ringbuf_hdr {
__u32 pad;
};
-static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
+static void ringbuf_free_ring(struct ring_buffer *rb, struct ring *r)
{
if (r->consumer_pos) {
munmap(r->consumer_pos, rb->page_size);
@@ -67,6 +67,8 @@ static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
r->producer_pos = NULL;
}
+
+ free(r);
}
/* Add extra RINGBUF maps to this ring buffer manager */
@@ -86,8 +88,8 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
if (err) {
err = -errno;
- pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
- map_fd, err);
+ pr_warn("ringbuf: failed to get map info for fd=%d: %s\n",
+ map_fd, errstr(err));
return libbpf_err(err);
}
@@ -107,8 +109,10 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
return libbpf_err(-ENOMEM);
rb->events = tmp;
- r = &rb->rings[rb->ring_cnt];
- memset(r, 0, sizeof(*r));
+ r = calloc(1, sizeof(*r));
+ if (!r)
+ return libbpf_err(-ENOMEM);
+ rb->rings[rb->ring_cnt] = r;
r->map_fd = map_fd;
r->sample_cb = sample_cb;
@@ -119,9 +123,9 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
if (tmp == MAP_FAILED) {
err = -errno;
- pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
- map_fd, err);
- return libbpf_err(err);
+ pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %s\n",
+ map_fd, errstr(err));
+ goto err_out;
}
r->consumer_pos = tmp;
@@ -131,16 +135,16 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
*/
mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
if (mmap_sz != (__u64)(size_t)mmap_sz) {
+ err = -E2BIG;
pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
- return libbpf_err(-E2BIG);
+ goto err_out;
}
tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
if (tmp == MAP_FAILED) {
err = -errno;
- ringbuf_unmap_ring(rb, r);
- pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
- map_fd, err);
- return libbpf_err(err);
+ pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %s\n",
+ map_fd, errstr(err));
+ goto err_out;
}
r->producer_pos = tmp;
r->data = tmp + rb->page_size;
@@ -152,14 +156,17 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
e->data.fd = rb->ring_cnt;
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
err = -errno;
- ringbuf_unmap_ring(rb, r);
- pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
- map_fd, err);
- return libbpf_err(err);
+ pr_warn("ringbuf: failed to epoll add map fd=%d: %s\n",
+ map_fd, errstr(err));
+ goto err_out;
}
rb->ring_cnt++;
return 0;
+
+err_out:
+ ringbuf_free_ring(rb, r);
+ return libbpf_err(err);
}
void ring_buffer__free(struct ring_buffer *rb)
@@ -170,7 +177,7 @@ void ring_buffer__free(struct ring_buffer *rb)
return;
for (i = 0; i < rb->ring_cnt; ++i)
- ringbuf_unmap_ring(rb, &rb->rings[i]);
+ ringbuf_free_ring(rb, rb->rings[i]);
if (rb->epoll_fd >= 0)
close(rb->epoll_fd);
@@ -198,7 +205,7 @@ ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (rb->epoll_fd < 0) {
err = -errno;
- pr_warn("ringbuf: failed to create epoll instance: %d\n", err);
+ pr_warn("ringbuf: failed to create epoll instance: %s\n", errstr(err));
goto err_out;
}
@@ -224,7 +231,7 @@ static inline int roundup_len(__u32 len)
return (len + 7) / 8 * 8;
}
-static int64_t ringbuf_process_ring(struct ring *r)
+static int64_t ringbuf_process_ring(struct ring *r, size_t n)
{
int *len_ptr, len, err;
/* 64-bit to avoid overflow in case of extreme application behavior */
@@ -261,12 +268,42 @@ static int64_t ringbuf_process_ring(struct ring *r)
}
smp_store_release(r->consumer_pos, cons_pos);
+
+ if (cnt >= n)
+ goto done;
}
} while (got_new_data);
done:
return cnt;
}
+/* Consume available ring buffer(s) data without event polling, up to n
+ * records.
+ *
+ * Returns number of records consumed across all registered ring buffers (or
+ * n, whichever is less), or negative number if any of the callbacks return
+ * error.
+ */
+int ring_buffer__consume_n(struct ring_buffer *rb, size_t n)
+{
+ int64_t err, res = 0;
+ int i;
+
+ for (i = 0; i < rb->ring_cnt; i++) {
+ struct ring *ring = rb->rings[i];
+
+ err = ringbuf_process_ring(ring, n);
+ if (err < 0)
+ return libbpf_err(err);
+ res += err;
+ n -= err;
+
+ if (n == 0)
+ break;
+ }
+ return res > INT_MAX ? INT_MAX : res;
+}
+
/* Consume available ring buffer(s) data without event polling.
* Returns number of records consumed across all registered ring buffers (or
* INT_MAX, whichever is less), or negative number if any of the callbacks
@@ -278,15 +315,17 @@ int ring_buffer__consume(struct ring_buffer *rb)
int i;
for (i = 0; i < rb->ring_cnt; i++) {
- struct ring *ring = &rb->rings[i];
+ struct ring *ring = rb->rings[i];
- err = ringbuf_process_ring(ring);
+ err = ringbuf_process_ring(ring, INT_MAX);
if (err < 0)
return libbpf_err(err);
res += err;
+ if (res > INT_MAX) {
+ res = INT_MAX;
+ break;
+ }
}
- if (res > INT_MAX)
- return INT_MAX;
return res;
}
@@ -305,15 +344,15 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
for (i = 0; i < cnt; i++) {
__u32 ring_id = rb->events[i].data.fd;
- struct ring *ring = &rb->rings[ring_id];
+ struct ring *ring = rb->rings[ring_id];
- err = ringbuf_process_ring(ring);
+ err = ringbuf_process_ring(ring, INT_MAX);
if (err < 0)
return libbpf_err(err);
res += err;
}
if (res > INT_MAX)
- return INT_MAX;
+ res = INT_MAX;
return res;
}
@@ -323,6 +362,63 @@ int ring_buffer__epoll_fd(const struct ring_buffer *rb)
return rb->epoll_fd;
}
+struct ring *ring_buffer__ring(struct ring_buffer *rb, unsigned int idx)
+{
+ if (idx >= rb->ring_cnt)
+ return errno = ERANGE, NULL;
+
+ return rb->rings[idx];
+}
+
+unsigned long ring__consumer_pos(const struct ring *r)
+{
+ /* Synchronizes with smp_store_release() in ringbuf_process_ring(). */
+ return smp_load_acquire(r->consumer_pos);
+}
+
+unsigned long ring__producer_pos(const struct ring *r)
+{
+ /* Synchronizes with smp_store_release() in __bpf_ringbuf_reserve() in
+ * the kernel.
+ */
+ return smp_load_acquire(r->producer_pos);
+}
+
+size_t ring__avail_data_size(const struct ring *r)
+{
+ unsigned long cons_pos, prod_pos;
+
+ cons_pos = ring__consumer_pos(r);
+ prod_pos = ring__producer_pos(r);
+ return prod_pos - cons_pos;
+}
+
+size_t ring__size(const struct ring *r)
+{
+ return r->mask + 1;
+}
+
+int ring__map_fd(const struct ring *r)
+{
+ return r->map_fd;
+}
+
+int ring__consume_n(struct ring *r, size_t n)
+{
+ int64_t res;
+
+ res = ringbuf_process_ring(r, n);
+ if (res < 0)
+ return libbpf_err(res);
+
+ return res > INT_MAX ? INT_MAX : res;
+}
+
+int ring__consume(struct ring *r)
+{
+ return ring__consume_n(r, INT_MAX);
+}
+
static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
{
if (rb->consumer_pos) {
@@ -362,7 +458,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
err = bpf_map_get_info_by_fd(map_fd, &info, &len);
if (err) {
err = -errno;
- pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
+ pr_warn("user ringbuf: failed to get map info for fd=%d: %s\n",
+ map_fd, errstr(err));
return err;
}
@@ -378,8 +475,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
if (tmp == MAP_FAILED) {
err = -errno;
- pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
- map_fd, err);
+ pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %s\n",
+ map_fd, errstr(err));
return err;
}
rb->consumer_pos = tmp;
@@ -398,8 +495,8 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
map_fd, rb->page_size);
if (tmp == MAP_FAILED) {
err = -errno;
- pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
- map_fd, err);
+ pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %s\n",
+ map_fd, errstr(err));
return err;
}
@@ -410,7 +507,7 @@ static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
rb_epoll->events = EPOLLOUT;
if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
err = -errno;
- pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err);
+ pr_warn("user ringbuf: failed to epoll add map fd=%d: %s\n", map_fd, errstr(err));
return err;
}
@@ -435,7 +532,7 @@ user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts)
rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
if (rb->epoll_fd < 0) {
err = -errno;
- pr_warn("user ringbuf: failed to create epoll instance: %d\n", err);
+ pr_warn("user ringbuf: failed to create epoll instance: %s\n", errstr(err));
goto err_out;
}
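
The new ring_buffer__consume_n() (and the per-ring ring__consume_n()) bound how many records are drained in a single call. Below is a minimal usage sketch, assuming an existing BPF_MAP_TYPE_RINGBUF map fd; the handle_event callback and consume_up_to helper are illustrative names, not part of this patch:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* Illustrative callback; the record layout is application-defined. A negative
 * return value aborts consumption and is propagated back to the caller.
 */
static int handle_event(void *ctx, void *data, size_t size)
{
	fprintf(stderr, "got %zu-byte record\n", size);
	return 0;
}

/* Drain at most 'budget' records without blocking in epoll_wait(). */
static int consume_up_to(int map_fd, size_t budget)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -errno;

	n = ring_buffer__consume_n(rb, budget);	/* <= budget, or negative error */

	ring_buffer__free(rb);
	return n;
}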
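The ring_buffer__ring() accessor plus the ring__* getters expose per-ring state that was previously private to libbpf. A sketch of how the fill level of each registered ring could be inspected; the dump_ring_stats() helper is hypothetical:

#include <stdio.h>
#include <bpf/libbpf.h>

/* Hypothetical helper: print the fill level of every registered ring.
 * ring_buffer__ring() returns NULL (errno = ERANGE) past the last ring,
 * which terminates the loop.
 */
static void dump_ring_stats(struct ring_buffer *rb)
{
	struct ring *r;
	unsigned int i;

	for (i = 0; (r = ring_buffer__ring(rb, i)); i++)
		printf("ring %u: map_fd=%d size=%zu avail=%zu cons=%lu prod=%lu\n",
		       i, ring__map_fd(r), ring__size(r),
		       ring__avail_data_size(r),
		       ring__consumer_pos(r), ring__producer_pos(r));
}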