author     Andrii Nakryiko <andrii@kernel.org>   2024-04-06 09:11:11 -0700
committer  Andrii Nakryiko <andrii@kernel.org>   2024-04-06 10:16:35 -0700
commit     50408d7abea68e2d1ae3a9328e1a468b7089b11c (patch)
tree       be1f6764f6ef5a655bb0d2e5d60bf7c99659a9ae /tools/lib/bpf/ringbuf.c
parent     d564ffde5c832c46513e0189647abfde9833e590 (diff)
parent     4d22ea94ea33550538b3b14429d52cb9f96ad2c3 (diff)
Merge branch 'libbpf-api-to-partially-consume-items-from-ringbuffer'
Andrea Righi says:

====================
libbpf: API to partially consume items from ringbuffer

Introduce ring__consume_n() and ring_buffer__consume_n() API to
partially consume items from one (or more) ringbuffer(s).

This can be useful, for example, to consume just a single item or when
we need to copy multiple items to a limited user-space buffer from the
ringbuffer callback.

Practical example (where this API can be used):
https://github.com/sched-ext/scx/blob/b7c06b9ed9f72cad83c31e39e9c4e2cfd8683a55/rust/scx_rustland_core/src/bpf.rs#L217

See also:
https://lore.kernel.org/lkml/20240310154726.734289-1-andrea.righi@canonical.com/T/#u

v4:
 - open a new 1.5.0 cycle

v3:
 - rename ring__consume_max() -> ring__consume_n() and
   ring_buffer__consume_max() -> ring_buffer__consume_n()
 - add new API to a new 1.5.0 cycle
 - fixed minor nits / comments

v2:
 - introduce a new API instead of changing the callback's retcode
   behavior
====================

Link: https://lore.kernel.org/r/20240406092005.92399-1-andrea.righi@canonical.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
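As a rough illustration of how the new ring_buffer__consume_n() API might be
used from user space (a minimal sketch: the struct event layout, the
handle_event() callback, consume_some(), and the batch size of 32 are
hypothetical; only ring_buffer__new(), ring_buffer__consume_n() and
ring_buffer__free() are libbpf APIs):

#include <stdio.h>
#include <bpf/libbpf.h>

/* Hypothetical record layout produced by the BPF side. */
struct event {
	int pid;
	char comm[16];
};

/* Callback invoked once per consumed record; a negative return value
 * aborts consumption and is propagated to the caller. */
static int handle_event(void *ctx, void *data, size_t size)
{
	const struct event *e = data;

	printf("pid=%d comm=%s\n", e->pid, e->comm);
	return 0;
}

/* Drain at most 32 records across all rings registered with the ring
 * buffer manager, without blocking in epoll. Returns the number of
 * records consumed (<= 32) or a negative error. */
static int consume_some(int ringbuf_map_fd)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(ringbuf_map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -1;

	n = ring_buffer__consume_n(rb, 32);

	ring_buffer__free(rb);
	return n;
}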
Diffstat (limited to 'tools/lib/bpf/ringbuf.c')
-rw-r--r--  tools/lib/bpf/ringbuf.c  55
1 file changed, 46 insertions(+), 9 deletions(-)
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index aacb64278a01..99e44cf02321 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -231,7 +231,7 @@ static inline int roundup_len(__u32 len)
return (len + 7) / 8 * 8;
}
-static int64_t ringbuf_process_ring(struct ring *r)
+static int64_t ringbuf_process_ring(struct ring *r, size_t n)
{
int *len_ptr, len, err;
/* 64-bit to avoid overflow in case of extreme application behavior */
@@ -268,12 +268,42 @@ static int64_t ringbuf_process_ring(struct ring *r)
}
smp_store_release(r->consumer_pos, cons_pos);
+
+ if (cnt >= n)
+ goto done;
}
} while (got_new_data);
done:
return cnt;
}
+/* Consume available ring buffer(s) data without event polling, up to n
+ * records.
+ *
+ * Returns number of records consumed across all registered ring buffers (or
+ * n, whichever is less), or negative number if any of the callbacks return
+ * error.
+ */
+int ring_buffer__consume_n(struct ring_buffer *rb, size_t n)
+{
+ int64_t err, res = 0;
+ int i;
+
+ for (i = 0; i < rb->ring_cnt; i++) {
+ struct ring *ring = rb->rings[i];
+
+ err = ringbuf_process_ring(ring, n);
+ if (err < 0)
+ return libbpf_err(err);
+ res += err;
+ n -= err;
+
+ if (n == 0)
+ break;
+ }
+ return res;
+}
+
/* Consume available ring buffer(s) data without event polling.
* Returns number of records consumed across all registered ring buffers (or
* INT_MAX, whichever is less), or negative number if any of the callbacks
@@ -287,13 +317,15 @@ int ring_buffer__consume(struct ring_buffer *rb)
for (i = 0; i < rb->ring_cnt; i++) {
struct ring *ring = rb->rings[i];
- err = ringbuf_process_ring(ring);
+ err = ringbuf_process_ring(ring, INT_MAX);
if (err < 0)
return libbpf_err(err);
res += err;
+ if (res > INT_MAX) {
+ res = INT_MAX;
+ break;
+ }
}
- if (res > INT_MAX)
- return INT_MAX;
return res;
}
@@ -314,13 +346,13 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
__u32 ring_id = rb->events[i].data.fd;
struct ring *ring = rb->rings[ring_id];
- err = ringbuf_process_ring(ring);
+ err = ringbuf_process_ring(ring, INT_MAX);
if (err < 0)
return libbpf_err(err);
res += err;
}
if (res > INT_MAX)
- return INT_MAX;
+ res = INT_MAX;
return res;
}
@@ -371,17 +403,22 @@ int ring__map_fd(const struct ring *r)
return r->map_fd;
}
-int ring__consume(struct ring *r)
+int ring__consume_n(struct ring *r, size_t n)
{
- int64_t res;
+ int res;
- res = ringbuf_process_ring(r);
+ res = ringbuf_process_ring(r, n);
if (res < 0)
return libbpf_err(res);
return res > INT_MAX ? INT_MAX : res;
}
+int ring__consume(struct ring *r)
+{
+ return ring__consume_n(r, INT_MAX);
+}
+
static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
{
if (rb->consumer_pos) {
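
For the "copy multiple items to a limited user-space buffer" use case cited in
the cover letter, a single-ring sketch with ring__consume_n() might look like
the following (the struct event/struct batch types, copy_event() and
fetch_batch() are hypothetical; it assumes the batch was registered as the
callback ctx via ring_buffer__new() and that the ring of interest is index 0,
obtained with ring_buffer__ring()):

#include <errno.h>
#include <bpf/libbpf.h>

#define MAX_EVENTS 16

/* Hypothetical record layout and fixed-capacity destination buffer. */
struct event { int pid; char comm[16]; };

struct batch {
	struct event buf[MAX_EVENTS];
	size_t count;
};

/* Callback that copies one record into the batch buffer. No capacity
 * check is needed here because ring__consume_n() below caps the number
 * of callback invocations at MAX_EVENTS. */
static int copy_event(void *ctx, void *data, size_t size)
{
	struct batch *b = ctx;

	if (size < sizeof(struct event))
		return -EINVAL;
	b->buf[b->count++] = *(const struct event *)data;
	return 0;
}

/* Copy at most MAX_EVENTS records from the first registered ring into
 * the batch that was passed as ctx to ring_buffer__new(). Returns the
 * number of records copied, or a negative error. */
static int fetch_batch(struct ring_buffer *rb, struct batch *b)
{
	struct ring *r = ring_buffer__ring(rb, 0);

	if (!r)
		return -EINVAL;
	b->count = 0;
	return ring__consume_n(r, MAX_EVENTS);
}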