Diffstat (limited to 'net/xdp/xsk_queue.h')
-rw-r--r--  net/xdp/xsk_queue.h  |  98
1 file changed, 54 insertions(+), 44 deletions(-)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index cb8e5be35110..ef6a6f0ec949 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -8,10 +8,10 @@
#include <linux/types.h>
#include <linux/if_xdp.h>
-
-#include "xdp_umem_props.h"
+#include <net/xdp_sock.h>
#define RX_BATCH_SIZE 16
+#define LAZY_UPDATE_THRESHOLD 128
struct xdp_ring {
u32 producer ____cacheline_aligned_in_smp;
@@ -27,7 +27,7 @@ struct xdp_rxtx_ring {
/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
struct xdp_ring ptrs;
- u32 desc[0] ____cacheline_aligned_in_smp;
+ u64 desc[0] ____cacheline_aligned_in_smp;
};
struct xsk_queue {
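
Note: the fill and completion rings now carry 64-bit UMEM byte addresses rather than 32-bit frame indices, hence the u32 -> u64 change above. A minimal userspace model of the resulting layout (illustrative names, not the kernel structs verbatim):

#include <stdint.h>
#include <stdlib.h>

struct model_umem_ring {
        uint32_t producer;      /* free-running producer counter */
        uint32_t consumer;      /* free-running consumer counter */
        uint64_t desc[];        /* was u32 frame ids; now u64 addresses */
};

int main(void)
{
        struct model_umem_ring *r =
                calloc(1, sizeof(*r) + 8 * sizeof(uint64_t));
        r->desc[0] = 5 * 2048 + 256;    /* chunk base + offset in one u64 */
        free(r);
        return 0;
}
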
@@ -62,9 +62,14 @@ static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
return (entries > dcnt) ? dcnt : entries;
}
+static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer)
+{
+ return q->nentries - (producer - q->cons_tail);
+}
+
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
- u32 free_entries = q->nentries - (producer - q->cons_tail);
+ u32 free_entries = xskq_nb_free_lazy(q, producer);
if (free_entries >= dcnt)
return free_entries;
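
Note: the free-entry math factored out above relies on unsigned wraparound; (producer - cons_tail) is the number of outstanding entries even after the free-running u32 counters wrap. A standalone check of that property (illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

static uint32_t nb_free(uint32_t nentries, uint32_t producer, uint32_t cons_tail)
{
        return nentries - (producer - cons_tail);
}

int main(void)
{
        uint32_t prod = UINT32_MAX + 2u;        /* wrapped around to 1 */
        uint32_t cons = UINT32_MAX - 3u;        /* 5 entries behind */

        assert(prod - cons == 5);               /* modular arithmetic holds */
        assert(nb_free(16, prod, cons) == 11);  /* 16-entry ring */
        return 0;
}
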
@@ -76,23 +81,25 @@ static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
/* UMEM queue */
-static inline bool xskq_is_valid_id(struct xsk_queue *q, u32 idx)
+static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
{
- if (unlikely(idx >= q->umem_props.nframes)) {
+ if (addr >= q->umem_props.size) {
q->invalid_descs++;
return false;
}
+
return true;
}
-static inline u32 *xskq_validate_id(struct xsk_queue *q)
+static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr)
{
while (q->cons_tail != q->cons_head) {
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
unsigned int idx = q->cons_tail & q->ring_mask;
- if (xskq_is_valid_id(q, ring->desc[idx]))
- return &ring->desc[idx];
+ *addr = READ_ONCE(ring->desc[idx]) & q->umem_props.chunk_mask;
+ if (xskq_is_valid_addr(q, *addr))
+ return addr;
q->cons_tail++;
}
@@ -100,35 +107,32 @@ static inline u32 *xskq_validate_id(struct xsk_queue *q)
return NULL;
}
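
Note: the consumer now reads the raw u64 with READ_ONCE(), masks off the intra-chunk offset bits with chunk_mask, and bounds-checks the result against the UMEM size; invalid entries are skipped (cons_tail++) rather than aborting the whole batch. Assuming a power-of-two chunk size, the masking works like this (values illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t chunk_size = 2048;
        uint64_t chunk_mask = ~(chunk_size - 1);    /* clears offset bits */
        uint64_t umem_size  = 64 * chunk_size;      /* 64-chunk UMEM */

        uint64_t addr = 5 * chunk_size + 300;       /* 300 bytes into chunk 5 */
        uint64_t base = addr & chunk_mask;

        assert(base == 5 * chunk_size);             /* aligned down to chunk */
        assert(base < umem_size);                   /* passes the size check */
        return 0;
}
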
-static inline u32 *xskq_peek_id(struct xsk_queue *q)
+static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr)
{
- struct xdp_umem_ring *ring;
-
if (q->cons_tail == q->cons_head) {
WRITE_ONCE(q->ring->consumer, q->cons_tail);
q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
/* Order consumer and data */
smp_rmb();
-
- return xskq_validate_id(q);
}
- ring = (struct xdp_umem_ring *)q->ring;
- return &ring->desc[q->cons_tail & q->ring_mask];
+ return xskq_validate_addr(q, addr);
}
-static inline void xskq_discard_id(struct xsk_queue *q)
+static inline void xskq_discard_addr(struct xsk_queue *q)
{
q->cons_tail++;
- (void)xskq_validate_id(q);
}
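
Note: validation now happens on the peek path for every entry; xskq_validate_addr() steps over bad addresses by bumping cons_tail, which is why xskq_discard_addr() no longer needs the follow-up validate call that the old xskq_discard_id() had. A single-threaded userspace model of that flow (hypothetical names, barriers elided):

#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8
#define MASK (NENTRIES - 1)

struct model_q {
        uint32_t cons_tail, cons_head;
        uint64_t umem_size;
        uint64_t desc[NENTRIES];
};

static uint64_t *peek_addr(struct model_q *q, uint64_t *addr)
{
        while (q->cons_tail != q->cons_head) {
                *addr = q->desc[q->cons_tail & MASK];
                if (*addr < q->umem_size)   /* valid entry: hand it out */
                        return addr;
                q->cons_tail++;             /* invalid: skip, keep scanning */
        }
        return NULL;
}

static void discard_addr(struct model_q *q)
{
        q->cons_tail++;                     /* no re-validation needed */
}

int main(void)
{
        struct model_q q = { .cons_head = 2, .umem_size = 4096,
                             .desc = { 9999999, 2048 } };  /* first is bogus */
        uint64_t addr;

        if (peek_addr(&q, &addr)) {         /* skips desc[0], returns 2048 */
                printf("got addr %llu\n", (unsigned long long)addr);
                discard_addr(&q);
        }
        return 0;
}
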
-static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
+static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
- ring->desc[q->prod_tail++ & q->ring_mask] = id;
+ if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0)
+ return -ENOSPC;
+
+ ring->desc[q->prod_tail++ & q->ring_mask] = addr;
/* Order producer and data */
smp_wmb();
@@ -137,7 +141,28 @@ static inline int xskq_produce_id(struct xsk_queue *q, u32 id)
return 0;
}
-static inline int xskq_reserve_id(struct xsk_queue *q)
+static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
+{
+ struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
+
+ if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0)
+ return -ENOSPC;
+
+ ring->desc[q->prod_head++ & q->ring_mask] = addr;
+ return 0;
+}
+
+static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
+ u32 nb_entries)
+{
+ /* Order producer and data */
+ smp_wmb();
+
+ q->prod_tail += nb_entries;
+ WRITE_ONCE(q->ring->producer, q->prod_tail);
+}
+
+static inline int xskq_reserve_addr(struct xsk_queue *q)
{
if (xskq_nb_free(q, q->prod_head, 1) == 0)
return -ENOSPC;
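
Note: the lazy variant above advances only the private prod_head per entry; the new flush helper then publishes a whole batch with a single barrier and one store to the shared producer pointer, with LAZY_UPDATE_THRESHOLD bounding how much headroom is checked. A userspace model of the produce-lazy/flush pairing (hypothetical names, single-threaded, barriers elided, and the free-space check simplified to free == 0):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8
#define MASK (NENTRIES - 1)

struct model_q {
        uint32_t prod_head;     /* private: bumped per entry */
        uint32_t prod_tail;     /* published: bumped only on flush */
        uint32_t cons_tail;
        uint64_t desc[NENTRIES];
};

static int produce_lazy(struct model_q *q, uint64_t addr)
{
        if (NENTRIES - (q->prod_head - q->cons_tail) == 0)
                return -ENOSPC;
        q->desc[q->prod_head++ & MASK] = addr;
        return 0;
}

static void flush_n(struct model_q *q, uint32_t n)
{
        /* in the kernel, smp_wmb() here orders the desc[] stores before
         * the producer-pointer store that makes them visible */
        q->prod_tail += n;
}

int main(void)
{
        struct model_q q = {0};
        uint32_t n = 0;

        for (uint64_t a = 0; a < 4; a++)
                if (produce_lazy(&q, a * 2048) == 0)
                        n++;
        flush_n(&q, n);         /* one publish for the whole batch */
        printf("published %u, tail=%u\n", n, q.prod_tail);
        return 0;
}
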
@@ -150,16 +175,11 @@ static inline int xskq_reserve_id(struct xsk_queue *q)
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d)
{
- u32 buff_len;
-
- if (unlikely(d->idx >= q->umem_props.nframes)) {
- q->invalid_descs++;
+ if (!xskq_is_valid_addr(q, d->addr))
return false;
- }
- buff_len = q->umem_props.frame_size;
- if (unlikely(d->len > buff_len || d->len == 0 ||
- d->offset > buff_len || d->offset + d->len > buff_len)) {
+ if (((d->addr + d->len) & q->umem_props.chunk_mask) !=
+ (d->addr & q->umem_props.chunk_mask)) {
q->invalid_descs++;
return false;
}
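
Note: with offsets folded into addr, per-descriptor validation reduces to two checks: the address lies inside the UMEM, and [addr, addr + len) does not straddle a chunk boundary. The boundary test above can be exercised in isolation (illustrative values):

#include <assert.h>
#include <stdint.h>

static int same_chunk(uint64_t addr, uint32_t len, uint64_t chunk_mask)
{
        return ((addr + len) & chunk_mask) == (addr & chunk_mask);
}

int main(void)
{
        uint64_t mask = ~(uint64_t)(2048 - 1);      /* 2 KiB chunks */

        assert(same_chunk(4096 + 100, 1500, mask));   /* fits in one chunk */
        assert(!same_chunk(4096 + 1000, 1500, mask)); /* crosses into next */
        return 0;
}
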
@@ -174,11 +194,9 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
unsigned int idx = q->cons_tail & q->ring_mask;
- if (xskq_is_valid_desc(q, &ring->desc[idx])) {
- if (desc)
- *desc = ring->desc[idx];
+ *desc = READ_ONCE(ring->desc[idx]);
+ if (xskq_is_valid_desc(q, desc))
return desc;
- }
q->cons_tail++;
}
@@ -189,31 +207,24 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q,
static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
struct xdp_desc *desc)
{
- struct xdp_rxtx_ring *ring;
-
if (q->cons_tail == q->cons_head) {
WRITE_ONCE(q->ring->consumer, q->cons_tail);
q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
/* Order consumer and data */
smp_rmb();
-
- return xskq_validate_desc(q, desc);
}
- ring = (struct xdp_rxtx_ring *)q->ring;
- *desc = ring->desc[q->cons_tail & q->ring_mask];
- return desc;
+ return xskq_validate_desc(q, desc);
}
static inline void xskq_discard_desc(struct xsk_queue *q)
{
q->cons_tail++;
- (void)xskq_validate_desc(q, NULL);
}
static inline int xskq_produce_batch_desc(struct xsk_queue *q,
- u32 id, u32 len, u16 offset)
+ u64 addr, u32 len)
{
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
unsigned int idx;
@@ -222,9 +233,8 @@ static inline int xskq_produce_batch_desc(struct xsk_queue *q,
return -ENOSPC;
idx = (q->prod_head++) & q->ring_mask;
- ring->desc[idx].idx = id;
+ ring->desc[idx].addr = addr;
ring->desc[idx].len = len;
- ring->desc[idx].offset = offset;
return 0;
}
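
Note: the Rx/Tx descriptor loses its separate idx and offset fields; producers now pass a single u64 addr that already encodes chunk base plus any headroom offset, alongside len. A sketch of the descriptor layout as user space would see it (assumed names, modeled on this series' uapi in include/uapi/linux/if_xdp.h):

#include <stdint.h>

struct model_xdp_desc {
        uint64_t addr;          /* UMEM byte address: chunk base + offset */
        uint32_t len;           /* frame length; must stay within the chunk */
        uint32_t options;
};

int main(void)
{
        struct model_xdp_desc d = {
                .addr = 7 * 2048 + 256,   /* chunk 7, 256 B headroom */
                .len  = 1500,
        };
        return d.len ? 0 : 1;
}
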