Diffstat (limited to 'net/xdp/xsk.c')
-rw-r--r--  net/xdp/xsk.c  143
1 file changed, 88 insertions, 55 deletions
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 7b0c68a70888..69bbcca8ac75 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -36,20 +36,13 @@
#define TX_BATCH_SIZE 32
#define MAX_PER_SOCKET_BUDGET 32
-struct xsk_addr_node {
- u64 addr;
- struct list_head addr_node;
-};
-
-struct xsk_addr_head {
+struct xsk_addrs {
u32 num_descs;
- struct list_head addrs_list;
+ u64 addrs[MAX_SKB_FRAGS + 1];
};
static struct kmem_cache *xsk_tx_generic_cache;
-#define XSKCB(skb) ((struct xsk_addr_head *)((skb)->cb))
-
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
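A note on the struct change in the hunk above (commentary, not patch text): the old scheme kept one heap-allocated xsk_addr_node per descriptor, linked through skb->cb, while the new struct xsk_addrs records every descriptor address of an skb in one flat array of MAX_SKB_FRAGS + 1 entries (head plus one per fragment), so a multi-fragment skb costs a single allocation. A stand-alone sketch of the size trade-off, assuming the default of 17 frags:

    /* Userspace model only; the struct names mirror the patch, but
     * nothing here is kernel code. */
    #include <stdio.h>
    #include <stdint.h>

    #define MAX_SKB_FRAGS 17 /* kernel default CONFIG_MAX_SKB_FRAGS */

    struct xsk_addr_node { /* old: one per descriptor */
    	uint64_t addr;
    	void *next, *prev; /* stand-in for struct list_head */
    };

    struct xsk_addrs { /* new: one per skb */
    	uint32_t num_descs;
    	uint64_t addrs[MAX_SKB_FRAGS + 1];
    };

    int main(void)
    {
    	printf("old: %zu bytes per descriptor\n", sizeof(struct xsk_addr_node));
    	printf("new: %zu bytes per skb\n", sizeof(struct xsk_addrs));
    	return 0;
    }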
@@ -558,29 +551,68 @@ static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
return ret;
}
+static bool xsk_skb_destructor_is_addr(struct sk_buff *skb)
+{
+ return (uintptr_t)skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static u64 xsk_skb_destructor_get_addr(struct sk_buff *skb)
+{
+ return (u64)((uintptr_t)skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
+static void xsk_skb_destructor_set_addr(struct sk_buff *skb, u64 addr)
+{
+ skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t)addr | 0x1UL);
+}
+
+static void xsk_inc_num_desc(struct sk_buff *skb)
+{
+ struct xsk_addrs *xsk_addr;
+
+ if (!xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ xsk_addr->num_descs++;
+ }
+}
+
+static u32 xsk_get_num_desc(struct sk_buff *skb)
+{
+ struct xsk_addrs *xsk_addr;
+
+ if (xsk_skb_destructor_is_addr(skb))
+ return 1;
+
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+
+ return xsk_addr->num_descs;
+}
+
static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
struct sk_buff *skb)
{
- struct xsk_addr_node *pos, *tmp;
+ u32 num_descs = xsk_get_num_desc(skb);
+ struct xsk_addrs *xsk_addr;
u32 descs_processed = 0;
unsigned long flags;
- u32 idx;
+ u32 idx, i;
spin_lock_irqsave(&pool->cq_lock, flags);
idx = xskq_get_prod(pool->cq);
- xskq_prod_write_addr(pool->cq, idx,
- (u64)(uintptr_t)skb_shinfo(skb)->destructor_arg);
- descs_processed++;
+ if (unlikely(num_descs > 1)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;

- if (unlikely(XSKCB(skb)->num_descs > 1)) {
- list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
+ for (i = 0; i < num_descs; i++) {
xskq_prod_write_addr(pool->cq, idx + descs_processed,
- pos->addr);
+ xsk_addr->addrs[i]);
descs_processed++;
- list_del(&pos->addr_node);
- kmem_cache_free(xsk_tx_generic_cache, pos);
}
+ kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
+ } else {
+ xskq_prod_write_addr(pool->cq, idx,
+ xsk_skb_destructor_get_addr(skb));
+ descs_processed++;
}
xskq_prod_submit_n(pool->cq, descs_processed);
spin_unlock_irqrestore(&pool->cq_lock, flags);
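The three xsk_skb_destructor_* helpers above implement a classic pointer-tagging trick: destructor_arg either carries a UMEM address directly with bit 0 set (the common single-descriptor case, no allocation at all), or a pointer to a struct xsk_addrs, whose slab objects are at least pointer-aligned and therefore always have bit 0 clear. A minimal userspace demonstration of the encoding, assuming the stored address itself keeps bit 0 free:

    #include <assert.h>
    #include <stdint.h>

    static void *encode_addr(uint64_t addr) /* single-descriptor case */
    {
    	return (void *)((uintptr_t)addr | 0x1UL);
    }

    static int is_addr(const void *arg)
    {
    	return (uintptr_t)arg & 0x1UL;
    }

    static uint64_t decode_addr(const void *arg)
    {
    	return (uint64_t)((uintptr_t)arg & ~0x1UL);
    }

    int main(void)
    {
    	uint64_t block; /* any real allocation is at least 2-byte aligned */
    	void *arg = encode_addr(0x2000);

    	assert(is_addr(arg) && decode_addr(arg) == 0x2000);
    	assert(!is_addr(&block)); /* a pointer decodes as "not an address" */
    	return 0;
    }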
@@ -595,16 +627,6 @@ static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
spin_unlock_irqrestore(&pool->cq_lock, flags);
}
-static void xsk_inc_num_desc(struct sk_buff *skb)
-{
- XSKCB(skb)->num_descs++;
-}
-
-static u32 xsk_get_num_desc(struct sk_buff *skb)
-{
- return XSKCB(skb)->num_descs;
-}
-
static void xsk_destruct_skb(struct sk_buff *skb)
{
struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
@@ -621,27 +643,22 @@ static void xsk_destruct_skb(struct sk_buff *skb)
static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
u64 addr)
{
- BUILD_BUG_ON(sizeof(struct xsk_addr_head) > sizeof(skb->cb));
- INIT_LIST_HEAD(&XSKCB(skb)->addrs_list);
skb->dev = xs->dev;
skb->priority = READ_ONCE(xs->sk.sk_priority);
skb->mark = READ_ONCE(xs->sk.sk_mark);
- XSKCB(skb)->num_descs = 0;
skb->destructor = xsk_destruct_skb;
- skb_shinfo(skb)->destructor_arg = (void *)(uintptr_t)addr;
+ xsk_skb_destructor_set_addr(skb, addr);
}
static void xsk_consume_skb(struct sk_buff *skb)
{
struct xdp_sock *xs = xdp_sk(skb->sk);
u32 num_descs = xsk_get_num_desc(skb);
- struct xsk_addr_node *pos, *tmp;
+ struct xsk_addrs *xsk_addr;
if (unlikely(num_descs > 1)) {
- list_for_each_entry_safe(pos, tmp, &XSKCB(skb)->addrs_list, addr_node) {
- list_del(&pos->addr_node);
- kmem_cache_free(xsk_tx_generic_cache, pos);
- }
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
}
skb->destructor = sock_wfree;
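Taken together, xsk_skb_init_misc() and xsk_consume_skb() above give the bookkeeping a three-step lifecycle: the first descriptor is stored tag-encoded with no allocation, the array is allocated lazily only when a second descriptor joins the skb, and teardown is a single kmem_cache_free() instead of the old list walk. A compressed userspace model of that flow; malloc stands in for the slab cache, and the append step folds in what the patch splits between xsk_build_skb() and xsk_inc_num_desc():

    #include <stdint.h>
    #include <stdlib.h>

    #define NUM_SLOTS 18 /* MAX_SKB_FRAGS + 1 with the default config */

    struct xsk_addrs {
    	uint32_t num_descs;
    	uint64_t addrs[NUM_SLOTS];
    };

    static void *arg; /* plays skb_shinfo(skb)->destructor_arg */

    static void first_desc(uint64_t addr)
    {
    	arg = (void *)((uintptr_t)addr | 0x1UL); /* tagged, no allocation */
    }

    static int append_desc(uint64_t addr)
    {
    	struct xsk_addrs *xa;

    	if ((uintptr_t)arg & 0x1UL) { /* promote tagged addr to array */
    		xa = calloc(1, sizeof(*xa));
    		if (!xa)
    			return -1;
    		xa->addrs[xa->num_descs++] = (uintptr_t)arg & ~0x1UL;
    		arg = xa;
    	} else {
    		xa = arg;
    	}
    	xa->addrs[xa->num_descs++] = addr;
    	return 0;
    }

    static void consume(void) /* one free replaces the old list walk */
    {
    	if (!((uintptr_t)arg & 0x1UL))
    		free(arg);
    }

    int main(void)
    {
    	first_desc(0x1000);
    	append_desc(0x2000);
    	append_desc(0x3000);
    	consume();
    	return 0;
    }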
@@ -701,7 +718,6 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
{
struct xsk_buff_pool *pool = xs->pool;
u32 hr, len, ts, offset, copy, copied;
- struct xsk_addr_node *xsk_addr;
struct sk_buff *skb = xs->skb;
struct page *page;
void *buffer;
@@ -727,16 +743,26 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
return ERR_PTR(err);
}
} else {
- xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
- if (!xsk_addr)
- return ERR_PTR(-ENOMEM);
+ struct xsk_addrs *xsk_addr;
+
+ if (xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
+ GFP_KERNEL);
+ if (!xsk_addr)
+ return ERR_PTR(-ENOMEM);
+
+ xsk_addr->num_descs = 1;
+ xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
+ skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
+ } else {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ }
/* in case of -EOVERFLOW that could happen below,
* xsk_consume_skb() will release this node as whole skb
* would be dropped, which implies freeing all list elements
*/
- xsk_addr->addr = desc->addr;
- list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
+ xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
len = desc->len;
@@ -813,10 +839,25 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
}
} else {
int nr_frags = skb_shinfo(skb)->nr_frags;
- struct xsk_addr_node *xsk_addr;
+ struct xsk_addrs *xsk_addr;
struct page *page;
u8 *vaddr;
+ if (xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
+ GFP_KERNEL);
+ if (!xsk_addr) {
+ err = -ENOMEM;
+ goto free_err;
+ }
+
+ xsk_addr->num_descs = 1;
+ xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
+ skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
+ } else {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ }
+
if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
err = -EOVERFLOW;
goto free_err;
@@ -828,13 +869,6 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
goto free_err;
}
- xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache, GFP_KERNEL);
- if (!xsk_addr) {
- __free_page(page);
- err = -ENOMEM;
- goto free_err;
- }
-
vaddr = kmap_local_page(page);
memcpy(vaddr, buffer, len);
kunmap_local(vaddr);
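The deleted block above also tidies the error handling: the old code allocated the page first and the list node second, so a failed node allocation had to unwind with __free_page(); after the patch the xsk_addrs block is taken before the page (see the previous hunk), and a page failure simply falls through to the common free path. The general shape of that error-path simplification, as a sketch with made-up names:

    #include <stdlib.h>

    struct meta { unsigned int n; };

    static int build(void)
    {
    	struct meta *m = NULL;
    	char *page = NULL;

    	m = malloc(sizeof(*m)); /* bookkeeping first: nothing to undo */
    	if (!m)
    		goto err;

    	page = malloc(4096); /* payload second */
    	if (!page)
    		goto err; /* one label unwinds everything */

    	free(page);
    	free(m);
    	return 0;
    err:
    	free(m); /* free(NULL) is a no-op */
    	return -1;
    }

    int main(void)
    {
    	return !!build();
    }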
@@ -842,8 +876,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
- xsk_addr->addr = desc->addr;
- list_add_tail(&xsk_addr->addr_node, &XSKCB(skb)->addrs_list);
+ xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
}
@@ -1904,7 +1937,7 @@ static int __init xsk_init(void)
goto out_pernet;
xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
- sizeof(struct xsk_addr_node),
+ sizeof(struct xsk_addrs),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!xsk_tx_generic_cache) {
err = -ENOMEM;
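A final note on the cache (commentary, not patch text): the objects grow from a small per-descriptor node to the full array struct, but at most one is now allocated per multi-buffer skb, and single-descriptor sends skip the cache entirely. SLAB_HWCACHE_ALIGN also keeps every object cache-line aligned, which in particular guarantees that bit 0 of the returned pointer is clear, the invariant the destructor_arg tag relies on. A userspace analog of that alignment property:

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
    	/* 64-byte alignment stands in for SLAB_HWCACHE_ALIGN; any
    	 * alignment >= 2 leaves bit 0 free for tagging. */
    	void *obj = aligned_alloc(64, 192);

    	assert(obj && ((uintptr_t)obj & 0x1UL) == 0);
    	free(obj);
    	return 0;
    }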