Diffstat (limited to 'net/xdp/xsk.c')
-rw-r--r--  net/xdp/xsk.c  685
1 file changed, 563 insertions(+), 122 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 31dca4ecb2c5..f093c3453f64 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -25,6 +25,8 @@
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
+#include <net/netdev_lock.h>
+#include <net/netdev_rx_queue.h>
#include <net/xdp.h>
#include "xsk_queue.h"
@@ -32,8 +34,14 @@
#include "xsk.h"
#define TX_BATCH_SIZE 32
+#define MAX_PER_SOCKET_BUDGET 32
-static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
+struct xsk_addrs {
+ u32 num_descs;
+ u64 addrs[MAX_SKB_FRAGS + 1];
+};
+
+static struct kmem_cache *xsk_tx_generic_cache;
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
@@ -135,14 +143,14 @@ int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
return 0;
}
-static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
+ u32 flags)
{
- struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
u64 addr;
int err;
- addr = xp_get_handle(xskb);
- err = xskq_prod_reserve_desc(xs->rx, addr, len);
+ addr = xp_get_handle(xskb, xskb->pool);
+ err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
if (err) {
xs->rx_queue_full++;
return err;
@@ -152,48 +160,142 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
return 0;
}
-static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
+static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
- void *from_buf, *to_buf;
- u32 metalen;
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+ u32 frags = xdp_buff_has_frags(xdp);
+ struct xdp_buff_xsk *pos, *tmp;
+ struct list_head *xskb_list;
+ u32 contd = 0;
+ int err;
- if (unlikely(xdp_data_meta_unsupported(from))) {
- from_buf = from->data;
- to_buf = to->data;
- metalen = 0;
- } else {
- from_buf = from->data_meta;
- metalen = from->data - from->data_meta;
- to_buf = to->data - metalen;
+ if (frags)
+ contd = XDP_PKT_CONTD;
+
+ err = __xsk_rcv_zc(xs, xskb, len, contd);
+ if (err)
+ goto err;
+ if (likely(!frags))
+ return 0;
+
+ xskb_list = &xskb->pool->xskb_list;
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+ list_del(&pos->list_node);
}
- memcpy(to_buf, from_buf, len + metalen);
+ return 0;
+err:
+ xsk_buff_free(xdp);
+ return err;
+}
+
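The fragment loop above emits one RX descriptor per buffer, setting XDP_PKT_CONTD in desc.options on every descriptor except the last one of a packet. On the consumer side this means a packet is finished only when a descriptor arrives with the bit clear. A minimal userspace sketch of that contract, assuming app-defined ring accessors (rx_ring_peek/rx_ring_release/handle_fragment are hypothetical; only struct xdp_desc and XDP_PKT_CONTD come from <linux/if_xdp.h>):

#include <linux/if_xdp.h>
#include <stdbool.h>

/* App-defined ring accessors (hypothetical; any AF_XDP library's
 * equivalents follow the same pattern): */
bool rx_ring_peek(struct xdp_desc *desc);
void rx_ring_release(unsigned int n);
void handle_fragment(__u64 addr, __u32 len);

/* Drain one logical packet: a descriptor with XDP_PKT_CONTD set in
 * desc.options is continued by the next descriptor in the ring. */
static void recv_one_packet(void)
{
	struct xdp_desc desc;
	bool more;

	do {
		if (!rx_ring_peek(&desc))
			return;	/* ring empty mid-packet: try again later */
		handle_fragment(desc.addr, desc.len);
		more = desc.options & XDP_PKT_CONTD;
		rx_ring_release(1);
	} while (more);
}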
+static void *xsk_copy_xdp_start(struct xdp_buff *from)
+{
+ if (unlikely(xdp_data_meta_unsupported(from)))
+ return from->data;
+ else
+ return from->data_meta;
}
-static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
+ u32 *from_len, skb_frag_t **frag, u32 rem)
{
+ u32 copied = 0;
+
+ while (1) {
+ u32 copy_len = min_t(u32, *from_len, to_len);
+
+ memcpy(to, *from, copy_len);
+ copied += copy_len;
+ if (rem == copied)
+ return copied;
+
+ if (*from_len == copy_len) {
+ *from = skb_frag_address(*frag);
+ *from_len = skb_frag_size((*frag)++);
+ } else {
+ *from += copy_len;
+ *from_len -= copy_len;
+ }
+ if (to_len == copy_len)
+ return copied;
+
+ to_len -= copy_len;
+ to += copy_len;
+ }
+}
+
+static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+{
+ u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
+ void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
+ u32 from_len, meta_len, rem, num_desc;
+ struct xdp_buff_xsk *xskb;
struct xdp_buff *xsk_xdp;
- int err;
- u32 len;
+ skb_frag_t *frag;
- len = xdp->data_end - xdp->data;
- if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
- xs->rx_dropped++;
- return -ENOSPC;
+ from_len = xdp->data_end - copy_from;
+ meta_len = xdp->data - copy_from;
+ rem = len + meta_len;
+
+ if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
+ int err;
+
+ xsk_xdp = xsk_buff_alloc(xs->pool);
+ if (!xsk_xdp) {
+ xs->rx_dropped++;
+ return -ENOMEM;
+ }
+ memcpy(xsk_xdp->data - meta_len, copy_from, rem);
+ xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
+ err = __xsk_rcv_zc(xs, xskb, len, 0);
+ if (err) {
+ xsk_buff_free(xsk_xdp);
+ return err;
+ }
+
+ return 0;
}
- xsk_xdp = xsk_buff_alloc(xs->pool);
- if (!xsk_xdp) {
+ num_desc = (len - 1) / frame_size + 1;
+
+ if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
xs->rx_dropped++;
return -ENOMEM;
}
+ if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
+ xs->rx_queue_full++;
+ return -ENOBUFS;
+ }
- xsk_copy_xdp(xsk_xdp, xdp, len);
- err = __xsk_rcv_zc(xs, xsk_xdp, len);
- if (err) {
- xsk_buff_free(xsk_xdp);
- return err;
+ if (xdp_buff_has_frags(xdp)) {
+ struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ frag = &sinfo->frags[0];
}
+
+ do {
+ u32 to_len = frame_size + meta_len;
+ u32 copied;
+
+ xsk_xdp = xsk_buff_alloc(xs->pool);
+ copy_to = xsk_xdp->data - meta_len;
+
+ copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
+ rem -= copied;
+
+ xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
+ __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
+ meta_len = 0;
+ } while (rem);
+
return 0;
}
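For copy-mode sockets, the reworked __xsk_rcv() above splits a frame larger than the pool's RX frame size across ceil(len / frame_size) descriptors, reserving them all up front so the copy loop cannot fail halfway. A standalone sketch of that sizing math (frame_size and len are illustrative values, not from the patch):

#include <assert.h>

int main(void)
{
	unsigned int frame_size = 4096, len = 9000;
	/* Same expression as num_desc in __xsk_rcv(): a 9000-byte frame
	 * with 4096-byte pool frames needs 3 descriptors; XDP_PKT_CONTD
	 * is set on all but the last. */
	unsigned int num_desc = (len - 1) / frame_size + 1;

	assert(num_desc == 3);
	return 0;
}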
@@ -205,6 +307,13 @@ static bool xsk_tx_writeable(struct xdp_sock *xs)
return true;
}
+static void __xsk_tx_release(struct xdp_sock *xs)
+{
+ __xskq_cons_release(xs->tx);
+ if (xsk_tx_writeable(xs))
+ xs->sk.sk_write_space(&xs->sk);
+}
+
static bool xsk_is_bound(struct xdp_sock *xs)
{
if (READ_ONCE(xs->state) == XSK_BOUND) {
@@ -215,7 +324,7 @@ static bool xsk_is_bound(struct xdp_sock *xs)
return false;
}
-static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
+static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
if (!xsk_is_bound(xs))
return -ENXIO;
@@ -223,7 +332,11 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
return -EINVAL;
- sk_mark_napi_id_once_xdp(&xs->sk, xdp);
+ if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
+ xs->rx_dropped++;
+ return -ENOSPC;
+ }
+
return 0;
}
@@ -236,33 +349,35 @@ static void xsk_flush(struct xdp_sock *xs)
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
+ u32 len = xdp_get_buff_len(xdp);
int err;
- spin_lock_bh(&xs->rx_lock);
- err = xsk_rcv_check(xs, xdp);
+ err = xsk_rcv_check(xs, xdp, len);
if (!err) {
- err = __xsk_rcv(xs, xdp);
+ spin_lock_bh(&xs->pool->rx_lock);
+ err = __xsk_rcv(xs, xdp, len);
xsk_flush(xs);
+ spin_unlock_bh(&xs->pool->rx_lock);
}
- spin_unlock_bh(&xs->rx_lock);
+
return err;
}
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
+ u32 len = xdp_get_buff_len(xdp);
int err;
- u32 len;
- err = xsk_rcv_check(xs, xdp);
+ err = xsk_rcv_check(xs, xdp, len);
if (err)
return err;
if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
len = xdp->data_end - xdp->data;
- return __xsk_rcv_zc(xs, xdp, len);
+ return xsk_rcv_zc(xs, xdp, len);
}
- err = __xsk_rcv(xs, xdp);
+ err = __xsk_rcv(xs, xdp, len);
if (!err)
xdp_return_buff(xdp);
return err;
@@ -270,22 +385,23 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
int err;
err = xsk_rcv(xs, xdp);
if (err)
return err;
- if (!xs->flush_node.prev)
+ if (!xs->flush_node.prev) {
+ struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list();
+
list_add(&xs->flush_node, flush_list);
+ }
return 0;
}
-void __xsk_map_flush(void)
+void __xsk_map_flush(struct list_head *flush_list)
{
- struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
struct xdp_sock *xs, *tmp;
list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
@@ -305,26 +421,33 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
struct xdp_sock *xs;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
- __xskq_cons_release(xs->tx);
- if (xsk_tx_writeable(xs))
- xs->sk.sk_write_space(&xs->sk);
- }
+ list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
+ __xsk_tx_release(xs);
rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
+ bool budget_exhausted = false;
struct xdp_sock *xs;
rcu_read_lock();
+again:
list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
+ if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
+ budget_exhausted = true;
+ continue;
+ }
+
if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
- xs->tx->queue_empty_descs++;
+ if (xskq_has_descs(xs->tx))
+ xskq_cons_release(xs->tx);
continue;
}
+ xs->tx_budget_spent++;
+
/* This is the backpressure mechanism for the Tx path.
* Reserve space in the completion queue and only proceed
* if there is space in it. This avoids having to implement
@@ -338,6 +461,14 @@ bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
return true;
}
+ if (budget_exhausted) {
+ list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
+ xs->tx_budget_spent = 0;
+
+ budget_exhausted = false;
+ goto again;
+ }
+
out:
rcu_read_unlock();
return false;
@@ -408,47 +539,240 @@ static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}
-static void xsk_destruct_skb(struct sk_buff *skb)
+static int xsk_cq_reserve_locked(struct xsk_buff_pool *pool)
{
- u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
- struct xdp_sock *xs = xdp_sk(skb->sk);
+ int ret;
+
+ spin_lock(&pool->cq_cached_prod_lock);
+ ret = xskq_prod_reserve(pool->cq);
+ spin_unlock(&pool->cq_cached_prod_lock);
+
+ return ret;
+}
+
+static bool xsk_skb_destructor_is_addr(struct sk_buff *skb)
+{
+ return (uintptr_t)skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static u64 xsk_skb_destructor_get_addr(struct sk_buff *skb)
+{
+ return (u64)((uintptr_t)skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
+static void xsk_skb_destructor_set_addr(struct sk_buff *skb, u64 addr)
+{
+ skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t)addr | 0x1UL);
+}
+
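The helpers above overload skb_shinfo(skb)->destructor_arg: with bit 0 set, the field carries a single umem address inline; with bit 0 clear, it is a pointer to a kmem_cache-allocated struct xsk_addrs, whose alignment guarantees that bit is never set in a real pointer. A minimal userspace model of the tagging scheme (values are illustrative):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x3000;	/* an inline address; low bit unused */
	void *tagged = (void *)((uintptr_t)addr | 0x1UL);

	/* Bit 0 set: recognized as an inline address, not a pointer. */
	assert((uintptr_t)tagged & 0x1UL);
	/* Masking the tag bit recovers the original address. */
	assert(((uintptr_t)tagged & ~0x1UL) == addr);
	return 0;
}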
+static void xsk_inc_num_desc(struct sk_buff *skb)
+{
+ struct xsk_addrs *xsk_addr;
+
+ if (!xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ xsk_addr->num_descs++;
+ }
+}
+
+static u32 xsk_get_num_desc(struct sk_buff *skb)
+{
+ struct xsk_addrs *xsk_addr;
+
+ if (xsk_skb_destructor_is_addr(skb))
+ return 1;
+
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+
+ return xsk_addr->num_descs;
+}
+
+static void xsk_cq_submit_addr_locked(struct xsk_buff_pool *pool,
+ struct sk_buff *skb)
+{
+ u32 num_descs = xsk_get_num_desc(skb);
+ struct xsk_addrs *xsk_addr;
+ u32 descs_processed = 0;
unsigned long flags;
+ u32 idx, i;
+
+ spin_lock_irqsave(&pool->cq_prod_lock, flags);
+ idx = xskq_get_prod(pool->cq);
+
+ if (unlikely(num_descs > 1)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+
+ for (i = 0; i < num_descs; i++) {
+ xskq_prod_write_addr(pool->cq, idx + descs_processed,
+ xsk_addr->addrs[i]);
+ descs_processed++;
+ }
+ kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
+ } else {
+ xskq_prod_write_addr(pool->cq, idx,
+ xsk_skb_destructor_get_addr(skb));
+ descs_processed++;
+ }
+ xskq_prod_submit_n(pool->cq, descs_processed);
+ spin_unlock_irqrestore(&pool->cq_prod_lock, flags);
+}
+
+static void xsk_cq_cancel_locked(struct xsk_buff_pool *pool, u32 n)
+{
+ spin_lock(&pool->cq_cached_prod_lock);
+ xskq_prod_cancel_n(pool->cq, n);
+ spin_unlock(&pool->cq_cached_prod_lock);
+}
+
+INDIRECT_CALLABLE_SCOPE
+void xsk_destruct_skb(struct sk_buff *skb)
+{
+ struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
- spin_lock_irqsave(&xs->pool->cq_lock, flags);
- xskq_prod_submit_addr(xs->pool->cq, addr);
- spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ if (compl->tx_timestamp) {
+ /* sw completion timestamp, not a real one */
+ *compl->tx_timestamp = ktime_get_tai_fast_ns();
+ }
+ xsk_cq_submit_addr_locked(xdp_sk(skb->sk)->pool, skb);
sock_wfree(skb);
}
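Since xsk_destruct_skb() writes ktime_get_tai_fast_ns() through compl->tx_timestamp, userspace that set XDP_TXMD_FLAGS_TIMESTAMP can read the software completion timestamp back out of the metadata area once the address reappears on the completion ring. A sketch, assuming the UMEM was registered with tx_metadata_len == sizeof(struct xsk_tx_metadata) and addr was just read from the completion ring:

#include <linux/if_xdp.h>
#include <stdio.h>

static void print_tx_timestamp(void *umem_area, __u64 addr)
{
	/* Metadata sits tx_metadata_len bytes before the packet data. */
	struct xsk_tx_metadata *meta = (struct xsk_tx_metadata *)
		((char *)umem_area + addr - sizeof(*meta));

	printf("tx completed at %llu ns (CLOCK_TAI)\n",
	       (unsigned long long)meta->completion.tx_timestamp);
}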
+static void xsk_skb_init_misc(struct sk_buff *skb, struct xdp_sock *xs,
+ u64 addr)
+{
+ skb->dev = xs->dev;
+ skb->priority = READ_ONCE(xs->sk.sk_priority);
+ skb->mark = READ_ONCE(xs->sk.sk_mark);
+ skb->destructor = xsk_destruct_skb;
+ xsk_skb_destructor_set_addr(skb, addr);
+}
+
+static void xsk_consume_skb(struct sk_buff *skb)
+{
+ struct xdp_sock *xs = xdp_sk(skb->sk);
+ u32 num_descs = xsk_get_num_desc(skb);
+ struct xsk_addrs *xsk_addr;
+
+ if (unlikely(num_descs > 1)) {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ kmem_cache_free(xsk_tx_generic_cache, xsk_addr);
+ }
+
+ skb->destructor = sock_wfree;
+ xsk_cq_cancel_locked(xs->pool, num_descs);
+ /* Free skb without triggering the perf drop trace */
+ consume_skb(skb);
+ xs->skb = NULL;
+}
+
+static void xsk_drop_skb(struct sk_buff *skb)
+{
+ xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
+ xsk_consume_skb(skb);
+}
+
+static int xsk_skb_metadata(struct sk_buff *skb, void *buffer,
+ struct xdp_desc *desc, struct xsk_buff_pool *pool,
+ u32 hr)
+{
+ struct xsk_tx_metadata *meta = NULL;
+
+ if (unlikely(pool->tx_metadata_len == 0))
+ return -EINVAL;
+
+ meta = buffer - pool->tx_metadata_len;
+ if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
+ return -EINVAL;
+
+ if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
+ if (unlikely(meta->request.csum_start +
+ meta->request.csum_offset +
+ sizeof(__sum16) > desc->len))
+ return -EINVAL;
+
+ skb->csum_start = hr + meta->request.csum_start;
+ skb->csum_offset = meta->request.csum_offset;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ if (unlikely(pool->tx_sw_csum)) {
+ int err;
+
+ err = skb_checksum_help(skb);
+ if (err)
+ return err;
+ }
+ }
+
+ if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME)
+ skb->skb_mstamp_ns = meta->request.launch_time;
+ xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
+
+ return 0;
+}
+
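xsk_skb_metadata() consumes a request that userspace places tx_metadata_len bytes in front of the packet and flags with XDP_TX_METADATA in the descriptor options. A sketch of the producer side for checksum offload, assuming the UMEM was registered with tx_metadata_len == sizeof(struct xsk_tx_metadata) and that umem_area/desc come from the application's own setup:

#include <linux/if_xdp.h>
#include <string.h>

static void request_csum(void *umem_area, struct xdp_desc *desc,
			 __u16 csum_start, __u16 csum_offset)
{
	struct xsk_tx_metadata *meta = (struct xsk_tx_metadata *)
		((char *)umem_area + desc->addr - sizeof(*meta));

	memset(meta, 0, sizeof(*meta));
	meta->flags = XDP_TXMD_FLAGS_CHECKSUM;
	meta->request.csum_start = csum_start;	/* offset into the packet */
	meta->request.csum_offset = csum_offset; /* where to store the sum */
	desc->options |= XDP_TX_METADATA;	/* tell the kernel to parse it */
}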
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
struct xdp_desc *desc)
{
struct xsk_buff_pool *pool = xs->pool;
u32 hr, len, ts, offset, copy, copied;
- struct sk_buff *skb;
+ struct sk_buff *skb = xs->skb;
struct page *page;
void *buffer;
int err, i;
u64 addr;
- hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
+ addr = desc->addr;
+ buffer = xsk_buff_raw_get_data(pool, addr);
- skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
- if (unlikely(!skb))
- return ERR_PTR(err);
+ if (!skb) {
+ hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
- skb_reserve(skb, hr);
+ skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
+ if (unlikely(!skb))
+ return ERR_PTR(err);
+
+ skb_reserve(skb, hr);
+
+ xsk_skb_init_misc(skb, xs, desc->addr);
+ if (desc->options & XDP_TX_METADATA) {
+ err = xsk_skb_metadata(skb, buffer, desc, pool, hr);
+ if (unlikely(err))
+ return ERR_PTR(err);
+ }
+ } else {
+ struct xsk_addrs *xsk_addr;
+
+ if (xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
+ GFP_KERNEL);
+ if (!xsk_addr)
+ return ERR_PTR(-ENOMEM);
+
+ xsk_addr->num_descs = 1;
+ xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
+ skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
+ } else {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ }
+
+ /* In case of the -EOVERFLOW that can happen below,
+ * xsk_consume_skb() will free this xsk_addrs node when the
+ * whole skb is dropped, releasing all recorded addresses with it
+ */
+ xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
+ }
- addr = desc->addr;
len = desc->len;
ts = pool->unaligned ? len : pool->chunk_size;
- buffer = xsk_buff_raw_get_data(pool, addr);
offset = offset_in_page(buffer);
addr = buffer - pool->addrs;
- for (copied = 0, i = 0; copied < len; i++) {
+ for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
+ if (unlikely(i >= MAX_SKB_FRAGS))
+ return ERR_PTR(-EOVERFLOW);
+
page = pool->umem->pgs[addr >> PAGE_SHIFT];
get_page(page);
@@ -473,53 +797,115 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
struct xdp_desc *desc)
{
struct net_device *dev = xs->dev;
- struct sk_buff *skb;
+ struct sk_buff *skb = xs->skb;
+ int err;
if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
skb = xsk_build_skb_zerocopy(xs, desc);
- if (IS_ERR(skb))
- return skb;
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ skb = NULL;
+ goto free_err;
+ }
} else {
u32 hr, tr, len;
void *buffer;
- int err;
- hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
- tr = dev->needed_tailroom;
+ buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
len = desc->len;
- skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
- if (unlikely(!skb))
- return ERR_PTR(err);
+ if (!skb) {
+ hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
+ tr = dev->needed_tailroom;
+ skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
+ if (unlikely(!skb))
+ goto free_err;
+
+ skb_reserve(skb, hr);
+ skb_put(skb, len);
+
+ err = skb_store_bits(skb, 0, buffer, len);
+ if (unlikely(err))
+ goto free_err;
+
+ xsk_skb_init_misc(skb, xs, desc->addr);
+ if (desc->options & XDP_TX_METADATA) {
+ err = xsk_skb_metadata(skb, buffer, desc,
+ xs->pool, hr);
+ if (unlikely(err))
+ goto free_err;
+ }
+ } else {
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct xsk_addrs *xsk_addr;
+ struct page *page;
+ u8 *vaddr;
+
+ if (xsk_skb_destructor_is_addr(skb)) {
+ xsk_addr = kmem_cache_zalloc(xsk_tx_generic_cache,
+ GFP_KERNEL);
+ if (!xsk_addr) {
+ err = -ENOMEM;
+ goto free_err;
+ }
- skb_reserve(skb, hr);
- skb_put(skb, len);
+ xsk_addr->num_descs = 1;
+ xsk_addr->addrs[0] = xsk_skb_destructor_get_addr(skb);
+ skb_shinfo(skb)->destructor_arg = (void *)xsk_addr;
+ } else {
+ xsk_addr = (struct xsk_addrs *)skb_shinfo(skb)->destructor_arg;
+ }
- buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
- err = skb_store_bits(skb, 0, buffer, len);
- if (unlikely(err)) {
- kfree_skb(skb);
- return ERR_PTR(err);
+ if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
+ err = -EOVERFLOW;
+ goto free_err;
+ }
+
+ page = alloc_page(xs->sk.sk_allocation);
+ if (unlikely(!page)) {
+ err = -EAGAIN;
+ goto free_err;
+ }
+
+ vaddr = kmap_local_page(page);
+ memcpy(vaddr, buffer, len);
+ kunmap_local(vaddr);
+
+ skb_add_rx_frag(skb, nr_frags, page, 0, len, PAGE_SIZE);
+ refcount_add(PAGE_SIZE, &xs->sk.sk_wmem_alloc);
+
+ xsk_addr->addrs[xsk_addr->num_descs] = desc->addr;
}
}
- skb->dev = dev;
- skb->priority = xs->sk.sk_priority;
- skb->mark = xs->sk.sk_mark;
- skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
- skb->destructor = xsk_destruct_skb;
+ xsk_inc_num_desc(skb);
return skb;
+
+free_err:
+ if (skb && !skb_shinfo(skb)->nr_frags)
+ kfree_skb(skb);
+
+ if (err == -EOVERFLOW) {
+ /* Drop the packet */
+ xsk_inc_num_desc(xs->skb);
+ xsk_drop_skb(xs->skb);
+ xskq_cons_release(xs->tx);
+ } else {
+ /* Let application retry */
+ xsk_cq_cancel_locked(xs->pool, 1);
+ }
+
+ return ERR_PTR(err);
}
static int __xsk_generic_xmit(struct sock *sk)
{
struct xdp_sock *xs = xdp_sk(sk);
- u32 max_batch = TX_BATCH_SIZE;
bool sent_frame = false;
struct xdp_desc desc;
struct sk_buff *skb;
- unsigned long flags;
+ u32 max_batch;
int err = 0;
mutex_lock(&xs->mutex);
@@ -533,6 +919,7 @@ static int __xsk_generic_xmit(struct sock *sk)
if (xs->queue_id >= xs->dev->real_num_tx_queues)
goto out;
+ max_batch = READ_ONCE(xs->max_tx_budget);
while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
if (max_batch-- == 0) {
err = -EAGAIN;
@@ -544,52 +931,58 @@ static int __xsk_generic_xmit(struct sock *sk)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- spin_lock_irqsave(&xs->pool->cq_lock, flags);
- if (xskq_prod_reserve(xs->pool->cq)) {
- spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
+ err = xsk_cq_reserve_locked(xs->pool);
+ if (err) {
+ err = -EAGAIN;
goto out;
}
- spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
skb = xsk_build_skb(xs, &desc);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- spin_lock_irqsave(&xs->pool->cq_lock, flags);
- xskq_prod_cancel(xs->pool->cq);
- spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
- goto out;
+ if (err != -EOVERFLOW)
+ goto out;
+ err = 0;
+ continue;
+ }
+
+ xskq_cons_release(xs->tx);
+
+ if (xp_mb_desc(&desc)) {
+ xs->skb = skb;
+ continue;
}
err = __dev_direct_xmit(skb, xs->queue_id);
if (err == NETDEV_TX_BUSY) {
/* Tell user-space to retry the send */
- skb->destructor = sock_wfree;
- spin_lock_irqsave(&xs->pool->cq_lock, flags);
- xskq_prod_cancel(xs->pool->cq);
- spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
- /* Free skb without triggering the perf drop trace */
- consume_skb(skb);
+ xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
+ xsk_consume_skb(skb);
err = -EAGAIN;
goto out;
}
- xskq_cons_release(xs->tx);
/* Ignore NET_XMIT_CN as packet might have been sent */
if (err == NET_XMIT_DROP) {
/* SKB completed but not sent */
err = -EBUSY;
+ xs->skb = NULL;
goto out;
}
sent_frame = true;
+ xs->skb = NULL;
}
- xs->tx->queue_empty_descs++;
+ if (xskq_has_descs(xs->tx)) {
+ if (xs->skb)
+ xsk_drop_skb(xs->skb);
+ xskq_cons_release(xs->tx);
+ }
out:
if (sent_frame)
- if (xsk_tx_writeable(xs))
- sk->sk_write_space(sk);
+ __xsk_tx_release(xs);
mutex_unlock(&xs->mutex);
return err;
@@ -613,7 +1006,7 @@ static bool xsk_no_wakeup(struct sock *sk)
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Prefer busy-polling, skip the wakeup. */
return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
- READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
+ napi_id_valid(READ_ONCE(sk->sk_napi_id));
#else
return false;
#endif
@@ -645,11 +1038,8 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
if (unlikely(!xs->tx))
return -ENOBUFS;
- if (sk_can_busy_loop(sk)) {
- if (xs->zc)
- __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
+ if (sk_can_busy_loop(sk))
sk_busy_loop(sk, 1); /* only support non-blocking sockets */
- }
if (xs->zc && xsk_no_wakeup(sk))
return 0;
@@ -723,7 +1113,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
rcu_read_lock();
if (xsk_check_common(xs))
- goto skip_tx;
+ goto out;
pool = xs->pool;
@@ -735,12 +1125,11 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
xsk_generic_xmit(sk);
}
-skip_tx:
if (xs->rx && !xskq_prod_is_empty(xs->rx))
mask |= EPOLLIN | EPOLLRDNORM;
if (xs->tx && xsk_tx_writeable(xs))
mask |= EPOLLOUT | EPOLLWRNORM;
-
+out:
rcu_read_unlock();
return mask;
}
@@ -834,6 +1223,9 @@ static int xsk_release(struct socket *sock)
net = sock_net(sk);
+ if (xs->skb)
+ xsk_drop_skb(xs->skb);
+
mutex_lock(&net->xdp.lock);
sk_del_node_init_rcu(sk);
mutex_unlock(&net->xdp.lock);
@@ -880,7 +1272,7 @@ static bool xsk_validate_queues(struct xdp_sock *xs)
return xs->fq_tmp && xs->cq_tmp;
}
-static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int xsk_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len)
{
struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
struct sock *sk = sock->sk;
@@ -897,7 +1289,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
flags = sxdp->sxdp_flags;
if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
- XDP_USE_NEED_WAKEUP))
+ XDP_USE_NEED_WAKEUP | XDP_USE_SG))
return -EINVAL;
bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
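With XDP_USE_SG now accepted among the bind flags, a userspace bind call might look like the following sketch (ifindex, queue_id, and the socket fd come from the application's own setup; AF_XDP and the flag constants are from the UAPI headers):

#include <linux/if_xdp.h>
#include <string.h>
#include <sys/socket.h>

static int bind_sg(int xsk_fd, int ifindex, __u32 queue_id)
{
	struct sockaddr_xdp sxdp;

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = ifindex;
	sxdp.sxdp_queue_id = queue_id;
	/* Request multi-buffer support alongside need-wakeup mode. */
	sxdp.sxdp_flags = XDP_USE_SG | XDP_USE_NEED_WAKEUP;

	return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}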
@@ -917,6 +1309,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
goto out_release;
}
+ netdev_lock_ops(dev);
+
if (!xs->rx && !xs->tx) {
err = -EINVAL;
goto out_unlock;
@@ -929,7 +1323,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
struct socket *sock;
if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
- (flags & XDP_USE_NEED_WAKEUP)) {
+ (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
/* Cannot specify flags for shared sockets. */
err = -EINVAL;
goto out_unlock;
@@ -994,6 +1388,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
err = xp_alloc_tx_descs(xs->pool, xs);
if (err) {
xp_put_pool(xs->pool);
+ xs->pool = NULL;
sockfd_put(sock);
goto out_unlock;
}
@@ -1028,9 +1423,18 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
xs->dev = dev;
xs->zc = xs->umem->zc;
+ xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
xs->queue_id = qid;
xp_add_xsk(xs->pool, xs);
+ if (qid < dev->real_num_rx_queues) {
+ struct netdev_rx_queue *rxq;
+
+ rxq = __netif_get_rx_queue(dev, qid);
+ if (rxq->napi)
+ __sk_mark_napi_id_once(sk, rxq->napi->napi_id);
+ }
+
out_unlock:
if (err) {
dev_put(dev);
@@ -1041,6 +1445,7 @@ out_unlock:
smp_wmb();
WRITE_ONCE(xs->state, XSK_BOUND);
}
+ netdev_unlock_ops(dev);
out_release:
mutex_unlock(&xs->mutex);
rtnl_unlock();
@@ -1100,6 +1505,17 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
else if (optlen < sizeof(mr))
mr_size = sizeof(struct xdp_umem_reg_v1);
+ BUILD_BUG_ON(sizeof(struct xdp_umem_reg_v1) >= sizeof(struct xdp_umem_reg));
+
+ /* Make sure the last field of the struct doesn't have
+ * uninitialized padding. All padding has to be explicit
+ * and has to be set to zero by the userspace to make
+ * struct xdp_umem_reg extensible in the future.
+ */
+ BUILD_BUG_ON(offsetof(struct xdp_umem_reg, tx_metadata_len) +
+ sizeof_field(struct xdp_umem_reg, tx_metadata_len) !=
+ sizeof(struct xdp_umem_reg));
+
if (copy_from_sockptr(&mr, optval, mr_size))
return -EFAULT;
@@ -1127,6 +1543,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
struct xsk_queue **q;
int entries;
+ if (optlen < sizeof(entries))
+ return -EINVAL;
if (copy_from_sockptr(&entries, optval, sizeof(entries)))
return -EFAULT;
@@ -1142,6 +1560,21 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
mutex_unlock(&xs->mutex);
return err;
}
+ case XDP_MAX_TX_SKB_BUDGET:
+ {
+ unsigned int budget;
+
+ if (optlen != sizeof(budget))
+ return -EINVAL;
+ if (copy_from_sockptr(&budget, optval, sizeof(budget)))
+ return -EFAULT;
+ if (!xs->tx ||
+ budget < TX_BATCH_SIZE || budget > xs->tx->nentries)
+ return -EACCES;
+
+ WRITE_ONCE(xs->max_tx_budget, budget);
+ return 0;
+ }
default:
break;
}
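The new XDP_MAX_TX_SKB_BUDGET option is driven from userspace via setsockopt(). A sketch, assuming xsk_fd is an AF_XDP socket whose TX ring has already been configured:

#include <linux/if_xdp.h>
#include <sys/socket.h>

static int set_tx_budget(int xsk_fd, unsigned int budget)
{
	/* Per the check above, budget must be >= TX_BATCH_SIZE (32) and
	 * <= the TX ring size, else the kernel returns -EACCES. */
	return setsockopt(xsk_fd, SOL_XDP, XDP_MAX_TX_SKB_BUDGET,
			  &budget, sizeof(budget));
}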
@@ -1439,8 +1872,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
xs = xdp_sk(sk);
xs->state = XSK_READY;
+ xs->max_tx_budget = TX_BATCH_SIZE;
mutex_init(&xs->mutex);
- spin_lock_init(&xs->rx_lock);
INIT_LIST_HEAD(&xs->map_list);
spin_lock_init(&xs->map_list_lock);
@@ -1483,7 +1916,7 @@ static struct pernet_operations xsk_net_ops = {
static int __init xsk_init(void)
{
- int err, cpu;
+ int err;
err = proto_register(&xsk_proto, 0 /* no slab */);
if (err)
@@ -1501,10 +1934,18 @@ static int __init xsk_init(void)
if (err)
goto out_pernet;
- for_each_possible_cpu(cpu)
- INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
+ xsk_tx_generic_cache = kmem_cache_create("xsk_generic_xmit_cache",
+ sizeof(struct xsk_addrs),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!xsk_tx_generic_cache) {
+ err = -ENOMEM;
+ goto out_unreg_notif;
+ }
+
return 0;
+out_unreg_notif:
+ unregister_netdevice_notifier(&xsk_netdev_notifier);
out_pernet:
unregister_pernet_subsys(&xsk_net_ops);
out_sk: