author	Nuno Gonçalves <nunog@fr24.com>	2023-03-24 10:02:22 +0000
committer	Alexei Starovoitov <ast@kernel.org>	2023-03-25 21:07:35 -0700
commit	5f5a7d8d8bd461f515543040ad7d107cc405d30c (patch)
tree	a682e451ae97a8d77c45c80738e994c49b4576d4 /net/xdp
parent	8cfee110711ed60bfdd39af0107ddef01d6b72c3 (diff)
xsk: allow remap of fill and/or completion rings
Remapping the fill and completion rings was frowned upon, as they control
the usage of the UMEM, which does not support concurrent use. At the same
time, this disallowed remapping these rings into another process.

A possible use case is that the user wants to transfer the socket/UMEM
ownership to another process (via SYS_pidfd_getfd) and so would also need
to remap these rings.

This has no impact on current usages and just relaxes the remap limitation.

Signed-off-by: Nuno Gonçalves <nunog@fr24.com>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/r/20230324100222.13434-1-nunog@fr24.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
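To illustrate the use case described above, the following is a minimal userspace
sketch (not part of the patch): a second process duplicates an existing AF_XDP
socket fd via SYS_pidfd_getfd and then remaps its fill ring, which xsk_mmap()
now permits for a socket in the XSK_BOUND state. The pid, fd number, and the
ring size of 2048 are illustrative assumptions only.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <linux/if_xdp.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <pid> <xsk-fd>\n", argv[0]);
		return 1;
	}
	pid_t pid = atoi(argv[1]);	/* process owning the AF_XDP socket (assumed) */
	int remote_fd = atoi(argv[2]);	/* fd number of that socket in the owner (assumed) */

	/* Open a pidfd for the owner and duplicate its XSK fd into this process. */
	int pidfd = syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) { perror("pidfd_open"); return 1; }
	int xsk_fd = syscall(SYS_pidfd_getfd, pidfd, remote_fd, 0);
	if (xsk_fd < 0) { perror("pidfd_getfd"); return 1; }

	/* Query the ring layout so we know how much to map. */
	struct xdp_mmap_offsets off;
	socklen_t optlen = sizeof(off);
	if (getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen)) {
		perror("getsockopt(XDP_MMAP_OFFSETS)");
		return 1;
	}

	/* Before this patch, xsk_mmap() returned -EBUSY here for a bound socket. */
	size_t fill_len = off.fr.desc + 2048 * sizeof(__u64);	/* 2048 = assumed ring size */
	void *fill = mmap(NULL, fill_len, PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_POPULATE, xsk_fd,
			  XDP_UMEM_PGOFF_FILL_RING);
	if (fill == MAP_FAILED) { perror("mmap(fill ring)"); return 1; }

	printf("fill ring remapped at %p\n", fill);
	return 0;
}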
Diffstat (limited to 'net/xdp')
-rw-r--r--	net/xdp/xsk.c	9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 2ac58b282b5e..cc1e7f15fa73 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1301,9 +1301,10 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
 	unsigned long size = vma->vm_end - vma->vm_start;
 	struct xdp_sock *xs = xdp_sk(sock->sk);
+	int state = READ_ONCE(xs->state);
 	struct xsk_queue *q = NULL;
 
-	if (READ_ONCE(xs->state) != XSK_READY)
+	if (state != XSK_READY && state != XSK_BOUND)
 		return -EBUSY;
 
 	if (offset == XDP_PGOFF_RX_RING) {
@@ -1314,9 +1315,11 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 		/* Matches the smp_wmb() in XDP_UMEM_REG */
 		smp_rmb();
 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
-			q = READ_ONCE(xs->fq_tmp);
+			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
+						 READ_ONCE(xs->pool->fq);
 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
-			q = READ_ONCE(xs->cq_tmp);
+			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
+						 READ_ONCE(xs->pool->cq);
 	}
 
 	if (!q)