author		Xuan Zhuo <xuanzhuo@linux.alibaba.com>	2023-02-16 16:30:47 +0800
committer	David S. Miller <davem@davemloft.net>	2023-02-20 08:22:12 +0000
commit		9f78bf330a66cd400b3e00f370f597e9fa939207 (patch)
tree		2d260b0cc0471c30641dadb8ae18cef51bf85b2e /net/xdp/xsk.c
parent		b148d400f820637bcc95f6aca64c8763a2db858f (diff)
xsk: support use vaddr as ring
When we try to start AF_XDP on machines that have been running for a long time, memory fragmentation can leave no contiguous physical memory large enough for the rings, and the start fails. If the queue has 8 * 1024 entries, the desc[] array alone is 8 * 1024 * 8 bytes = 16 pages, and adding struct xdp_ring pushes the total past 16 pages, so the allocation must be order-4. With many queues, such allocations are hard to satisfy on long-running machines. It also wastes memory: an order-4 allocation is 32 pages, but only 17 are used, so 15 pages are wasted.

This patch replaces __get_free_pages() with vmalloc() for the ring allocation to solve these problems.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
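To make the sizing argument concrete, below is a minimal sketch of the allocation-side counterpart that the xsk_mmap() change in this diff relies on. The real allocation change presumably sits alongside xskq_create() in net/xdp/xsk_queue.c, which is not part of this diffstat (it is limited to xsk.c); the helper name and the exact size computation here are illustrative, and only q->ring and q->ring_vmalloc_size come from the patch.

	/*
	 * Sketch only: vmalloc-based ring allocation paired with the new
	 * bound check in xsk_mmap(). Relies on the in-tree private header
	 * net/xdp/xsk_queue.h for struct xsk_queue / struct xdp_rxtx_ring.
	 */
	#include <linux/mm.h>
	#include <linux/vmalloc.h>
	#include "xsk_queue.h"

	static int xsk_ring_alloc_sketch(struct xsk_queue *q, u32 nentries)
	{
		/* Commit-message example: nentries = 8 * 1024 with 8-byte
		 * descriptors is 64 KiB = 16 pages for desc[] alone; the
		 * struct xdp_ring header pushes it past 16 pages, so
		 * __get_free_pages() had to round up to order 4 (32 pages),
		 * wasting 15 of them.
		 */
		size_t size = sizeof(struct xdp_rxtx_ring) +
			      nentries * sizeof(struct xdp_desc);

		size = PAGE_ALIGN(size);

		/* vmalloc_user() needs only virtually contiguous pages and
		 * returns zeroed memory that is safe to map to user space.
		 */
		q->ring = vmalloc_user(size);
		if (!q->ring)
			return -ENOMEM;

		/* Saved so xsk_mmap() can bound-check the requested size. */
		q->ring_vmalloc_size = size;
		return 0;
	}

The design point is that the ring is only ever accessed through its virtual address (by the kernel and, after mmap, by user space), so physically contiguous pages were never required in the first place.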
Diffstat (limited to 'net/xdp/xsk.c')
-rw-r--r--	net/xdp/xsk.c	9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index a245c1b4a21b..63c82e8bcd8e 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1294,8 +1294,6 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 	unsigned long size = vma->vm_end - vma->vm_start;
 	struct xdp_sock *xs = xdp_sk(sock->sk);
 	struct xsk_queue *q = NULL;
-	unsigned long pfn;
-	struct page *qpg;
 
 	if (READ_ONCE(xs->state) != XSK_READY)
 		return -EBUSY;
@@ -1318,13 +1316,10 @@ static int xsk_mmap(struct file *file, struct socket *sock,
 
 	/* Matches the smp_wmb() in xsk_init_queue */
 	smp_rmb();
-	qpg = virt_to_head_page(q->ring);
-	if (size > page_size(qpg))
+	if (size > q->ring_vmalloc_size)
 		return -EINVAL;
 
-	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
-	return remap_pfn_range(vma, vma->vm_start, pfn,
-			       size, vma->vm_page_prot);
+	return remap_vmalloc_range(vma, q->ring, 0);
 }
 
 static int xsk_notifier(struct notifier_block *this,
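For context on what this mapping serves, here is a hedged user-space sketch of how an AF_XDP application sizes and mmap()s an RX ring; that mmap() lands in xsk_mmap() above, which after this patch bounds the request against q->ring_vmalloc_size and maps the vmalloc'd ring with remap_vmalloc_range(). The socket options and offsets come from the uapi in <linux/if_xdp.h>; the descriptor count is an arbitrary example value and error handling is trimmed, so treat this as an illustration rather than a complete AF_XDP setup.

	/* User-space sketch: create an RX ring and map it into our address space. */
	#include <linux/if_xdp.h>
	#include <sys/socket.h>
	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		int fd = socket(AF_XDP, SOCK_RAW, 0);
		unsigned int ndescs = 8 * 1024;	/* queue size from the commit message */
		struct xdp_mmap_offsets off;
		socklen_t optlen = sizeof(off);
		void *rx_ring;

		/* Size the RX ring; the kernel allocates it (now via vmalloc). */
		setsockopt(fd, SOL_XDP, XDP_RX_RING, &ndescs, sizeof(ndescs));

		/* Learn where the producer/consumer counters and desc[] live. */
		getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);

		/* Map the ring; the size must not exceed the kernel-side allocation. */
		rx_ring = mmap(NULL, off.rx.desc + ndescs * sizeof(struct xdp_desc),
			       PROT_READ | PROT_WRITE, MAP_SHARED,
			       fd, XDP_PGOFF_RX_RING);
		if (rx_ring == MAP_FAILED)
			perror("mmap");
		return 0;
	}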