author	Petr Tesarik <petr.tesarik.ext@huawei.com>	2023-06-26 15:01:03 +0200
committer	Christoph Hellwig <hch@lst.de>	2023-06-29 07:10:00 +0200
commit	aabd12609f91155f26584508b01f548215cc3c0c (patch)
tree	f46b1741f361abfad3e6cc10240a83382a1d8725 /kernel/dma
parent	0a2f6372a43ff5e948b8b10be34d4473f6c2ef6c (diff)
swiotlb: always set the number of areas before allocating the pool
The number of areas defaults to the number of possible CPUs. However, the total number of slots may have to be increased after adjusting the number of areas. Consequently, the number of areas must be determined before allocating the memory pool. This is even explained with a comment in swiotlb_init_remap(), but swiotlb_init_late() adjusts the number of areas after slots are already allocated. The areas may end up being smaller than IO_TLB_SEGSIZE, which breaks per-area locking.

While fixing swiotlb_init_late(), move all relevant comments before the definition of swiotlb_adjust_nareas() and convert them to kernel-doc.

Fixes: 20347fca71a3 ("swiotlb: split up the global swiotlb lock")
Signed-off-by: Petr Tesarik <petr.tesarik.ext@huawei.com>
Reviewed-by: Roberto Sassu <roberto.sassu@huawei.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
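For illustration, a minimal userspace sketch of the sizing invariant described above: once the area count is known, the slot count must be rounded up so that every area still holds at least IO_TLB_SEGSIZE slots. adjust_nslabs() and the sample numbers are hypothetical, not the kernel code; only IO_TLB_SEGSIZE (128 slots) matches mainline swiotlb.

#include <stdio.h>

#define IO_TLB_SEGSIZE	128	/* slots per segment, as in mainline swiotlb */

/* Once the number of areas is known, the total slot count must be rounded
 * up so every area still holds at least IO_TLB_SEGSIZE slots; this mirrors
 * the intent of round_up_default_nslabs() but is only an illustration. */
static unsigned long adjust_nslabs(unsigned long nslabs, unsigned int nareas)
{
	unsigned long min_nslabs = (unsigned long)nareas * IO_TLB_SEGSIZE;

	return nslabs < min_nslabs ? min_nslabs : nslabs;
}

int main(void)
{
	unsigned long nslabs = 2048;	/* hypothetical pool size in slots */
	unsigned int nareas = 64;	/* e.g. num_possible_cpus() */

	/* Wrong order (old swiotlb_init_late()): 2048 slots are allocated
	 * first, then split into 64 areas of 32 slots each, which is
	 * smaller than IO_TLB_SEGSIZE and breaks per-area locking. */
	printf("per-area slots, wrong order: %lu\n", nslabs / nareas);

	/* Right order: adjust first, then allocate 8192 slots, giving
	 * 64 areas of exactly IO_TLB_SEGSIZE slots each. */
	nslabs = adjust_nslabs(nslabs, nareas);
	printf("per-area slots, right order: %lu\n", nslabs / nareas);
	return 0;
}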
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/swiotlb.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 775f7bb10ab1..89db590f931f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -115,9 +115,16 @@ static bool round_up_default_nslabs(void)
 	return true;
 }
 
+/**
+ * swiotlb_adjust_nareas() - adjust the number of areas and slots
+ * @nareas: Desired number of areas. Zero is treated as 1.
+ *
+ * Adjust the default number of areas in a memory pool.
+ * The default size of the memory pool may also change to meet minimum area
+ * size requirements.
+ */
 static void swiotlb_adjust_nareas(unsigned int nareas)
 {
-	/* use a single area when non is specified */
 	if (!nareas)
 		nareas = 1;
 	else if (!is_power_of_2(nareas))
@@ -298,10 +305,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	if (swiotlb_force_disable)
 		return;
 
-	/*
-	 * default_nslabs maybe changed when adjust area number.
-	 * So allocate bounce buffer after adjusting area number.
-	 */
 	if (!default_nareas)
 		swiotlb_adjust_nareas(num_possible_cpus());
@@ -363,6 +366,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 	if (swiotlb_force_disable)
 		return 0;
 
+	if (!default_nareas)
+		swiotlb_adjust_nareas(num_possible_cpus());
+
 retry:
 	order = get_order(nslabs << IO_TLB_SHIFT);
 	nslabs = SLABS_PER_PAGE << order;
@@ -397,9 +403,6 @@ retry:
 			(PAGE_SIZE << order) >> 20);
 	}
 
-	if (!default_nareas)
-		swiotlb_adjust_nareas(num_possible_cpus());
-
 	area_order = get_order(array_size(sizeof(*mem->areas),
 			default_nareas));
 	mem->areas = (struct io_tlb_area *)
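Pieced together from the hunks above, the corrected ordering in swiotlb_init_late() reads as follows. This is a condensed sketch only, with the retry/remap loop, error handling, and surrounding locals omitted:

	/* the area count, and with it the minimum slot count, is settled
	 * first; per the commit message this may increase the pool size */
	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	/* only then is the bounce buffer sized and allocated */
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;

	/* the per-area metadata array is sized from the now-stable count */
	area_order = get_order(array_size(sizeof(*mem->areas),
			default_nareas));
	mem->areas = (struct io_tlb_area *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);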