path: root/kernel/dma/direct.c
author     Christoph Hellwig <hch@lst.de>    2022-02-26 16:40:21 +0100
committer  Christoph Hellwig <hch@lst.de>    2022-03-03 14:00:57 +0300
commit     f5ff79fddf0efecca538046b5cc20fb3ded2ec4f (patch)
tree       865b9157a52b891d906b279721ac2df29d9065ff /kernel/dma/direct.c
parent     fba09099c6e506608e05e08ac717bf34501f821b (diff)
dma-mapping: remove CONFIG_DMA_REMAP
CONFIG_DMA_REMAP is used to build a few helpers around the core vmalloc code, to use them when dma-direct is handed a highmem page, and to let dma coherent allocations in the dma-iommu layer be built from non-contiguous page allocations.

Right now it needs to be explicitly selected by architectures, and is only selected by architectures that require remapping to deal with devices that are not DMA coherent. Make it unconditional for builds with CONFIG_MMU, as it adds very little extra code but makes it much more likely that large DMA allocations succeed on x86.

This fixes hot-plugging an NVMe Thunderbolt SSD for me, which tries to allocate a 1 MB buffer that is otherwise hard to obtain due to memory fragmentation on a heavily used laptop.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
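For illustration, the remapping described above boils down to mapping the allocated pages into vmalloc space so that a kernel virtual address can be returned even for highmem or non-contiguous memory. Below is a minimal sketch of that pattern, not taken from the patch: example_remap_contiguous() is a made-up name, but vmap() and the VM_MAP flag are the real primitives the dma-direct remap path is built on.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Illustrative sketch only: map a (possibly highmem) physically contiguous
 * allocation into vmalloc space so a kernel virtual address can be handed
 * back to the caller.  The mapping is later undone with vunmap().
 */
static void *example_remap_contiguous(struct page *page, size_t size,
		pgprot_t prot)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	unsigned int i;

	/* vmap() takes an array of page pointers, one per PAGE_SIZE chunk. */
	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = page + i;

	/* Create the vmalloc-space mapping with the requested protection. */
	vaddr = vmap(pages, count, VM_MAP, prot);
	kvfree(pages);
	return vaddr;
}

The kernel's own helpers for this live in kernel/dma/remap.c; with this patch they are built for all CONFIG_MMU kernels rather than only when an architecture selected CONFIG_DMA_REMAP.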
Diffstat (limited to 'kernel/dma/direct.c')
-rw-r--r--  kernel/dma/direct.c   18
1 file changed, 7 insertions, 11 deletions
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 50f48e9e4598..35a1d29d6a2e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -265,17 +265,13 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 	if (!page)
 		return NULL;
+
+	/*
+	 * dma_alloc_contiguous can return highmem pages depending on a
+	 * combination the cma= arguments and per-arch setup. These need to be
+	 * remapped to return a kernel virtual address.
+	 */
 	if (PageHighMem(page)) {
-		/*
-		 * Depending on the cma= arguments and per-arch setup,
-		 * dma_alloc_contiguous could return highmem pages.
-		 * Without remapping there is no way to return them here, so
-		 * log an error and fail.
-		 */
-		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
-			dev_info(dev, "Rejecting highmem page from CMA.\n");
-			goto out_free_pages;
-		}
 		remap = true;
 		set_uncached = false;
 	}
@@ -349,7 +345,7 @@ void dma_direct_free(struct device *dev, size_t size,
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 		return;
 
-	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+	if (is_vmalloc_addr(cpu_addr)) {
 		vunmap(cpu_addr);
 	} else {
 		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
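As a companion sketch for the dma_direct_free() hunk (again illustrative, not from the patch): with remapping always available, the free path only needs to ask whether the CPU address came from vmalloc space. is_vmalloc_addr() and vunmap() are the real primitives; the wrapper name below is invented.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Illustrative wrapper around the check the second hunk makes unconditional:
 * a remapped buffer lives in vmalloc space and is torn down with vunmap(),
 * while a linear-map address needs no unmapping.  The backing pages are
 * freed separately by the caller.
 */
static void example_unmap_if_remapped(void *cpu_addr)
{
	if (is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
}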