author     Linus Torvalds <torvalds@linux-foundation.org>  2023-11-01 13:15:54 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-11-01 13:15:54 -1000
commit     009fbfc97b6367762efa257f1478ec86d37949f9 (patch)
tree       99842c2e12ac6c39e1636a6256bb96e3335cecb9 /kernel/dma
parent     3c86a44d623ee8734a02636d3544812e31496460 (diff)
parent     36d91e851598a9ea523ad4681dd11fa661d59695 (diff)
Merge tag 'dma-mapping-6.7-2023-10-30' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - get rid of the fake support for coherent DMA allocation on coldfire
   with caches (Christoph Hellwig)

 - add a few Kconfig dependencies so that Kconfig catches the use of
   invalid configurations (Christoph Hellwig)

 - fix a typo in dma-debug output (Chuck Lever)

 - rewrite a comment in swiotlb (Sean Christopherson)

* tag 'dma-mapping-6.7-2023-10-30' of git://git.infradead.org/users/hch/dma-mapping:
  dma-debug: Fix a typo in a debugging eye-catcher
  swiotlb: rewrite comment explaining why the source is preserved on DMA_FROM_DEVICE
  m68k: remove unused includes from dma.c
  m68k: don't provide arch_dma_alloc for nommu/coldfire
  net: fec: use dma_alloc_noncoherent for data cache enabled coldfire
  m68k: use the coherent DMA code for coldfire without data cache
  dma-direct: warn when coherent allocations aren't supported
  dma-direct: simplify the use atomic pool logic in dma_direct_alloc
  dma-direct: add a CONFIG_ARCH_HAS_DMA_ALLOC symbol
  dma-direct: add dependencies to CONFIG_DMA_GLOBAL_POOL
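
For orientation (not part of the pull itself): everything below sits behind the generic DMA API, which drivers reach through dma_alloc_coherent()/dma_free_coherent(). A minimal, hypothetical consumer looks roughly like the sketch below; the device pointer, buffer size and function names are illustrative assumptions, not code from this merge.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical driver fragment; "dev" and "buf_size" are assumptions. */
static void *rx_ring_alloc(struct device *dev, size_t buf_size,
			   dma_addr_t *dma_handle)
{
	/*
	 * On devices using the direct mapping this call funnels into
	 * dma_direct_alloc(), the function reworked in this series.
	 */
	return dma_alloc_coherent(dev, buf_size, dma_handle, GFP_KERNEL);
}

static void rx_ring_free(struct device *dev, size_t buf_size,
			 void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(dev, buf_size, cpu_addr, dma_handle);
}
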
Diffstat (limited to 'kernel/dma')
-rw-r--r--  kernel/dma/Kconfig   | 11
-rw-r--r--  kernel/dma/debug.c   |  2
-rw-r--r--  kernel/dma/direct.c  | 37
-rw-r--r--  kernel/dma/swiotlb.c | 12
4 files changed, 32 insertions, 30 deletions
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index f488997b0717..d62f5957f36b 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -135,6 +135,8 @@ config DMA_COHERENT_POOL
config DMA_GLOBAL_POOL
select DMA_DECLARE_COHERENT
+ depends on !ARCH_HAS_DMA_SET_UNCACHED
+ depends on !DMA_DIRECT_REMAP
bool
config DMA_DIRECT_REMAP
@@ -142,6 +144,15 @@ config DMA_DIRECT_REMAP
select DMA_COHERENT_POOL
select DMA_NONCOHERENT_MMAP
+#
+# Fallback to arch code for DMA allocations. This should eventually go away.
+#
+config ARCH_HAS_DMA_ALLOC
+ depends on !ARCH_HAS_DMA_SET_UNCACHED
+ depends on !DMA_DIRECT_REMAP
+ depends on !DMA_GLOBAL_POOL
+ bool
+
config DMA_CMA
bool "DMA Contiguous Memory Allocator"
depends on HAVE_DMA_CONTIGUOUS && CMA
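
The new ARCH_HAS_DMA_ALLOC symbol gives the open-coded triple IS_ENABLED() test in dma-direct (see direct.c below) a single name: an architecture that still needs its own allocator selects the symbol and supplies the two hooks that dma_direct_alloc()/dma_direct_free() call. A minimal sketch of those hooks follows; the signatures match the calls made in the direct.c hunks below, but the bodies are hypothetical placeholders, not any real architecture's code.

#include <linux/dma-map-ops.h>

/* Sketch only: placeholder bodies standing in for real arch code. */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	/* arch-specific: allocate the buffer and return an uncached mapping */
	return NULL;
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	/* arch-specific: undo whatever arch_dma_alloc() set up */
}
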
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 06366acd27b0..3de494375b7b 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -139,7 +139,7 @@ static const char *const maperr2str[] = {
static const char *type2name[] = {
[dma_debug_single] = "single",
- [dma_debug_sg] = "scather-gather",
+ [dma_debug_sg] = "scatter-gather",
[dma_debug_coherent] = "coherent",
[dma_debug_resource] = "resource",
};
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 9596ae1aa0da..ed3056eb20b8 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -220,13 +220,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
if (!dev_is_dma_coherent(dev)) {
- /*
- * Fallback to the arch handler if it exists. This should
- * eventually go away.
- */
- if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
- !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
!is_swiotlb_for_alloc(dev))
return arch_dma_alloc(dev, size, dma_handle, gfp,
attrs);
@@ -240,27 +234,24 @@ void *dma_direct_alloc(struct device *dev, size_t size,
dma_handle);
/*
- * Otherwise remap if the architecture is asking for it. But
- * given that remapping memory is a blocking operation we'll
- * instead have to dip into the atomic pools.
+ * Otherwise we require the architecture to either be able to
+ * mark arbitrary parts of the kernel direct mapping uncached,
+ * or to remap it uncached.
*/
+ set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
- if (remap) {
- if (dma_direct_use_pool(dev, gfp))
- return dma_direct_alloc_from_pool(dev, size,
- dma_handle, gfp);
- } else {
- if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
- return NULL;
- set_uncached = true;
+ if (!set_uncached && !remap) {
+ pr_warn_once("coherent DMA allocations not supported on this platform.\n");
+ return NULL;
}
}
/*
- * Decrypting memory may block, so allocate the memory from the atomic
- * pools if we can't block.
+ * Remapping or decrypting memory may block, so allocate the memory
+ * from the atomic pools instead if we aren't allowed to block.
*/
- if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
+ if ((remap || force_dma_unencrypted(dev)) &&
+ dma_direct_use_pool(dev, gfp))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
/* we always manually zero the memory once we are done */
@@ -330,9 +321,7 @@ void dma_direct_free(struct device *dev, size_t size,
return;
}
- if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
- !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
!dev_is_dma_coherent(dev) &&
!is_swiotlb_for_alloc(dev)) {
arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
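
A condensed restatement of the resulting policy for non-coherent devices, written as if it lived inside kernel/dma/direct.c (it reuses that file's static helpers, so it is a paraphrase of the hunks above rather than buildable standalone code):

static void *noncoherent_alloc_sketch(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap, set_uncached;

	/* 1. Arch fallback, now keyed off the single ARCH_HAS_DMA_ALLOC symbol. */
	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) && !is_swiotlb_for_alloc(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

	/* 2. Otherwise one of the two generic uncached strategies is required. */
	set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
	remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
	if (!set_uncached && !remap)
		return NULL;		/* dma_direct_alloc() warns once here */

	/* 3. Remapping or decryption may block; dip into the atomic pool if so. */
	if ((remap || force_dma_unencrypted(dev)) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* ... normal page allocation path continues here ... */
	return NULL;
}
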
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index dff067bd56b1..26202274784f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1301,11 +1301,13 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
tlb_addr = slot_addr(pool->start, index) + offset;
/*
- * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
- * to the tlb buffer, if we knew for sure the device will
- * overwrite the entire current content. But we don't. Thus
- * unconditional bounce may prevent leaking swiotlb content (i.e.
- * kernel memory) to user-space.
+ * When the device is writing memory, i.e. dir == DMA_FROM_DEVICE, copy
+ * the original buffer to the TLB buffer before initiating DMA in order
+ * to preserve the original's data if the device does a partial write,
+ * i.e. if the device doesn't overwrite the entire buffer. Preserving
+ * the original data, even if it's garbage, is necessary to match
+ * hardware behavior. Use of swiotlb is supposed to be transparent,
+ * i.e. swiotlb must not corrupt memory by clobbering unwritten bytes.
*/
swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
return tlb_addr;
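
The behaviour that comment guarantees is easiest to see from the mapping side. Below is a hypothetical driver fragment (device, buffer size and fill pattern are assumptions) that maps a buffer for DMA_FROM_DEVICE: if the device writes only part of it, the untouched tail must read back exactly as the CPU left it, whether or not swiotlb bounced the mapping.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

static int receive_example(struct device *dev, size_t buf_size)
{
	void *buf = kmalloc(buf_size, GFP_KERNEL);
	dma_addr_t dma;

	if (!buf)
		return -ENOMEM;

	memset(buf, 0xaa, buf_size);		/* CPU-owned pattern */

	dma = dma_map_single(dev, buf, buf_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... device DMA-writes only the first N bytes, N <= buf_size ... */

	dma_unmap_single(dev, dma, buf_size, DMA_FROM_DEVICE);

	/*
	 * Whether or not swiotlb bounced the mapping, bytes the device did
	 * not write must still read back as 0xaa, never as stale swiotlb
	 * (i.e. kernel) memory.
	 */
	kfree(buf);
	return 0;
}
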