From 9eef8b8cc26559fe5f2575daf7d08c6a17e81ff8 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 22 May 2017 10:53:03 +0200
Subject: arm: implement ->mapping_error

DMA_ERROR_CODE is going to go away, so don't rely on it.

Signed-off-by: Christoph Hellwig
---
 arch/arm/include/asm/dma-iommu.h   | 2 ++
 arch/arm/include/asm/dma-mapping.h | 1 -
 2 files changed, 2 insertions(+), 1 deletion(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 2ef282f96651..389a26a10ea3 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -9,6 +9,8 @@
 #include
 #include
 
+#define ARM_MAPPING_ERROR	(~(dma_addr_t)0x0)
+
 struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 680d3f3889e7..52a8fd5a8edb 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -12,7 +12,6 @@
 #include
 #include
 
-#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 extern const struct dma_map_ops arm_dma_ops;
 extern const struct dma_map_ops arm_coherent_dma_ops;
 
-- cgit

From 418a7a7e4f05f36d6e4ab5b8548ea71f0b602140 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 22 May 2017 11:20:18 +0200
Subject: arm: remove arch specific dma_supported implementation

And instead wire it up as a method for all the dma_map_ops instances.

Note that the code seems a little fishy for dmabounce and iommu, but for
now I'd like to preserve the existing behavior 1:1.

Signed-off-by: Christoph Hellwig
---
 arch/arm/include/asm/dma-iommu.h   | 2 ++
 arch/arm/include/asm/dma-mapping.h | 3 ---
 2 files changed, 2 insertions(+), 3 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 389a26a10ea3..c090ec675eac 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -35,5 +35,7 @@ int arm_iommu_attach_device(struct device *dev,
 					struct dma_iommu_mapping *mapping);
 void arm_iommu_detach_device(struct device *dev);
 
+int arm_dma_supported(struct device *dev, u64 mask);
+
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 52a8fd5a8edb..8dabcfdf4505 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -20,9 +20,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return &arm_dma_ops;
 }
 
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *dev, u64 mask);
-
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
-- cgit
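[Editor's note] The two patches above only touch the headers shown by this
'arch/arm/include/asm' view; the actual implementations live in
arch/arm/mm/dma-mapping.c, which is not part of these diffs. The sketch below
is illustrative only: the callback name arm_dma_mapping_error and the
trimmed-down ops initializer are assumptions about what the header changes
enable, not the literal patch contents.

/* Illustrative sketch only -- not the literal arch/arm/mm/dma-mapping.c
 * code, which is outside this header-only diff view. */
#include <linux/dma-mapping.h>
#include <asm/dma-iommu.h>	/* ARM_MAPPING_ERROR, arm_dma_supported() */

static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	/* A failed mapping hands back the reserved ARM_MAPPING_ERROR
	 * cookie instead of the removed global DMA_ERROR_CODE. */
	return dma_addr == ARM_MAPPING_ERROR;
}

const struct dma_map_ops arm_dma_ops = {
	/* ... map_page/unmap_page, map_sg/unmap_sg, sync methods ... */
	.mapping_error	= arm_dma_mapping_error,
	/* dma_supported() is now answered per dma_map_ops instance
	 * rather than through HAVE_ARCH_DMA_SUPPORTED. */
	.dma_supported	= arm_dma_supported,
};

Making ->mapping_error and ->dma_supported per-ops methods lets dmabounce,
the IOMMU path and plain arm_dma_ops each answer these questions for
themselves, instead of sharing one global DMA_ERROR_CODE and one arch-wide
dma_supported().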
From 1c51c429f30ea10428337f3a33c12059ba59f668 Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Wed, 24 May 2017 11:24:30 +0100
Subject: ARM: NOMMU: Introduce dma operations for noMMU

R/M classes of CPUs can have memory covered by an MPU, which in turn
might configure RAM as Normal, i.e. bufferable and cacheable. This
breaks dma_alloc_coherent() and friends, since data can now get stuck
in caches or be buffered. This patch factors out DMA support for the
NOMMU configuration into a separate entity which provides dedicated
dma_ops.

We have to handle several cases here:
 - configurations with an MMU/MPU set up
 - configurations without an MMU/MPU set up
 - the special case of M-class, where caches and the MPU are optional

In general we rely on the default DMA area for coherent allocations
and/or per-device memory reserves suitable for coherent DMA, so if such
regions are set, coherent allocations come from there.

If no MMU/MPU was set up, we fall back to the normal page allocator for
DMA memory allocation.

On M-class CPUs without cache support (like Cortex-M3/M4), DMA
operations are forced to be coherent and wired up to dma-noop (the
decision is made based on the cacheid global variable); however, if
caches are detected and no coherent DMA region is given (either default
or per-device), DMA is disallowed even if the MPU is not set up - this
is because M-class implements a system memory map which defines part of
the address space as Normal memory.

Reported-by: Alexandre Torgue
Reported-by: Andras Szemzo
Tested-by: Benjamin Gaignard
Tested-by: Andras Szemzo
Tested-by: Alexandre TORGUE
Reviewed-by: Robin Murphy
Signed-off-by: Vladimir Murzin
Acked-by: Arnd Bergmann
Acked-by: Russell King
[hch: removed the dma_supported() implementation that isn't required anymore]
Signed-off-by: Christoph Hellwig
---
 arch/arm/include/asm/dma-mapping.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 8dabcfdf4505..4e0285a66ef8 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -17,7 +17,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	return &arm_dma_ops;
+	return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops;
 }
 
 #ifdef __arch_page_to_dma
-- cgit
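[Editor's note] From the driver side nothing changes: allocations and
mappings still go through the generic DMA API, and the ops returned by
get_arch_dma_ops() (arm_dma_ops with an MMU, dma_noop_ops without one) are
picked up behind the scenes. A minimal usage sketch follows; the function
name, the device pointer and BUF_SIZE are made-up placeholders, not taken
from the patches above.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define BUF_SIZE 4096	/* arbitrary example size */

static int example_setup_dma(struct device *dev)
{
	dma_addr_t coherent_handle, stream_handle;
	void *coherent_buf, *stream_buf;
	int ret = -ENOMEM;

	/* Coherent allocation: served by whichever dma_map_ops backend
	 * get_arch_dma_ops() selected for this configuration. */
	coherent_buf = dma_alloc_coherent(dev, BUF_SIZE, &coherent_handle,
					  GFP_KERNEL);
	if (!coherent_buf)
		return -ENOMEM;

	stream_buf = kmalloc(BUF_SIZE, GFP_KERNEL);
	if (!stream_buf)
		goto free_coherent;

	/* Streaming mapping: failure is reported through the per-ops
	 * ->mapping_error hook, which on ARM now compares against
	 * ARM_MAPPING_ERROR rather than the removed DMA_ERROR_CODE. */
	stream_handle = dma_map_single(dev, stream_buf, BUF_SIZE,
				       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, stream_handle)) {
		ret = -EIO;
		goto free_stream;
	}

	/* ... program the device with coherent_handle / stream_handle ... */

	dma_unmap_single(dev, stream_handle, BUF_SIZE, DMA_TO_DEVICE);
	ret = 0;

free_stream:
	kfree(stream_buf);
free_coherent:
	dma_free_coherent(dev, BUF_SIZE, coherent_buf, coherent_handle);
	return ret;
}

On an M-class part with caches but no coherent DMA region, the
dma_alloc_coherent() call above is exactly what the commit message says
would now be refused.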