From 4f8232bbf887123f78bcdca3dfd2b3dfa52a0112 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 21 Feb 2020 12:24:02 -0800
Subject: dma-direct: remove the cached_kernel_address hook

dma-direct now finds the kernel address for coherent allocations based
on the dma address, so the cached_kernel_address hook is unused and can
be removed entirely.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 arch/Kconfig                    |  2 +-
 arch/microblaze/mm/consistent.c |  7 -------
 arch/mips/mm/dma-noncoherent.c  |  5 -----
 arch/nios2/mm/dma-mapping.c     | 10 ----------
 arch/xtensa/kernel/pci-dma.c    | 10 ++--------
 5 files changed, 3 insertions(+), 31 deletions(-)

(limited to 'arch')

diff --git a/arch/Kconfig b/arch/Kconfig
index 98de654b79b3..7994b239f155 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -249,7 +249,7 @@ config ARCH_HAS_SET_DIRECT_MAP
 
 #
 # Select if arch has an uncached kernel segment and provides the
-# uncached_kernel_address / cached_kernel_address symbols to use it
+# uncached_kernel_address symbol to use it
 #
 config ARCH_HAS_UNCACHED_SEGMENT
 	select ARCH_HAS_DMA_PREP_COHERENT
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 8c5f0c332d8b..cede7c5e8135 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -49,11 +49,4 @@ void *uncached_kernel_address(void *ptr)
 		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
 	return (void *)addr;
 }
-
-void *cached_kernel_address(void *ptr)
-{
-	unsigned long addr = (unsigned long)ptr;
-
-	return (void *)(addr & ~UNCACHED_SHADOW_MASK);
-}
 #endif /* CONFIG_MMU */
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index dc42ffc83825..77dce28ad0a0 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -54,11 +54,6 @@ void *uncached_kernel_address(void *addr)
 	return (void *)(__pa(addr) + UNCAC_BASE);
 }
 
-void *cached_kernel_address(void *addr)
-{
-	return __va(addr) - UNCAC_BASE;
-}
-
 static inline void dma_sync_virt(void *addr, size_t size,
 		enum dma_data_direction dir)
 {
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index 0ed711e37902..f30f2749257c 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -75,13 +75,3 @@ void *uncached_kernel_address(void *ptr)
 
 	return (void *)ptr;
 }
-
-void *cached_kernel_address(void *ptr)
-{
-	unsigned long addr = (unsigned long)ptr;
-
-	addr &= ~CONFIG_NIOS2_IO_REGION_BASE;
-	addr |= CONFIG_NIOS2_KERNEL_REGION_BASE;
-
-	return (void *)ptr;
-}
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 72b6222daa0b..6a685545d5c9 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -88,18 +88,12 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 
 /*
  * Memory caching is platform-dependent in noMMU xtensa configurations.
- * The following two functions should be implemented in platform code
- * in order to enable coherent DMA memory operations when CONFIG_MMU is not
- * enabled.
+ * This function should be implemented in platform code in order to enable
+ * coherent DMA memory operations when CONFIG_MMU is not enabled.
  */
 #ifdef CONFIG_MMU
 void *uncached_kernel_address(void *p)
 {
 	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
 }
-
-void *cached_kernel_address(void *p)
-{
-	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-}
 #endif /* CONFIG_MMU */
--
cgit

From fa7e2247c5729f990c7456fe09f3af99c8f2571b Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 21 Feb 2020 15:55:43 -0800
Subject: dma-direct: make uncached_kernel_address more general

Rename the symbol to arch_dma_set_uncached, and pass a size to it as
well as allow an error return.  That will allow reusing this hook for
in-place page table remapping.

As the in-place remap doesn't always require an explicit cache flush,
also detangle ARCH_HAS_DMA_PREP_COHERENT from ARCH_HAS_DMA_SET_UNCACHED.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 arch/Kconfig                    | 8 ++++----
 arch/microblaze/Kconfig         | 2 +-
 arch/microblaze/mm/consistent.c | 2 +-
 arch/mips/Kconfig               | 3 ++-
 arch/mips/mm/dma-noncoherent.c  | 2 +-
 arch/nios2/Kconfig              | 3 ++-
 arch/nios2/mm/dma-mapping.c     | 2 +-
 arch/xtensa/Kconfig             | 2 +-
 arch/xtensa/kernel/pci-dma.c    | 2 +-
 9 files changed, 14 insertions(+), 12 deletions(-)

(limited to 'arch')

diff --git a/arch/Kconfig b/arch/Kconfig
index 7994b239f155..090cfe0c82a7 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -248,11 +248,11 @@ config ARCH_HAS_SET_DIRECT_MAP
 	bool
 
 #
-# Select if arch has an uncached kernel segment and provides the
-# uncached_kernel_address symbol to use it
+# Select if the architecture provides the arch_dma_set_uncached symbol to
+# either provide an uncached segment alias for a DMA allocation, or
+# to remap the page tables in place.
 #
-config ARCH_HAS_UNCACHED_SEGMENT
-	select ARCH_HAS_DMA_PREP_COHERENT
+config ARCH_HAS_DMA_SET_UNCACHED
 	bool
 
 # Select if arch init_task must go in the __init_task_data section
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 6a331bd57ea8..9606c244b5b8 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -8,7 +8,7 @@ config MICROBLAZE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_UNCACHED_SEGMENT if !MMU
+	select ARCH_HAS_DMA_SET_UNCACHED if !MMU
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_TABLE_SORT
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index cede7c5e8135..e09b66e43cb6 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -40,7 +40,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 #define UNCACHED_SHADOW_MASK 0
 #endif /* CONFIG_XILINX_UNCACHED_SHADOW */
 
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
 {
 	unsigned long addr = (unsigned long)ptr;
 
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 797d7f1ad5fe..489185db501e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1187,8 +1187,9 @@ config DMA_NONCOHERENT
 	# significant advantages.
 	#
 	select ARCH_HAS_DMA_WRITE_COMBINE
+	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_UNCACHED_SEGMENT
+	select ARCH_HAS_DMA_SET_UNCACHED
 	select DMA_NONCOHERENT_MMAP
 	select DMA_NONCOHERENT_CACHE_SYNC
 	select NEED_DMA_MAP_STATE
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 77dce28ad0a0..fcea92d95d86 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -49,7 +49,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	dma_cache_wback_inv((unsigned long)page_address(page), size);
 }
 
-void *uncached_kernel_address(void *addr)
+void *arch_dma_set_uncached(void *addr, size_t size)
 {
 	return (void *)(__pa(addr) + UNCAC_BASE);
 }
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 44b5da37e8bd..2fc4ed210b5f 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -2,9 +2,10 @@
 config NIOS2
 	def_bool y
 	select ARCH_32BIT_OFF_T
+	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_UNCACHED_SEGMENT
+	select ARCH_HAS_DMA_SET_UNCACHED
 	select ARCH_NO_SWAP
 	select TIMER_OF
 	select GENERIC_ATOMIC64
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index f30f2749257c..fd887d5f3f9a 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -67,7 +67,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	flush_dcache_range(start, start + size);
 }
 
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
 {
 	unsigned long addr = (unsigned long)ptr;
 
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 32ee759a3fda..de229424b659 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -6,7 +6,7 @@ config XTENSA
 	select ARCH_HAS_DMA_PREP_COHERENT if MMU
 	select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
-	select ARCH_HAS_UNCACHED_SEGMENT if MMU
+	select ARCH_HAS_DMA_SET_UNCACHED if MMU
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index 6a685545d5c9..17c4384f8495 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -92,7 +92,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
  * coherent DMA memory operations when CONFIG_MMU is not enabled.
  */
 #ifdef CONFIG_MMU
-void *uncached_kernel_address(void *p)
+void *arch_dma_set_uncached(void *p, size_t size)
 {
 	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
 }
--
cgit

From 999a5d1203baa7cff00586361feae263ee3f23a5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 21 Feb 2020 12:35:05 -0800
Subject: dma-direct: provide an arch_dma_clear_uncached hook

This allows the arch code to reset the page tables to cached access when
freeing a dma coherent allocation that was set to uncached using
arch_dma_set_uncached.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 arch/Kconfig | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'arch')

diff --git a/arch/Kconfig b/arch/Kconfig
index 090cfe0c82a7..c26302f90c96 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -255,6 +255,13 @@ config ARCH_HAS_SET_DIRECT_MAP
 config ARCH_HAS_DMA_SET_UNCACHED
 	bool
 
+#
+# Select if the architecture provides the arch_dma_clear_uncached symbol
+# to undo an in-place page table remap for uncached access.
+#
+config ARCH_HAS_DMA_CLEAR_UNCACHED
+	bool
+
 # Select if arch init_task must go in the __init_task_data section
 config ARCH_TASK_STRUCT_ON_STACK
 	bool
--
cgit

From a4a4d11a22ee89d0e94bd8640ec190d81b4c8134 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 7 Nov 2019 18:08:39 +0100
Subject: openrisc: use the generic in-place uncached DMA allocator

Switch openrisc to use the dma-direct allocator and just provide the
hooks for setting memory uncached or cached.

Signed-off-by: Christoph Hellwig
Reviewed-by: Stafford Horne
Reviewed-by: Robin Murphy
---
 arch/openrisc/Kconfig      |  2 ++
 arch/openrisc/kernel/dma.c | 55 +++++++++-------------------------------------
 2 files changed, 12 insertions(+), 45 deletions(-)

(limited to 'arch')

diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 1928e061ff96..7e94fe37cb2f 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -7,6 +7,8 @@
 config OPENRISC
 	def_bool y
 	select ARCH_32BIT_OFF_T
+	select ARCH_HAS_DMA_SET_UNCACHED
+	select ARCH_HAS_DMA_CLEAR_UNCACHED
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select OF
 	select OF_EARLY_FLATTREE
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index adec711ad39d..c152a68811dd 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -11,8 +11,6 @@
  * Copyright (C) 2010-2011 Jonas Bonn
 *
 * DMA mapping callbacks...
- * As alloc_coherent is the only DMA callback being used currently, that's
- * the only thing implemented properly. The rest need looking into...
 */
 
 #include
@@ -67,62 +65,29 @@ static const struct mm_walk_ops clear_nocache_walk_ops = {
 	.pte_entry = page_clear_nocache,
 };
 
-/*
- * Alloc "coherent" memory, which for OpenRISC means simply uncached.
- *
- * This function effectively just calls __get_free_pages, sets the
- * cache-inhibit bit on those pages, and makes sure that the pages are
- * flushed out of the cache before they are used.
- *
- * If the NON_CONSISTENT attribute is set, then this function just
- * returns "normal", cachable memory.
- *
- * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
- * into consideration here, too. All current known implementations of
- * the OR1K support only strongly ordered memory accesses, so that flag
- * is being ignored for now; uncached but write-combined memory is a
- * missing feature of the OR1K.
- */
-void *
-arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
+void *arch_dma_set_uncached(void *cpu_addr, size_t size)
 {
-	unsigned long va;
-	void *page;
-
-	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
-	if (!page)
-		return NULL;
-
-	/* This gives us the real physical address of the first page. */
-	*dma_handle = __pa(page);
-
-	va = (unsigned long)page;
+	unsigned long va = (unsigned long)cpu_addr;
+	int error;
 
 	/*
 	 * We need to iterate through the pages, clearing the dcache for
 	 * them and setting the cache-inhibit bit.
 	 */
-	if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
-			NULL)) {
-		free_pages_exact(page, size);
-		return NULL;
-	}
-
-	return (void *)va;
+	error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
+			NULL);
+	if (error)
+		return ERR_PTR(error);
+	return cpu_addr;
 }
 
-void
-arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
+void arch_dma_clear_uncached(void *cpu_addr, size_t size)
 {
-	unsigned long va = (unsigned long)vaddr;
+	unsigned long va = (unsigned long)cpu_addr;
 
 	/* walk_page_range shouldn't be able to fail here */
 	WARN_ON(walk_page_range(&init_mm, va, va + size,
 			&clear_nocache_walk_ops, NULL));
-
-	free_pages_exact(vaddr, size);
 }
 
 void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
--
cgit

From fd50924917f98f11554e3bf55b0d062e09bce541 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 17 Feb 2020 12:46:37 +0100
Subject: ARM/dma-mapping: remove get_coherent_dma_mask

The core DMA code already checks for valid DMA masks earlier on, and
doesn't allow NULL struct device pointers.  Remove the now-unneeded
checks.

Signed-off-by: Christoph Hellwig
---
 arch/arm/mm/dma-mapping.c | 41 ++++------------------------------------
 1 file changed, 4 insertions(+), 37 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9414d72f664b..72ddc3d0f5eb 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -219,7 +219,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
-static int __dma_supported(struct device *dev, u64 mask, bool warn)
+static int __dma_supported(struct device *dev, u64 mask)
 {
 	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
 
@@ -227,41 +227,11 @@ static int __dma_supported(struct device *dev, u64 mask)
 	 * Translate the device's DMA mask to a PFN limit. This
 	 * PFN number includes the page which we can DMA to.
 	 */
-	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-		if (warn)
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 max_dma_pfn + 1);
+	if (dma_to_pfn(dev, mask) < max_dma_pfn)
 		return 0;
-	}
-
 	return 1;
 }
 
-static u64 get_coherent_dma_mask(struct device *dev)
-{
-	u64 mask = (u64)DMA_BIT_MASK(32);
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			return 0;
-		}
-
-		if (!__dma_supported(dev, mask, true))
-			return 0;
-	}
-
-	return mask;
-}
-
 static void __dma_clear_buffer(struct page *page, size_t size,
 				int coherent_flag)
 {
 	/*
@@ -688,7 +658,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
 			 unsigned long attrs, const void *caller)
 {
-	u64 mask = get_coherent_dma_mask(dev);
+	u64 mask = dev->coherent_dma_mask;
 	struct page *page = NULL;
 	void *addr;
 	bool allowblock, cma;
@@ -712,9 +682,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	}
 #endif
 
-	if (!mask)
-		return NULL;
-
 	buf = kzalloc(sizeof(*buf),
 		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
 	if (!buf)
@@ -1095,7 +1062,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int arm_dma_supported(struct device *dev, u64 mask)
 {
-	return __dma_supported(dev, mask, false);
+	return __dma_supported(dev, mask);
 }
 
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
--
cgit

From 7607cb733fbb6dfac88c4a85c113e8104c91733e Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 17 Feb 2020 12:50:23 +0100
Subject: ARM/dma-mapping: take the bus limit into account in __dma_alloc

The DMA coherent allocator needs to take the bus limit into account when
picking the zone that the memory is allocated from.

Reported-by: Roger Quadros
Signed-off-by: Christoph Hellwig
Tested-by: Peter Ujfalusi
Tested-by: Roger Quadros
---
 arch/arm/mm/dma-mapping.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 72ddc3d0f5eb..87aba505554a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -658,7 +658,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
 			 unsigned long attrs, const void *caller)
 {
-	u64 mask = dev->coherent_dma_mask;
+	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 	struct page *page = NULL;
 	void *addr;
 	bool allowblock, cma;
--
cgit

From fd27a526bb381f43dded6db30b3b016468ab0e6c Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 17 Feb 2020 12:48:59 +0100
Subject: ARM/dma-mapping: merge __dma_supported into arm_dma_supported

Merge __dma_supported into its only caller, and move the resulting
function so that it doesn't need a forward declaration.  Also mark it
static as there are no callers outside of dma-mapping.c.

Signed-off-by: Christoph Hellwig
---
 arch/arm/include/asm/dma-iommu.h |  2 --
 arch/arm/mm/dma-mapping.c        | 41 +++++++++++++++++-----------------------
 2 files changed, 17 insertions(+), 26 deletions(-)

(limited to 'arch')

diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 772f48ef84b7..86405cc81385 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -33,7 +33,5 @@ int arm_iommu_attach_device(struct device *dev,
 					struct dma_iommu_mapping *mapping);
 void arm_iommu_detach_device(struct device *dev);
 
-int arm_dma_supported(struct device *dev, u64 mask);
-
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 87aba505554a..8a8949174b1c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static int arm_dma_supported(struct device *dev, u64 mask)
+{
+	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit. This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	return dma_to_pfn(dev, mask) >= max_dma_pfn;
+}
+
 const struct dma_map_ops arm_dma_ops = {
 	.alloc			= arm_dma_alloc,
 	.free			= arm_dma_free,
@@ -219,19 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
-static int __dma_supported(struct device *dev, u64 mask)
-{
-	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit. This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	if (dma_to_pfn(dev, mask) < max_dma_pfn)
-		return 0;
-	return 1;
-}
-
 static void __dma_clear_buffer(struct page *page, size_t size,
 				int coherent_flag)
 {
 	/*
@@ -1054,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-int arm_dma_supported(struct device *dev, u64 mask)
-{
-	return __dma_supported(dev, mask);
-}
-
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
--
cgit
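
As a footnote to the series above: the contract these patches leave behind is
that an architecture either aliases a DMA allocation into an uncached segment
or remaps its page tables in place, and only the latter kind has anything to
undo on free via arch_dma_clear_uncached. The fragment below is a rough sketch
of a caller honoring that contract; the example_* helpers are hypothetical,
and the real call sites in kernel/dma/direct.c additionally handle CMA, GFP
zone selection and the dma_addr_t translation.

/*
 * Illustrative sketch only, not kernel code: exercises the hook contract
 * established by the patches above.  The example_* names are hypothetical.
 */
#include <linux/dma-noncoherent.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static void *example_alloc_uncached(size_t size, gfp_t gfp)
{
	void *cached, *uncached;

	cached = alloc_pages_exact(size, gfp | __GFP_ZERO);
	if (!cached)
		return NULL;

	/* Flush the cached alias before anyone touches the uncached one. */
	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_PREP_COHERENT))
		arch_dma_prep_coherent(virt_to_page(cached), size);

	/*
	 * Returns either an uncached segment alias (microblaze, mips,
	 * nios2, xtensa) or the input pointer after an in-place page
	 * table remap (openrisc); an ERR_PTR signals failure.
	 */
	uncached = arch_dma_set_uncached(cached, size);
	if (IS_ERR(uncached)) {
		free_pages_exact(cached, size);
		return NULL;
	}
	return uncached;
}

static void example_free_uncached(void *cached, size_t size)
{
	/*
	 * Only the in-place remap case has something to undo; segment
	 * alias architectures recover the cached address from the dma
	 * address (see the first patch above) and free that directly.
	 */
	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
		arch_dma_clear_uncached(cached, size);
	free_pages_exact(cached, size);
}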