Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                            |   1
-rw-r--r--  arch/arm/common/dmabounce.c                 |  19
-rw-r--r--  arch/arm/include/asm/dma-iommu.h            |   4
-rw-r--r--  arch/arm/include/asm/dma-mapping.h          |   6
-rw-r--r--  arch/arm/mm/Kconfig                         |   8
-rw-r--r--  arch/arm/mm/Makefile                        |   5
-rw-r--r--  arch/arm/mm/dma-mapping-nommu.c             | 228
-rw-r--r--  arch/arm/mm/dma-mapping.c                   |  77
-rw-r--r--  arch/arm/xen/mm.c                           |  17
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h        |   1
-rw-r--r--  arch/arm64/mm/dma-mapping.c                 |   3
-rw-r--r--  arch/blackfin/Kconfig                       |   1
-rw-r--r--  arch/c6x/include/asm/dma-mapping.h          |   5
-rw-r--r--  arch/hexagon/include/asm/dma-mapping.h      |   5
-rw-r--r--  arch/hexagon/kernel/dma.c                   |  21
-rw-r--r--  arch/hexagon/kernel/hexagon_ksyms.c         |   1
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h         |   2
-rw-r--r--  arch/m32r/Kconfig                           |   1
-rw-r--r--  arch/m32r/include/asm/dma-mapping.h         |   2
-rw-r--r--  arch/m68k/Kconfig                           |   1
-rw-r--r--  arch/microblaze/Kconfig                     |   1
-rw-r--r--  arch/microblaze/include/asm/dma-mapping.h   |   2
-rw-r--r--  arch/mips/loongson64/common/dma-swiotlb.c   |  19
-rw-r--r--  arch/openrisc/include/asm/dma-mapping.h     |   9
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h      |   5
-rw-r--r--  arch/powerpc/include/asm/iommu.h            |   4
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c             |   6
-rw-r--r--  arch/powerpc/kernel/dma.c                   |  17
-rw-r--r--  arch/powerpc/kernel/iommu.c                 |  28
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c         |  53
-rw-r--r--  arch/powerpc/platforms/pseries/vio.c        |   3
-rw-r--r--  arch/s390/include/asm/dma-mapping.h         |   2
-rw-r--r--  arch/s390/pci/pci_dma.c                     |  18
-rw-r--r--  arch/sh/Kconfig                             |   1
-rw-r--r--  arch/sh/include/asm/dma-mapping.h           |   2
-rw-r--r--  arch/sparc/include/asm/dma-mapping.h        |   8
-rw-r--r--  arch/sparc/kernel/iommu.c                   |  52
-rw-r--r--  arch/sparc/kernel/iommu_common.h            |   2
-rw-r--r--  arch/sparc/kernel/ioport.c                  |  27
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c               |  31
-rw-r--r--  arch/tile/kernel/pci-dma.c                  |  30
-rw-r--r--  arch/x86/include/asm/dma-mapping.h          |   5
-rw-r--r--  arch/x86/include/asm/iommu.h                |   2
-rw-r--r--  arch/x86/kernel/amd_gart_64.c               |   1
-rw-r--r--  arch/x86/kernel/pci-calgary_64.c            |  25
-rw-r--r--  arch/x86/kernel/pci-dma.c                   |   8
-rw-r--r--  arch/x86/kernel/pci-nommu.c                 |  11
-rw-r--r--  arch/x86/pci/sta2x11-fixup.c                |   3
-rw-r--r--  arch/x86/xen/pci-swiotlb-xen.c              |  14
-rw-r--r--  arch/xtensa/Kconfig                         |   1
-rw-r--r--  arch/xtensa/include/asm/dma-mapping.h       |   2
51 files changed, 464 insertions, 336 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index abd59fad1a34..0b731e8ab17e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -22,6 +22,7 @@ config ARM
select CLONE_BACKWARDS
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select DMA_NOOP_OPS if !MMU
select EDAC_SUPPORT
select EDAC_ATOMIC_SCRUB
select GENERIC_ALLOCATOR
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9b1b7be2ec0e..9a92de63426f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -33,6 +33,7 @@
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
#undef STATS
@@ -256,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
if (buf == NULL) {
dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -326,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
ret = needs_bounce(dev, dma_addr, size);
if (ret < 0)
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
if (ret == 0) {
arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
@@ -335,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
if (PageHighMem(page)) {
dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
return map_single(dev, page_address(page) + offset, size, dir, attrs);
@@ -444,12 +445,17 @@ static void dmabounce_sync_for_device(struct device *dev,
arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}
-static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
{
if (dev->archdata.dmabounce)
return 0;
- return arm_dma_ops.set_dma_mask(dev, dma_mask);
+ return arm_dma_ops.dma_supported(dev, dma_mask);
+}
+
+static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return arm_dma_ops.mapping_error(dev, dma_addr);
}
static const struct dma_map_ops dmabounce_ops = {
@@ -465,7 +471,8 @@ static const struct dma_map_ops dmabounce_ops = {
.unmap_sg = arm_dma_unmap_sg,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
- .set_dma_mask = dmabounce_set_mask,
+ .dma_supported = dmabounce_dma_supported,
+ .mapping_error = dmabounce_mapping_error,
};
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 2ef282f96651..c090ec675eac 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -9,6 +9,8 @@
#include <linux/kmemcheck.h>
#include <linux/kref.h>
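+/*
+ * An all-ones dma_addr_t is never handed out as a valid mapping, so it
+ * can safely serve as the per-arch error sentinel for the ARM dma_map_ops.
+ */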
+#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
+
struct dma_iommu_mapping {
/* iommu specific data */
struct iommu_domain *domain;
@@ -33,5 +35,7 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
+int arm_dma_supported(struct device *dev, u64 mask);
+
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 680d3f3889e7..4e0285a66ef8 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -12,18 +12,14 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern const struct dma_map_ops arm_dma_ops;
extern const struct dma_map_ops arm_coherent_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return &arm_dma_ops;
+ return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops;
}
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *dev, u64 mask);
-
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c6c4c9c8824b..877a0e3fd17d 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1045,8 +1045,8 @@ config ARM_L1_CACHE_SHIFT
default 5
config ARM_DMA_MEM_BUFFERABLE
- bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
- default y if CPU_V6 || CPU_V6K || CPU_V7
+ bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K || CPU_V7M) && !CPU_V7
+ default y if CPU_V6 || CPU_V6K || CPU_V7 || CPU_V7M
help
Historically, the kernel has used strongly ordered mappings to
provide DMA coherent memory. With the advent of ARMv7, mapping
@@ -1061,6 +1061,10 @@ config ARM_DMA_MEM_BUFFERABLE
and therefore turning this on may result in unpredictable driver
behaviour. Therefore, we offer this as an option.
+ On some of the beefier ARMv7-M machines (with DMA and write
+ buffers) you likely want this enabled, while those that
+ didn't need it until now also won't need it in the future.
+
You are recommended say 'Y' here and debug any affected drivers.
config ARM_HEAVY_MB
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index b3dea80715b4..950d19babb5f 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -2,9 +2,8 @@
# Makefile for the linux arm-specific parts of the memory manager.
#
-obj-y := dma-mapping.o extable.o fault.o init.o \
- iomap.o
-
+obj-y := extable.o fault.o init.o iomap.o
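+# $(MMUEXT) expands to "-nommu" when CONFIG_MMU is unset (see
+# arch/arm/Makefile), so this builds dma-mapping.o or dma-mapping-nommu.o.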
+obj-y += dma-mapping$(MMUEXT).o
obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
mmap.o pgd.o mmu.o pageattr.o
diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c
new file mode 100644
index 000000000000..90ee354d803e
--- /dev/null
+++ b/arch/arm/mm/dma-mapping-nommu.c
@@ -0,0 +1,228 @@
+/*
+ * Based on linux/arch/arm/mm/dma-mapping.c
+ *
+ * Copyright (C) 2000-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cachetype.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/cp15.h>
+
+#include "dma.h"
+
+/*
+ * dma_noop_ops is used if
+ * - MMU/MPU is off
+ * - cpu is v7m w/o cache support
+ * - device is coherent
+ * otherwise arm_nommu_dma_ops is used.
+ *
+ * arm_nommu_dma_ops relies on consistent DMA memory (please refer
+ * to [1] for how to declare such memory).
+ *
+ * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
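+ *
+ * For illustration only (addresses, sizes and labels here are
+ * hypothetical), a minimal reserved-memory node per [1] could look like:
+ *
+ *	reserved-memory {
+ *		#address-cells = <1>;
+ *		#size-cells = <1>;
+ *		ranges;
+ *
+ *		dma: dma@38000000 {
+ *			compatible = "shared-dma-pool";
+ *			reg = <0x38000000 0x100000>;
+ *			no-map;
+ *			linux,dma-default;
+ *		};
+ *	};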
+ */
+
+static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+
+{
+ const struct dma_map_ops *ops = &dma_noop_ops;
+
+ /*
+ * We are here because:
+ * - no consistent DMA region has been defined, so we can't
+ * continue.
+ * - there is no space left in the consistent DMA region, so we
+ * can only fall back to the generic allocator if the caller
+ * advertised that consistency is not required.
+ */
+
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
+ return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
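+
+/*
+ * Illustrative caller-side sketch (not part of this patch): once the
+ * consistent region is exhausted, only a non-consistent request can
+ * still be satisfied by the fallback above:
+ *
+ *	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
+ *			      DMA_ATTR_NON_CONSISTENT);
+ */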
+
+static void arm_nommu_dma_free(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ const struct dma_map_ops *ops = &dma_noop_ops;
+
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
+ ops->free(dev, size, cpu_addr, dma_addr, attrs);
+ else
+ WARN_ON_ONCE(1);
+
+ return;
+}
+
+static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ dmac_map_area(__va(paddr), size, dir);
+
+ if (dir == DMA_FROM_DEVICE)
+ outer_inv_range(paddr, paddr + size);
+ else
+ outer_clean_range(paddr, paddr + size);
+}
+
+static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ if (dir != DMA_TO_DEVICE) {
+ outer_inv_range(paddr, paddr + size);
+ dmac_unmap_area(__va(paddr), size, dir);
+ }
+}
+
+static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t handle = page_to_phys(page) + offset;
+
+ __dma_page_cpu_to_dev(handle, size, dir);
+
+ return handle;
+}
+
+static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ __dma_page_dev_to_cpu(handle, size, dir);
+}
+
+
+static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ sg_dma_address(sg) = sg_phys(sg);
+ sg_dma_len(sg) = sg->length;
+ __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
+ }
+
+ return nents;
+}
+
+static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
+}
+
+static void arm_nommu_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(handle, size, dir);
+}
+
+static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(handle, size, dir);
+}
+
+static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
+}
+
+static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
+}
+
+const struct dma_map_ops arm_nommu_dma_ops = {
+ .alloc = arm_nommu_dma_alloc,
+ .free = arm_nommu_dma_free,
+ .map_page = arm_nommu_dma_map_page,
+ .unmap_page = arm_nommu_dma_unmap_page,
+ .map_sg = arm_nommu_dma_map_sg,
+ .unmap_sg = arm_nommu_dma_unmap_sg,
+ .sync_single_for_device = arm_nommu_dma_sync_single_for_device,
+ .sync_single_for_cpu = arm_nommu_dma_sync_single_for_cpu,
+ .sync_sg_for_device = arm_nommu_dma_sync_sg_for_device,
+ .sync_sg_for_cpu = arm_nommu_dma_sync_sg_for_cpu,
+};
+EXPORT_SYMBOL(arm_nommu_dma_ops);
+
+static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
+{
+ return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
+}
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ const struct iommu_ops *iommu, bool coherent)
+{
+ const struct dma_map_ops *dma_ops;
+
+ if (IS_ENABLED(CONFIG_CPU_V7M)) {
+ /*
+ * Cache support for v7m is optional, so can be treated as
+ * coherent if no cache has been detected. Note that it is not
+ * enough to check whether the MPU is in use, since in the absence
+ * of an MPU the system memory map is used.
+ */
+ dev->archdata.dma_coherent = (cacheid) ? coherent : true;
+ } else {
+ /*
+ * Assume coherent DMA in case MMU/MPU has not been set up.
+ */
+ dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
+ }
+
+ dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);
+
+ set_dma_ops(dev, dma_ops);
+}
+
+void arch_teardown_dma_ops(struct device *dev)
+{
+}
+
+#define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+static int __init dma_debug_do_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+}
+core_initcall(dma_debug_do_init);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bd83c531828a..e7380bafbfa6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -180,6 +180,11 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
}
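+/*
+ * Drivers are unaffected by the DMA_ERROR_CODE removal: the generic
+ * dma_mapping_error() helper now dispatches to this op. Illustrative
+ * check (not part of this patch):
+ *
+ *	dma_addr_t dma = dma_map_single(dev, ptr, size, DMA_TO_DEVICE);
+ *	if (dma_mapping_error(dev, dma))
+ *		return -ENOMEM;
+ */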
+static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == ARM_MAPPING_ERROR;
+}
+
const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -193,6 +198,8 @@ const struct dma_map_ops arm_dma_ops = {
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
+ .mapping_error = arm_dma_mapping_error,
+ .dma_supported = arm_dma_supported,
};
EXPORT_SYMBOL(arm_dma_ops);
@@ -211,6 +218,8 @@ const struct dma_map_ops arm_coherent_dma_ops = {
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
+ .mapping_error = arm_dma_mapping_error,
+ .dma_supported = arm_dma_supported,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -344,8 +353,6 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
}
-#ifdef CONFIG_MMU
-
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr,
@@ -647,22 +654,6 @@ static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
return prot;
}
-#define nommu() 0
-
-#else /* !CONFIG_MMU */
-
-#define nommu() 1
-
-#define __get_dma_pgprot(attrs, prot) __pgprot(0)
-#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
-#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag, gfp) NULL
-#define __free_from_pool(cpu_addr, size) do { } while (0)
-#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
-#define __dma_free_remap(cpu_addr, size) do { } while (0)
-
-#endif /* CONFIG_MMU */
-
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
struct page **ret_page)
{
@@ -799,13 +790,13 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp &= ~(__GFP_COMP);
args.gfp = gfp;
- *handle = DMA_ERROR_CODE;
+ *handle = ARM_MAPPING_ERROR;
allowblock = gfpflags_allow_blocking(gfp);
cma = allowblock ? dev_get_cma_area(dev) : false;
if (cma)
buf->allocator = &cma_allocator;
- else if (nommu() || is_coherent)
+ else if (is_coherent)
buf->allocator = &simple_allocator;
else if (allowblock)
buf->allocator = &remap_allocator;
@@ -854,8 +845,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- int ret = -ENXIO;
-#ifdef CONFIG_MMU
+ int ret;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = dma_to_pfn(dev, dma_addr);
@@ -870,10 +860,6 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
vma->vm_end - vma->vm_start,
vma->vm_page_prot);
}
-#else
- ret = vm_iomap_memory(vma, vma->vm_start,
- (vma->vm_end - vma->vm_start));
-#endif /* CONFIG_MMU */
return ret;
}
@@ -892,9 +878,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
-#ifdef CONFIG_MMU
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-#endif /* CONFIG_MMU */
return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
@@ -1177,11 +1161,10 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
* during bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
*/
-int dma_supported(struct device *dev, u64 mask)
+int arm_dma_supported(struct device *dev, u64 mask)
{
return __dma_supported(dev, mask, false);
}
-EXPORT_SYMBOL(dma_supported);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
@@ -1254,7 +1237,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
if (i == mapping->nr_bitmaps) {
if (extend_iommu_mapping(mapping)) {
spin_unlock_irqrestore(&mapping->lock, flags);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1262,7 +1245,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
if (start > mapping->bits) {
spin_unlock_irqrestore(&mapping->lock, flags);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
bitmap_set(mapping->bitmaps[i], start, count);
@@ -1445,7 +1428,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
int i;
dma_addr = __alloc_iova(mapping, size);
- if (dma_addr == DMA_ERROR_CODE)
+ if (dma_addr == ARM_MAPPING_ERROR)
return dma_addr;
iova = dma_addr;
@@ -1472,7 +1455,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
fail:
iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
__free_iova(mapping, dma_addr, size);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1533,7 +1516,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
return NULL;
*handle = __iommu_create_mapping(dev, &page, size, attrs);
- if (*handle == DMA_ERROR_CODE)
+ if (*handle == ARM_MAPPING_ERROR)
goto err_mapping;
return addr;
@@ -1561,7 +1544,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
struct page **pages;
void *addr = NULL;
- *handle = DMA_ERROR_CODE;
+ *handle = ARM_MAPPING_ERROR;
size = PAGE_ALIGN(size);
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1582,7 +1565,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
return NULL;
*handle = __iommu_create_mapping(dev, pages, size, attrs);
- if (*handle == DMA_ERROR_CODE)
+ if (*handle == ARM_MAPPING_ERROR)
goto err_buffer;
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
@@ -1732,10 +1715,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
int prot;
size = PAGE_ALIGN(size);
- *handle = DMA_ERROR_CODE;
+ *handle = ARM_MAPPING_ERROR;
iova_base = iova = __alloc_iova(mapping, size);
- if (iova == DMA_ERROR_CODE)
+ if (iova == ARM_MAPPING_ERROR)
return -ENOMEM;
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1775,7 +1758,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
for (i = 1; i < nents; i++) {
s = sg_next(s);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = ARM_MAPPING_ERROR;
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1950,7 +1933,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
int ret, prot, len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == DMA_ERROR_CODE)
+ if (dma_addr == ARM_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs);
@@ -1962,7 +1945,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
/**
@@ -2056,7 +2039,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
size_t len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == DMA_ERROR_CODE)
+ if (dma_addr == ARM_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2068,7 +2051,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
- return DMA_ERROR_CODE;
+ return ARM_MAPPING_ERROR;
}
/**
@@ -2140,6 +2123,9 @@ const struct dma_map_ops iommu_ops = {
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
+
+ .mapping_error = arm_dma_mapping_error,
+ .dma_supported = arm_dma_supported,
};
const struct dma_map_ops iommu_coherent_ops = {
@@ -2156,6 +2142,9 @@ const struct dma_map_ops iommu_coherent_ops = {
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
+
+ .mapping_error = arm_dma_mapping_error,
+ .dma_supported = arm_dma_supported,
};
/**
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index f0325d96b97a..785d2a562a23 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -185,23 +185,6 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);
-static const struct dma_map_ops xen_swiotlb_dma_ops = {
- .alloc = xen_swiotlb_alloc_coherent,
- .free = xen_swiotlb_free_coherent,
- .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
- .sync_single_for_device = xen_swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
- .map_sg = xen_swiotlb_map_sg_attrs,
- .unmap_sg = xen_swiotlb_unmap_sg_attrs,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
- .dma_supported = xen_swiotlb_dma_supported,
- .set_dma_mask = xen_swiotlb_set_dma_mask,
- .mmap = xen_swiotlb_dma_mmap,
- .get_sgtable = xen_swiotlb_get_sgtable,
-};
-
int __init xen_mm_init(void)
{
struct gnttab_cache_flush cflush;
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index f72779aad276..0df756b24863 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -24,7 +24,6 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern const struct dma_map_ops dummy_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3e340b625436..e90cd1db42a8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -175,7 +175,6 @@ static void *__dma_alloc(struct device *dev, size_t size,
no_map:
__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
- *dma_handle = DMA_ERROR_CODE;
return NULL;
}
@@ -478,7 +477,7 @@ static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return DMA_ERROR_CODE;
+ return 0;
}
static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 3c1bd640042a..89bdb8264305 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -41,6 +41,7 @@ config BLACKFIN
select MODULES_USE_ELF_RELA
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_NMI
+ select ARCH_NO_COHERENT_DMA_MMAP
config GENERIC_CSUM
def_bool y
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index aca9f755e4f8..05daf1038111 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -12,11 +12,6 @@
#ifndef _ASM_C6X_DMA_MAPPING_H
#define _ASM_C6X_DMA_MAPPING_H
-/*
- * DMA errors are defined by all-bits-set in the DMA address.
- */
-#define DMA_ERROR_CODE ~0
-
extern const struct dma_map_ops c6x_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index d3a87bd9b686..463dbc18f853 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -29,8 +29,6 @@
#include <asm/io.h>
struct device;
-extern int bad_dma_address;
-#define DMA_ERROR_CODE bad_dma_address
extern const struct dma_map_ops *dma_ops;
@@ -39,9 +37,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index e74b65009587..546792d176a4 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -25,25 +25,16 @@
#include <linux/module.h>
#include <asm/page.h>
+#define HEXAGON_MAPPING_ERROR 0
+
const struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
-int bad_dma_address; /* globals are automatically initialized to zero */
-
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
return phys_to_virt((unsigned long) dma_addr);
}
-int dma_supported(struct device *dev, u64 mask)
-{
- if (mask == DMA_BIT_MASK(32))
- return 1;
- else
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
static struct gen_pool *coherent_pool;
@@ -181,7 +172,7 @@ static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
- return bad_dma_address;
+ return HEXAGON_MAPPING_ERROR;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_sync(dma_addr_to_virt(bus), size, dir);
@@ -203,6 +194,11 @@ static void hexagon_sync_single_for_device(struct device *dev,
dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}
+static int hexagon_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == HEXAGON_MAPPING_ERROR;
+}
+
const struct dma_map_ops hexagon_dma_ops = {
.alloc = hexagon_dma_alloc_coherent,
.free = hexagon_free_coherent,
@@ -210,6 +206,7 @@ const struct dma_map_ops hexagon_dma_ops = {
.map_page = hexagon_map_page,
.sync_single_for_cpu = hexagon_sync_single_for_cpu,
.sync_single_for_device = hexagon_sync_single_for_device,
+ .mapping_error = hexagon_mapping_error,
.is_phys = 1,
};
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index 00bcad9cbd8f..aa248f595431 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -40,7 +40,6 @@ EXPORT_SYMBOL(memset);
/* Additional variables */
EXPORT_SYMBOL(__phys_offset);
EXPORT_SYMBOL(_dflt_cache_att);
-EXPORT_SYMBOL(bad_dma_address);
#define DECLARE_EXPORT(name) \
extern void name(void); EXPORT_SYMBOL(name)
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 73ec3c6f4cfe..3ce5ab4339f3 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -12,8 +12,6 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-#define DMA_ERROR_CODE 0
-
extern const struct dma_map_ops *dma_ops;
extern struct ia64_machine_vector ia64_mv;
extern void set_iommu_machvec(void);
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 95474460b367..87cde1e4b38c 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -19,6 +19,7 @@ config M32R
select HAVE_DEBUG_STACKOVERFLOW
select CPU_NO_EFFICIENT_FFS
select DMA_NOOP_OPS
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
config SBUS
bool
diff --git a/arch/m32r/include/asm/dma-mapping.h b/arch/m32r/include/asm/dma-mapping.h
index c01d9f52d228..aff3ae8b62f7 100644
--- a/arch/m32r/include/asm/dma-mapping.h
+++ b/arch/m32r/include/asm/dma-mapping.h
@@ -8,8 +8,6 @@
#include <linux/dma-debug.h>
#include <linux/io.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
return &dma_noop_ops;
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index d140206d5d29..5abb548f0e70 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -2,6 +2,7 @@ config M68K
bool
default y
select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_IDE
select HAVE_AOUT if MMU
select HAVE_DEBUG_BUGVERBOSE
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 8e47121b8b8b..4ed8ebf33509 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -2,6 +2,7 @@ config MICROBLAZE
def_bool y
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select TIMER_OF
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 3fad5e722a66..e15cd2f76e23 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@
#include <asm/io.h>
#include <asm/cacheflush.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
#define __dma_free_coherent(size, addr) ((void)0)
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 178ca17a5667..34486c138206 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -75,19 +75,11 @@ static void loongson_dma_sync_sg_for_device(struct device *dev,
mb();
}
-static int loongson_dma_set_mask(struct device *dev, u64 mask)
+static int loongson_dma_supported(struct device *dev, u64 mask)
{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) {
- *dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits);
- return -EIO;
- }
-
- *dev->dma_mask = mask;
-
- return 0;
+ if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits))
+ return 0;
+ return swiotlb_dma_supported(dev, mask);
}
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -126,8 +118,7 @@ static const struct dma_map_ops loongson_dma_map_ops = {
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = loongson_dma_sync_sg_for_device,
.mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = swiotlb_dma_supported,
- .set_dma_mask = loongson_dma_set_mask
+ .dma_supported = loongson_dma_supported,
};
void __init plat_swiotlb_setup(void)
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 0c0075f17145..f41bd3cb76d9 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -26,8 +26,6 @@
#include <linux/kmemcheck.h>
#include <linux/dma-mapping.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
extern const struct dma_map_ops or1k_dma_map_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -35,11 +33,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return &or1k_dma_map_ops;
}
-#define HAVE_ARCH_DMA_SUPPORTED 1
-static inline int dma_supported(struct device *dev, u64 dma_mask)
-{
- /* Support 32 bit DMA mask exclusively */
- return dma_mask == DMA_BIT_MASK(32);
-}
-
#endif /* __ASM_OPENRISC_DMA_MAPPING_H */
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 181a095468e4..eaece3d3e225 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -17,10 +17,6 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
-#ifdef CONFIG_PPC64
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-#endif
-
/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
@@ -116,7 +112,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 8a8ce220d7d0..20febe0b7f32 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -139,6 +139,8 @@ struct scatterlist;
#ifdef CONFIG_PPC64
+#define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0)
+
static inline void set_iommu_table_base(struct device *dev,
struct iommu_table *base)
{
@@ -238,6 +240,8 @@ static inline int __init tce_iommu_bus_notifier_init(void)
}
#endif /* !CONFIG_IOMMU_API */
+int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
#else
static inline void *get_iommu_table_base(struct device *dev)
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index fb7cbaa37658..8f7abf9baa63 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -105,6 +105,11 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
return mask;
}
+int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == IOMMU_MAPPING_ERROR;
+}
+
struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
@@ -115,5 +120,6 @@ struct dma_map_ops dma_iommu_ops = {
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
+ .mapping_error = dma_iommu_mapping_error,
};
EXPORT_SYMBOL(dma_iommu_ops);
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 41c749586bd2..4194bbbbdb10 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -314,18 +314,6 @@ EXPORT_SYMBOL(dma_set_coherent_mask);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-int __dma_set_mask(struct device *dev, u64 dma_mask)
-{
- const struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
- return dma_ops->set_dma_mask(dev, dma_mask);
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
- *dev->dma_mask = dma_mask;
- return 0;
-}
-
int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (ppc_md.dma_set_mask)
@@ -338,7 +326,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
return phb->controller_ops.dma_set_mask(pdev, dma_mask);
}
- return __dma_set_mask(dev, dma_mask);
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+ *dev->dma_mask = dma_mask;
+ return 0;
}
EXPORT_SYMBOL(dma_set_mask);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index f2b724cd9e64..233ca3fe4754 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
if (should_fail_iommu(dev))
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
/*
* We don't need to disable preemption here because any CPU can
@@ -278,7 +278,7 @@ again:
} else {
/* Give up */
spin_unlock_irqrestore(&(pool->lock), flags);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
}
@@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
unsigned long attrs)
{
unsigned long entry;
- dma_addr_t ret = DMA_ERROR_CODE;
+ dma_addr_t ret = IOMMU_MAPPING_ERROR;
int build_fail;
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
- if (unlikely(entry == DMA_ERROR_CODE))
- return DMA_ERROR_CODE;
+ if (unlikely(entry == IOMMU_MAPPING_ERROR))
+ return IOMMU_MAPPING_ERROR;
entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << tbl->it_page_shift; /* Set the return dma address */
@@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
/* tbl->it_ops->set() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return
- * DMA_ERROR_CODE. For all other errors the functionality is
+ * IOMMU_MAPPING_ERROR. For all other errors the functionality is
* not altered.
*/
if (unlikely(build_fail)) {
__iommu_free(tbl, ret, npages);
- return DMA_ERROR_CODE;
+ return IOMMU_MAPPING_ERROR;
}
/* Flush/invalidate TLB caches if necessary */
@@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
/* Handle failure */
- if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit())
dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
*/
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_address = IOMMU_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
npages = iommu_num_pages(s->dma_address, s->dma_length,
IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = IOMMU_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
unsigned long mask, enum dma_data_direction direction,
unsigned long attrs)
{
- dma_addr_t dma_handle = DMA_ERROR_CODE;
+ dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
void *vaddr;
unsigned long uaddr;
unsigned int npages, align;
@@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> tbl->it_page_shift, align,
attrs);
- if (dma_handle == DMA_ERROR_CODE) {
+ if (dma_handle == IOMMU_MAPPING_ERROR) {
if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit()) {
dev_info(dev, "iommu_alloc failed, tbl %p "
@@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0);
- if (mapping == DMA_ERROR_CODE) {
+ if (mapping == IOMMU_MAPPING_ERROR) {
free_pages((unsigned long)ret, order);
return NULL;
}
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 71b995bbcae0..29d4f96ed33e 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -644,32 +644,22 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
direction, attrs);
}
-static int dma_fixed_dma_supported(struct device *dev, u64 mask)
-{
- return mask == DMA_BIT_MASK(64);
-}
-
-static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
+static int dma_supported_and_switch(struct device *dev, u64 dma_mask);
static const struct dma_map_ops dma_iommu_fixed_ops = {
.alloc = dma_fixed_alloc_coherent,
.free = dma_fixed_free_coherent,
.map_sg = dma_fixed_map_sg,
.unmap_sg = dma_fixed_unmap_sg,
- .dma_supported = dma_fixed_dma_supported,
- .set_dma_mask = dma_set_mask_and_switch,
+ .dma_supported = dma_supported_and_switch,
.map_page = dma_fixed_map_page,
.unmap_page = dma_fixed_unmap_page,
+ .mapping_error = dma_iommu_mapping_error,
};
-static void cell_dma_dev_setup_fixed(struct device *dev);
-
static void cell_dma_dev_setup(struct device *dev)
{
- /* Order is important here, these are not mutually exclusive */
- if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
- cell_dma_dev_setup_fixed(dev);
- else if (get_pci_dma_ops() == &dma_iommu_ops)
+ if (get_pci_dma_ops() == &dma_iommu_ops)
set_iommu_table_base(dev, cell_get_iommu_table(dev));
else if (get_pci_dma_ops() == &dma_direct_ops)
set_dma_offset(dev, cell_dma_direct_offset);
@@ -956,38 +946,29 @@ out:
return dev_addr;
}
-static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
+static int dma_supported_and_switch(struct device *dev, u64 dma_mask)
{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
if (dma_mask == DMA_BIT_MASK(64) &&
- cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
- {
+ cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
+ u64 addr = cell_iommu_get_fixed_address(dev) +
+ dma_iommu_fixed_base;
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+ dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
set_dma_ops(dev, &dma_iommu_fixed_ops);
- } else {
+ set_dma_offset(dev, addr);
+ return 1;
+ }
+
+ if (dma_iommu_dma_supported(dev, dma_mask)) {
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
set_dma_ops(dev, get_pci_dma_ops());
+ cell_dma_dev_setup(dev);
+ return 1;
}
- cell_dma_dev_setup(dev);
-
- *dev->dma_mask = dma_mask;
-
return 0;
}
-static void cell_dma_dev_setup_fixed(struct device *dev)
-{
- u64 addr;
-
- addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
- set_dma_offset(dev, addr);
-
- dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
-}
-
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
unsigned long base_pte)
{
@@ -1139,7 +1120,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
}
- dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
+ dma_iommu_ops.dma_supported = dma_supported_and_switch;
set_pci_dma_ops(&dma_iommu_ops);
return 0;
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 117beb9e8786..8a47f168476b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
- dma_addr_t ret = DMA_ERROR_CODE;
+ dma_addr_t ret = IOMMU_MAPPING_ERROR;
tbl = get_iommu_table_base(dev);
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
@@ -625,6 +625,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
.unmap_page = vio_dma_iommu_unmap_page,
.dma_supported = vio_dma_iommu_dma_supported,
.get_required_mask = vio_dma_get_required_mask,
+ .mapping_error = dma_iommu_mapping_error,
};
/**
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 3108b8dbe266..512ad0eaa11a 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -8,8 +8,6 @@
#include <linux/dma-debug.h>
#include <linux/io.h>
-#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
-
extern const struct dma_map_ops s390_pci_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 8eb1cc341dab..0d300ee00f4e 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -14,6 +14,8 @@
#include <linux/pci.h>
#include <asm/pci_dma.h>
+#define S390_MAPPING_ERROR (~(dma_addr_t) 0x0)
+
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
@@ -281,7 +283,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
out_error:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
- return DMA_ERROR_CODE;
+ return S390_MAPPING_ERROR;
}
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
@@ -329,7 +331,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
dma_addr = dma_alloc_address(dev, nr_pages);
- if (dma_addr == DMA_ERROR_CODE) {
+ if (dma_addr == S390_MAPPING_ERROR) {
ret = -ENOSPC;
goto out_err;
}
@@ -352,7 +354,7 @@ out_free:
out_err:
zpci_err("map error:\n");
zpci_err_dma(ret, pa);
- return DMA_ERROR_CODE;
+ return S390_MAPPING_ERROR;
}
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
@@ -429,7 +431,7 @@ static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int ret;
dma_addr_base = dma_alloc_address(dev, nr_pages);
- if (dma_addr_base == DMA_ERROR_CODE)
+ if (dma_addr_base == S390_MAPPING_ERROR)
return -ENOMEM;
dma_addr = dma_addr_base;
@@ -476,7 +478,7 @@ static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
for (i = 1; i < nr_elements; i++) {
s = sg_next(s);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = S390_MAPPING_ERROR;
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) ||
@@ -525,6 +527,11 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
s->dma_length = 0;
}
}
+
+static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == S390_MAPPING_ERROR;
+}
int zpci_dma_init_device(struct zpci_dev *zdev)
{
@@ -659,6 +666,7 @@ const struct dma_map_ops s390_pci_dma_ops = {
.unmap_sg = s390_dma_unmap_sg,
.map_page = s390_dma_map_pages,
.unmap_page = s390_dma_unmap_pages,
+ .mapping_error = s390_mapping_error,
/* if we support direct DMA this must be conditional */
.is_phys = 0,
/* dma_supported is unconditionally true without a callback */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ee086958b2b2..640a85925060 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -2,6 +2,7 @@ config SUPERH
def_bool y
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select HAVE_PATA_PLATFORM
select CLKDEV_LOOKUP
select HAVE_IDE if HAS_IOPORT_MAP
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index d99008af5f73..9b06be07db4d 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -9,8 +9,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
return dma_ops;
}
-#define DMA_ERROR_CODE 0
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir);
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 69cc627779f2..60bf1633d554 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -5,11 +5,6 @@
#include <linux/mm.h>
#include <linux/dma-debug.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
-#define HAVE_ARCH_DMA_SUPPORTED 1
-int dma_supported(struct device *dev, u64 mask);
-
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
@@ -19,7 +14,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}
extern const struct dma_map_ops *dma_ops;
-extern const struct dma_map_ops *leon_dma_ops;
extern const struct dma_map_ops pci32_dma_ops;
extern struct bus_type pci_bus_type;
@@ -28,7 +22,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
#ifdef CONFIG_SPARC_LEON
if (sparc_cpu_model == sparc_leon)
- return leon_dma_ops;
+ return &pci32_dma_ops;
#endif
#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
if (bus == &pci_bus_type)
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index c63ba99ca551..fcbcc031f615 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -314,7 +314,7 @@ bad:
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
@@ -547,7 +547,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_address = SPARC_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -573,7 +573,7 @@ iommu_map_failed:
iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
IOMMU_ERROR_CODE);
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = SPARC_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -741,6 +741,26 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
spin_unlock_irqrestore(&iommu->lock, flags);
}
+static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == SPARC_MAPPING_ERROR;
+}
+
+static int dma_4u_supported(struct device *dev, u64 device_mask)
+{
+ struct iommu *iommu = dev->archdata.iommu;
+
+ if (device_mask > DMA_BIT_MASK(32))
+ return 0;
+ if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
+ return 1;
+#ifdef CONFIG_PCI
+ if (dev_is_pci(dev))
+ return pci64_dma_supported(to_pci_dev(dev), device_mask);
+#endif
+ return 0;
+}
+
static const struct dma_map_ops sun4u_dma_ops = {
.alloc = dma_4u_alloc_coherent,
.free = dma_4u_free_coherent,
@@ -750,31 +770,9 @@ static const struct dma_map_ops sun4u_dma_ops = {
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
+ .dma_supported = dma_4u_supported,
+ .mapping_error = dma_4u_mapping_error,
};
const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
-
-int dma_supported(struct device *dev, u64 device_mask)
-{
- struct iommu *iommu = dev->archdata.iommu;
- u64 dma_addr_mask = iommu->dma_addr_mask;
-
- if (device_mask > DMA_BIT_MASK(32)) {
- if (iommu->atu)
- dma_addr_mask = iommu->atu->dma_addr_mask;
- else
- return 0;
- }
-
- if ((device_mask & dma_addr_mask) == dma_addr_mask)
- return 1;
-
-#ifdef CONFIG_PCI
- if (dev_is_pci(dev))
- return pci64_dma_supported(to_pci_dev(dev), device_mask);
-#endif
-
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h
index 828493329f68..5ea5c192b1d9 100644
--- a/arch/sparc/kernel/iommu_common.h
+++ b/arch/sparc/kernel/iommu_common.h
@@ -47,4 +47,6 @@ static inline int is_span_boundary(unsigned long entry,
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
+#define SPARC_MAPPING_ERROR (~(dma_addr_t)0x0)
+
#endif /* _IOMMU_COMMON_H */
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index cf20033a1458..12894f259bea 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -401,6 +401,11 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
BUG();
}
+static int sbus_dma_supported(struct device *dev, u64 mask)
+{
+ return 0;
+}
+
static const struct dma_map_ops sbus_dma_ops = {
.alloc = sbus_alloc_coherent,
.free = sbus_free_coherent,
@@ -410,6 +415,7 @@ static const struct dma_map_ops sbus_dma_ops = {
.unmap_sg = sbus_unmap_sg,
.sync_sg_for_cpu = sbus_sync_sg_for_cpu,
.sync_sg_for_device = sbus_sync_sg_for_device,
+ .dma_supported = sbus_dma_supported,
};
static int __init sparc_register_ioport(void)
@@ -637,6 +643,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
}
}
+/* note: leon re-uses pci32_dma_ops */
const struct dma_map_ops pci32_dma_ops = {
.alloc = pci32_alloc_coherent,
.free = pci32_free_coherent,
@@ -651,29 +658,9 @@ const struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
-/* leon re-uses pci32_dma_ops */
-const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
-EXPORT_SYMBOL(leon_dma_ops);
-
const struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
-
-/*
- * Return whether the given PCI device DMA address mask can be
- * supported properly. For example, if your device can only drive the
- * low 24-bits during PCI bus mastering, then you would pass
- * 0x00ffffff as the mask to this function.
- */
-int dma_supported(struct device *dev, u64 mask)
-{
- if (dev_is_pci(dev))
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL(dma_supported);
-
#ifdef CONFIG_PROC_FS
static int sparc_io_proc_show(struct seq_file *m, void *v)
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 68bec7c97cb8..24f21c726dfa 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -24,6 +24,7 @@
#include "pci_impl.h"
#include "iommu_common.h"
+#include "kernel.h"
#include "pci_sun4v.h"
@@ -412,12 +413,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
bad:
if (printk_ratelimit())
WARN_ON(1);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
iommu_map_fail:
local_irq_restore(flags);
iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
- return DMA_ERROR_CODE;
+ return SPARC_MAPPING_ERROR;
}
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
@@ -590,7 +591,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_ERROR_CODE;
+ outs->dma_address = SPARC_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -607,7 +608,7 @@ iommu_map_failed:
iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
- s->dma_address = DMA_ERROR_CODE;
+ s->dma_address = SPARC_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
@@ -669,6 +670,26 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
local_irq_restore(flags);
}
+static int dma_4v_supported(struct device *dev, u64 device_mask)
+{
+ struct iommu *iommu = dev->archdata.iommu;
+ u64 dma_addr_mask;
+
+ if (device_mask > DMA_BIT_MASK(32) && iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ dma_addr_mask = iommu->dma_addr_mask;
+
+ if ((device_mask & dma_addr_mask) == dma_addr_mask)
+ return 1;
+ return pci64_dma_supported(to_pci_dev(dev), device_mask);
+}
+
+static int dma_4v_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == SPARC_MAPPING_ERROR;
+}
+
static const struct dma_map_ops sun4v_dma_ops = {
.alloc = dma_4v_alloc_coherent,
.free = dma_4v_free_coherent,
@@ -676,6 +697,8 @@ static const struct dma_map_ops sun4v_dma_ops = {
.unmap_page = dma_4v_unmap_page,
.map_sg = dma_4v_map_sg,
.unmap_sg = dma_4v_unmap_sg,
+ .dma_supported = dma_4v_supported,
+ .mapping_error = dma_4v_mapping_error,
};
static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 569bb6dd154a..f2abedc8a080 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -317,18 +317,6 @@ static void tile_dma_sync_sg_for_device(struct device *dev,
}
}
-static inline int
-tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int
-tile_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
static const struct dma_map_ops tile_default_dma_map_ops = {
.alloc = tile_dma_alloc_coherent,
.free = tile_dma_free_coherent,
@@ -340,8 +328,6 @@ static const struct dma_map_ops tile_default_dma_map_ops = {
.sync_single_for_device = tile_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_dma_sync_sg_for_device,
- .mapping_error = tile_dma_mapping_error,
- .dma_supported = tile_dma_supported
};
const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
@@ -504,18 +490,6 @@ static void tile_pci_dma_sync_sg_for_device(struct device *dev,
}
}
-static inline int
-tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int
-tile_pci_dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
static const struct dma_map_ops tile_pci_default_dma_map_ops = {
.alloc = tile_pci_dma_alloc_coherent,
.free = tile_pci_dma_free_coherent,
@@ -527,8 +501,6 @@ static const struct dma_map_ops tile_pci_default_dma_map_ops = {
.sync_single_for_device = tile_pci_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
- .mapping_error = tile_pci_dma_mapping_error,
- .dma_supported = tile_pci_dma_supported
};
const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
@@ -578,8 +550,6 @@ static const struct dma_map_ops pci_hybrid_dma_ops = {
.sync_single_for_device = tile_pci_dma_sync_single_for_device,
.sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
.sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
- .mapping_error = tile_pci_dma_mapping_error,
- .dma_supported = tile_pci_dma_supported
};
const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
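tile moves in the opposite direction: its mapping_error and dma_supported stubs only restated the defaults (never fails, any mask supported), so they can simply be deleted. With no callback registered the common code assumes success; a rough sketch of the dma_supported fallback, under the same simplifications as above:

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	/* No callback: the implementation places no restriction on
	 * the device's DMA mask. */
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}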
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 08a0838b83fb..398c79889f5c 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -19,8 +19,6 @@
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
#endif
-#define DMA_ERROR_CODE 0
-
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
@@ -35,9 +33,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs
-#define HAVE_ARCH_DMA_SUPPORTED 1
-extern int dma_supported(struct device *hwdev, u64 mask);
-
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
unsigned long attrs);
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index 793869879464..fca144a104e4 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,6 +6,8 @@ extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int iommu_pass_through;
+int x86_dma_supported(struct device *dev, u64 mask);
+
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 815dd63f49d0..cc0e8bc0ea3f 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -704,6 +704,7 @@ static const struct dma_map_ops gart_dma_ops = {
.alloc = gart_alloc_coherent,
.free = gart_free_coherent,
.mapping_error = gart_mapping_error,
+ .dma_supported = x86_dma_supported,
};
static void gart_iommu_shutdown(void)
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index fda7867046d0..5286a4a92cf7 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -50,6 +50,8 @@
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
+#define CALGARY_MAPPING_ERROR 0
+
#ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
int use_calgary __read_mostly = 1;
#else
@@ -252,7 +254,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (panic_on_overflow)
panic("Calgary: fix the allocator.\n");
else
- return DMA_ERROR_CODE;
+ return CALGARY_MAPPING_ERROR;
}
}
@@ -272,10 +274,10 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
entry = iommu_range_alloc(dev, tbl, npages);
- if (unlikely(entry == DMA_ERROR_CODE)) {
+ if (unlikely(entry == CALGARY_MAPPING_ERROR)) {
pr_warn("failed to allocate %u pages in iommu %p\n",
npages, tbl);
- return DMA_ERROR_CODE;
+ return CALGARY_MAPPING_ERROR;
}
/* set the return dma address */
@@ -295,7 +297,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned long flags;
/* were we called with bad_dma_address? */
- badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
+ badend = CALGARY_MAPPING_ERROR + (EMERGENCY_PAGES * PAGE_SIZE);
if (unlikely(dma_addr < badend)) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
@@ -380,7 +382,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
entry = iommu_range_alloc(dev, tbl, npages);
- if (entry == DMA_ERROR_CODE) {
+ if (entry == CALGARY_MAPPING_ERROR) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
goto error;
@@ -398,7 +400,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
error:
calgary_unmap_sg(dev, sg, nelems, dir, 0);
for_each_sg(sg, s, nelems, i) {
- sg->dma_address = DMA_ERROR_CODE;
+ sg->dma_address = CALGARY_MAPPING_ERROR;
sg->dma_length = 0;
}
return 0;
@@ -453,7 +455,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
/* set up tces to cover the allocated range */
mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
- if (mapping == DMA_ERROR_CODE)
+ if (mapping == CALGARY_MAPPING_ERROR)
goto free;
*dma_handle = mapping;
return ret;
@@ -478,6 +480,11 @@ static void calgary_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)vaddr, get_order(size));
}
+static int calgary_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == CALGARY_MAPPING_ERROR;
+}
+
static const struct dma_map_ops calgary_dma_ops = {
.alloc = calgary_alloc_coherent,
.free = calgary_free_coherent,
@@ -485,6 +492,8 @@ static const struct dma_map_ops calgary_dma_ops = {
.unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page,
.unmap_page = calgary_unmap_page,
+ .mapping_error = calgary_mapping_error,
+ .dma_supported = x86_dma_supported,
};
static inline void __iomem * busno_to_bbar(unsigned char num)
@@ -732,7 +741,7 @@ static void __init calgary_reserve_regions(struct pci_dev *dev)
struct iommu_table *tbl = pci_iommu(dev->bus);
/* reserve EMERGENCY_PAGES from bad_dma_address and up */
- iommu_range_reserve(tbl, DMA_ERROR_CODE, EMERGENCY_PAGES);
+ iommu_range_reserve(tbl, CALGARY_MAPPING_ERROR, EMERGENCY_PAGES);
/* avoid the BIOS/VGA first 640KB-1MB region */
/* for CalIOC2 - avoid the entire first MB */
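Calgary keeps 0 as its sentinel, which is only sound because calgary_reserve_regions() above reserves EMERGENCY_PAGES starting at bus address 0 in every IOMMU table, so a successful allocation can never return 0. Note also the scatter-gather convention visible in calgary_map_sg(): on failure the entries are poisoned with the sentinel and 0 is returned, so callers test the return count rather than each address. A hypothetical caller (sketch, names invented):

static int example_queue_buffers(struct device *dev,
				 struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	/* map_sg reports failure by mapping zero entries; no
	 * per-entry dma_mapping_error() check is needed here. */
	if (mapped == 0)
		return -ENOMEM;

	/* ... hand the mapped entries to the hardware ... */
	return mapped;
}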
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3a216ec869cd..5e16d3f29594 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -213,10 +213,8 @@ static __init int iommu_setup(char *p)
}
early_param("iommu", iommu_setup);
-int dma_supported(struct device *dev, u64 mask)
+int x86_dma_supported(struct device *dev, u64 mask)
{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
#ifdef CONFIG_PCI
if (mask > 0xffffffff && forbid_dac > 0) {
dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -224,9 +222,6 @@ int dma_supported(struct device *dev, u64 mask)
}
#endif
- if (ops->dma_supported)
- return ops->dma_supported(dev, mask);
-
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
@@ -252,7 +247,6 @@ int dma_supported(struct device *dev, u64 mask)
return 1;
}
-EXPORT_SYMBOL(dma_supported);
static int __init pci_iommu_init(void)
{
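With the EXPORT_SYMBOL gone, dma_supported() is no longer an arch-overridden global on x86; the DAC and low-mask (ISA) checks survive as x86_dma_supported(), reachable only through the .dma_supported member of each x86 ops structure. The chain a driver triggers is roughly the following (simplified sketch of the common code):

int dma_set_mask(struct device *dev, u64 mask)
{
	/* dma_supported() consults ops->dma_supported, which every
	 * x86 implementation touched by this series now points at
	 * x86_dma_supported(). */
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}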
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index a88952ef371c..a6d404087fe3 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -11,6 +11,8 @@
#include <asm/iommu.h>
#include <asm/dma.h>
+#define NOMMU_MAPPING_ERROR 0
+
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
@@ -33,7 +35,7 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
- return DMA_ERROR_CODE;
+ return NOMMU_MAPPING_ERROR;
flush_write_buffers();
return bus;
}
@@ -88,6 +90,11 @@ static void nommu_sync_sg_for_device(struct device *dev,
flush_write_buffers();
}
+static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == NOMMU_MAPPING_ERROR;
+}
+
const struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
.free = dma_generic_free_coherent,
@@ -96,4 +103,6 @@ const struct dma_map_ops nommu_dma_ops = {
.sync_single_for_device = nommu_sync_single_for_device,
.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys = 1,
+ .mapping_error = nommu_mapping_error,
+ .dma_supported = x86_dma_supported,
};
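nommu also picks 0, matching the old x86-wide DMA_ERROR_CODE; its only failure mode is check_addr() rejecting a bus address that does not fit the device's mask. Driver code is expected to go through dma_mapping_error() rather than compare against any constant; a sketch of the consumer side (hypothetical driver code):

static int example_map_one(struct device *dev, struct page *page,
			   size_t len, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	/* Never compare against a literal: only the implementation
	 * knows its sentinel (0 here, ~0 in other implementations). */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}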
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index ec008e800b45..53d600217973 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -26,6 +26,7 @@
#include <linux/pci_ids.h>
#include <linux/export.h>
#include <linux/list.h>
+#include <asm/iommu.h>
#define STA2X11_SWIOTLB_SIZE (4*1024*1024)
extern int swiotlb_late_init_with_default_size(size_t default_size);
@@ -191,7 +192,7 @@ static const struct dma_map_ops sta2x11_dma_ops = {
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
.mapping_error = swiotlb_dma_mapping_error,
- .dma_supported = NULL, /* FIXME: we should use this instead! */
+ .dma_supported = x86_dma_supported,
};
/* At setup time, we use our own ops if the device is a ConneXt one */
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 42b08f8fc2ca..37c6056a7bba 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -18,20 +18,6 @@
int xen_swiotlb __read_mostly;
-static const struct dma_map_ops xen_swiotlb_dma_ops = {
- .alloc = xen_swiotlb_alloc_coherent,
- .free = xen_swiotlb_free_coherent,
- .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
- .sync_single_for_device = xen_swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
- .map_sg = xen_swiotlb_map_sg_attrs,
- .unmap_sg = xen_swiotlb_unmap_sg_attrs,
- .map_page = xen_swiotlb_map_page,
- .unmap_page = xen_swiotlb_unmap_page,
- .dma_supported = xen_swiotlb_dma_supported,
-};
-
/*
* pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
*
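The xen_swiotlb_dma_ops table deleted above is not lost: the series consolidates it into a single definition in the common Xen swiotlb code (drivers/xen/swiotlb-xen.c), shared by the x86 and ARM sides instead of each spelling out an identical ops structure. The x86 side then only installs the shared table when bounce buffering through Xen is required; roughly (assumed, simplified):

void __init pci_xen_swiotlb_init(void)
{
	if (xen_swiotlb) {
		xen_swiotlb_init(1, true /* early */);
		dma_ops = &xen_swiotlb_dma_ops;
	}
}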
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index f4126cf997a4..7ad6d77b2f22 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -3,6 +3,7 @@ config ZONE_DMA
config XTENSA
def_bool y
+ select ARCH_NO_COHERENT_DMA_MMAP if !MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index c6140fa8c0be..269738dc9d1d 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -16,8 +16,6 @@
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-
extern const struct dma_map_ops xtensa_dma_map_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
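xtensa's removed constant is the mirror image of the x86 one: all-ones rather than 0. That disagreement is the motivation for the whole series, since no single DMA_ERROR_CODE value can be guaranteed invalid on every implementation, and so the error indication has to belong to each dma_map_ops instance. xtensa's own map path cannot fail and therefore registers no callback; an implementation whose bus address space legitimately includes 0 could instead register something like this (hypothetical names, not part of this diff):

#define EXAMPLE_MAPPING_ERROR	(~(dma_addr_t)0)

static int example_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == EXAMPLE_MAPPING_ERROR;
}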