author    Linus Torvalds <torvalds@linux-foundation.org>  2016-01-21 12:32:08 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-01-21 12:32:08 -0800
commit    eae21770b4fed5597623aad0d618190fa60426ff
tree      23c59fb7a33e93a79525e2b10d56df54d40049d1
parent    e9f57ebcba563e0cd532926cab83c92bb4d79360
parent    9f273c24ec5f4a6f785bb83e931b3808a07b459e
Merge branch 'akpm' (patches from Andrew)
Merge third patch-bomb from Andrew Morton:
 "I'm pretty much done for -rc1 now:

  - the rest of MM, basically

  - lib/ updates

  - checkpatch, epoll, hfs, fatfs, ptrace, coredump, exit

  - cpu_mask simplifications

  - kexec, rapidio, MAINTAINERS etc, etc.

  - more dma-mapping cleanups/simplifications from hch"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
  MAINTAINERS: add/fix git URLs for various subsystems
  mm: memcontrol: add "sock" to cgroup2 memory.stat
  mm: memcontrol: basic memory statistics in cgroup2 memory controller
  mm: memcontrol: do not uncharge old page in page cache replacement
  Documentation: cgroup: add memory.swap.{current,max} description
  mm: free swap cache aggressively if memcg swap is full
  mm: vmscan: do not scan anon pages if memcg swap limit is hit
  swap.h: move memcg related stuff to the end of the file
  mm: memcontrol: replace mem_cgroup_lruvec_online with mem_cgroup_online
  mm: vmscan: pass memcg to get_scan_count()
  mm: memcontrol: charge swap to cgroup2
  mm: memcontrol: clean up alloc, online, offline, free functions
  mm: memcontrol: flatten struct cg_proto
  mm: memcontrol: rein in the CONFIG space madness
  net: drop tcp_memcontrol.c
  mm: memcontrol: introduce CONFIG_MEMCG_LEGACY_KMEM
  mm: memcontrol: allow to disable kmem accounting for cgroup2
  mm: memcontrol: account "kmem" consumers in cgroup2 memory controller
  mm: memcontrol: move kmem accounting code to CONFIG_MEMCG
  mm: memcontrol: separate kmem code from legacy tcp accounting code
  ...
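The dma-mapping portion of the series follows one pattern throughout: HAVE_DMA_ATTRS and the <asm-generic/dma-mapping-common.h> include are removed, each architecture exports a struct dma_map_ops, and the generic inlines (now provided unconditionally via <linux/dma-mapping.h>) dispatch through get_dma_ops(). A minimal sketch of what every converted header below reduces to, with <arch> standing in for arc, avr32, bfin, c6x, v32 and so on:

/* arch/<arch>/include/asm/dma-mapping.h */
extern struct dma_map_ops <arch>_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &<arch>_dma_ops;
}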
Diffstat (limited to 'arch')
-rw-r--r-- arch/Kconfig | 6
-rw-r--r-- arch/alpha/Kconfig | 1
-rw-r--r-- arch/alpha/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/alpha/include/uapi/asm/mman.h | 1
-rw-r--r-- arch/arc/include/asm/dma-mapping.h | 187
-rw-r--r-- arch/arc/mm/dma.c | 152
-rw-r--r-- arch/arm/Kconfig | 1
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 7
-rw-r--r-- arch/arm64/Kconfig | 1
-rw-r--r-- arch/arm64/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/avr32/include/asm/dma-mapping.h | 342
-rw-r--r-- arch/avr32/mm/dma-coherent.c | 115
-rw-r--r-- arch/blackfin/include/asm/dma-mapping.h | 127
-rw-r--r-- arch/blackfin/kernel/dma-mapping.c | 52
-rw-r--r-- arch/c6x/Kconfig | 1
-rw-r--r-- arch/c6x/include/asm/dma-mapping.h | 98
-rw-r--r-- arch/c6x/kernel/dma.c | 95
-rw-r--r-- arch/c6x/mm/dma-coherent.c | 10
-rw-r--r-- arch/cris/arch-v32/drivers/pci/dma.c | 54
-rw-r--r-- arch/cris/include/asm/dma-mapping.h | 161
-rw-r--r-- arch/frv/Kconfig | 1
-rw-r--r-- arch/frv/include/asm/dma-mapping.h | 132
-rw-r--r-- arch/frv/include/asm/io.h | 17
-rw-r--r-- arch/frv/mb93090-mb00/pci-dma-nommu.c | 72
-rw-r--r-- arch/frv/mb93090-mb00/pci-dma.c | 74
-rw-r--r-- arch/h8300/Kconfig | 1
-rw-r--r-- arch/h8300/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/hexagon/Kconfig | 1
-rw-r--r-- arch/hexagon/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/ia64/Kconfig | 1
-rw-r--r-- arch/ia64/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/m68k/include/asm/dma-mapping.h | 112
-rw-r--r-- arch/m68k/kernel/dma.c | 61
-rw-r--r-- arch/metag/include/asm/dma-mapping.h | 179
-rw-r--r-- arch/metag/kernel/dma.c | 146
-rw-r--r-- arch/microblaze/Kconfig | 1
-rw-r--r-- arch/microblaze/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/mips/Kconfig | 1
-rw-r--r-- arch/mips/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/mips/include/uapi/asm/mman.h | 1
-rw-r--r-- arch/mn10300/Kconfig | 1
-rw-r--r-- arch/mn10300/include/asm/dma-mapping.h | 161
-rw-r--r-- arch/mn10300/mm/dma-alloc.c | 67
-rw-r--r-- arch/nios2/include/asm/dma-mapping.h | 123
-rw-r--r-- arch/nios2/mm/dma-mapping.c | 149
-rw-r--r-- arch/openrisc/Kconfig | 3
-rw-r--r-- arch/openrisc/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/parisc/Kconfig | 1
-rw-r--r-- arch/parisc/include/asm/dma-mapping.h | 189
-rw-r--r-- arch/parisc/include/uapi/asm/mman.h | 1
-rw-r--r-- arch/parisc/kernel/drivers.c | 2
-rw-r--r-- arch/parisc/kernel/pci-dma.c | 92
-rw-r--r-- arch/powerpc/Kconfig | 2
-rw-r--r-- arch/powerpc/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/powerpc/include/asm/fadump.h | 2
-rw-r--r-- arch/powerpc/kernel/Makefile | 8
-rw-r--r-- arch/powerpc/kernel/fadump.c | 4
-rw-r--r-- arch/powerpc/kernel/vdso32/Makefile | 1
-rw-r--r-- arch/powerpc/kernel/vdso64/Makefile | 1
-rw-r--r-- arch/powerpc/xmon/Makefile | 1
-rw-r--r-- arch/s390/Kconfig | 1
-rw-r--r-- arch/s390/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/sh/Kconfig | 1
-rw-r--r-- arch/sh/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/sparc/Kconfig | 1
-rw-r--r-- arch/sparc/include/asm/dma-mapping.h | 17
-rw-r--r-- arch/tile/Kconfig | 1
-rw-r--r-- arch/tile/include/asm/dma-mapping.h | 32
-rw-r--r-- arch/tile/kernel/pci-dma.c | 29
-rw-r--r-- arch/unicore32/Kconfig | 1
-rw-r--r-- arch/unicore32/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/x86/Kconfig | 2
-rw-r--r-- arch/x86/boot/Makefile | 1
-rw-r--r-- arch/x86/boot/compressed/Makefile | 1
-rw-r--r-- arch/x86/entry/vdso/Makefile | 1
-rw-r--r-- arch/x86/include/asm/dma-mapping.h | 2
-rw-r--r-- arch/x86/kernel/machine_kexec_64.c | 2
-rw-r--r-- arch/x86/realmode/rm/Makefile | 1
-rw-r--r-- arch/xtensa/Kconfig | 1
-rw-r--r-- arch/xtensa/include/asm/dma-mapping.h | 4
-rw-r--r-- arch/xtensa/include/uapi/asm/mman.h | 1
81 files changed, 862 insertions, 2280 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index ba1b626bca00..f6b649d88ec8 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -205,9 +205,6 @@ config HAVE_NMI_WATCHDOG
config HAVE_ARCH_TRACEHOOK
bool
-config HAVE_DMA_ATTRS
- bool
-
config HAVE_DMA_CONTIGUOUS
bool
@@ -632,4 +629,7 @@ config OLD_SIGACTION
config COMPAT_OLD_SIGACTION
bool
+config ARCH_NO_COHERENT_DMA_MMAP
+ bool
+
source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index f515a4dbf7a0..9d8a85801ed1 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,7 +9,6 @@ config ALPHA
select HAVE_OPROFILE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
- select HAVE_DMA_ATTRS
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 72a8ca7796d9..3c3451f58ff4 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -10,8 +10,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dma_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
#define dma_cache_sync(dev, va, size, dir) ((void)0)
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index ab336c06153e..fec1947b8dbc 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -47,7 +47,6 @@
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_SPACEAVAIL 5 /* ensure resources are available */
#define MADV_DONTNEED 6 /* don't need these pages */
-#define MADV_FREE 7 /* free pages only if memory pressure */
/* common/generic parameters */
#define MADV_FREE 8 /* free pages only if memory pressure */
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 2d28ba939d8e..660205414f1d 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -11,192 +11,11 @@
#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H
-#include <asm-generic/dma-coherent.h>
-#include <asm/cacheflush.h>
+extern struct dma_map_ops arc_dma_ops;
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
- dma_addr_t dma_handle);
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-/*
- * streaming DMA Mapping API...
- * CPU accesses page via normal paddr, thus needs to explicitly made
- * consistent before each use
- */
-
-static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_FROM_DEVICE:
- dma_cache_inv(paddr, size);
- break;
- case DMA_TO_DEVICE:
- dma_cache_wback(paddr, size);
- break;
- case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv(paddr, size);
- break;
- default:
- pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
- }
-}
-
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir);
-
-#define _dma_cache_sync(addr, sz, dir) \
-do { \
- if (__builtin_constant_p(dir)) \
- __inline_dma_cache_sync(addr, sz, dir); \
- else \
- __arc_dma_cache_sync(addr, sz, dir); \
-} \
-while (0);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction dir)
-{
- _dma_cache_sync((unsigned long)cpu_addr, size, dir);
- return (dma_addr_t)cpu_addr;
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- unsigned long paddr = page_to_phys(page) + offset;
- return dma_map_single(dev, (void *)paddr, size, dir);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i)
- s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
- s->length, dir);
-
- return nents;
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i)
- dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
- _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
- _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- _dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- _dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i)
- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i)
- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline int dma_supported(struct device *dev, u64 dma_mask)
-{
- /* Support 32 bit DMA mask exclusively */
- return dma_mask == DMA_BIT_MASK(32);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
+ return &arc_dma_ops;
}
#endif
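With the arc header reduced to get_dma_ops(), the streaming entry points it used to open-code are supplied by the generic layer instead. A simplified sketch of how dma_map_single() now reaches arc_dma_map_page() in this era of the tree (the real inline, moved out of <asm-generic/dma-mapping-common.h> by this series, additionally does dma-debug bookkeeping):

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	/* on arc this dispatches to arc_dma_map_page() */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, attrs);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)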
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 29a46bb198cc..01eaf88bf821 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -17,18 +17,14 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
-#include <linux/export.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
-/*
- * Helpers for Coherent DMA API.
- */
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+
+static void *arc_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
- void *paddr;
+ void *paddr, *kvaddr;
/* This is linear addr (0x8000_0000 based) */
paddr = alloc_pages_exact(size, gfp);
@@ -38,22 +34,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
/* This is bus address, platform dependent */
*dma_handle = (dma_addr_t)paddr;
- return paddr;
-}
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- free_pages_exact((void *)dma_handle, size);
-}
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- void *paddr, *kvaddr;
-
/*
* IOC relies on all data (even coherent DMA data) being in cache
* Thus allocate normal cached memory
@@ -65,22 +45,15 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster)
*/
- if (is_isa_arcv2() && ioc_exists)
- return dma_alloc_noncoherent(dev, size, dma_handle, gfp);
-
- /* This is linear addr (0x8000_0000 based) */
- paddr = alloc_pages_exact(size, gfp);
- if (!paddr)
- return NULL;
+ if ((is_isa_arcv2() && ioc_exists) ||
+ dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ return paddr;
/* This is kernel Virtual address (0x7000_0000 based) */
kvaddr = ioremap_nocache((unsigned long)paddr, size);
if (kvaddr == NULL)
return NULL;
- /* This is bus address, platform dependent */
- *dma_handle = (dma_addr_t)paddr;
-
/*
* Evict any existing L1 and/or L2 lines for the backing page
* in case it was used earlier as a normal "cached" page.
@@ -95,26 +68,111 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return kvaddr;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
- dma_addr_t dma_handle)
+static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- if (is_isa_arcv2() && ioc_exists)
- return dma_free_noncoherent(dev, size, kvaddr, dma_handle);
-
- iounmap((void __force __iomem *)kvaddr);
+ if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
+ !(is_isa_arcv2() && ioc_exists))
+ iounmap((void __force __iomem *)vaddr);
free_pages_exact((void *)dma_handle, size);
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
- * Helper for streaming DMA...
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to explicitly made
+ * consistent before each use
*/
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir)
+static void _dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+ default:
+ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ }
+}
+
+static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ unsigned long paddr = page_to_phys(page) + offset;
+ _dma_cache_sync(paddr, size, dir);
+ return (dma_addr_t)paddr;
+}
+
+static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+
+ return nents;
+}
+
+static void arc_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+}
+
+static void arc_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- __inline_dma_cache_sync(paddr, size, dir);
+ _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}
-EXPORT_SYMBOL(__arc_dma_cache_sync);
+
+static void arc_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static void arc_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static int arc_dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == DMA_BIT_MASK(32);
+}
+
+struct dma_map_ops arc_dma_ops = {
+ .alloc = arc_dma_alloc,
+ .free = arc_dma_free,
+ .map_page = arc_dma_map_page,
+ .map_sg = arc_dma_map_sg,
+ .sync_single_for_device = arc_dma_sync_single_for_device,
+ .sync_single_for_cpu = arc_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arc_dma_sync_sg_for_device,
+ .dma_supported = arc_dma_supported,
+};
+EXPORT_SYMBOL(arc_dma_ops);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 37c7951ca4f5..4f799e567fc8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -47,7 +47,6 @@ config ARM
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ccb3aa64640d..6ad1ceda62a5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -41,13 +41,6 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *dev, u64 mask);
-/*
- * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
- * implementations, we don't provide a dma_cache_sync function so drivers using
- * this API are highlighted with build warnings.
- */
-#include <asm-generic/dma-mapping-common.h>
-
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6be3fa2310ee..8cc62289a63e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -64,7 +64,6 @@ config ARM64
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 61e08f360e31..ba437f090a74 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -64,8 +64,6 @@ static inline bool is_device_dma_coherent(struct device *dev)
return dev->archdata.dma_coherent;
}
-#include <asm-generic/dma-mapping-common.h>
-
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return (dma_addr_t)paddr;
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index ae7ac9205d20..1115f2a645d1 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -1,350 +1,14 @@
#ifndef __ASM_AVR32_DMA_MAPPING_H
#define __ASM_AVR32_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/scatterlist.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
-
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
int direction);
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- /* Fix when needed. I really don't know of any limitations */
- return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
- return 0;
-}
+extern struct dma_map_ops avr32_dma_ops;
-/*
- * dma_map_single can't fail as it is implemented now.
- */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- return 0;
+ return &avr32_dma_ops;
}
-/**
- * dma_alloc_coherent - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp);
-
-/**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle);
-
-/**
- * dma_alloc_writecombine - allocate write-combining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp);
-
-/**
- * dma_free_coherent - free memory allocated by dma_alloc_writecombine
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_writecombine
- * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
- * @handle: device-view address returned from dma_alloc_writecombine
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_writecombine().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * during and after this call executing are illegal.
- */
-extern void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle);
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_single() or dma_sync_single().
- */
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction direction)
-{
- dma_cache_sync(dev, cpu_addr, size, direction);
- return virt_to_bus(cpu_addr);
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
-
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page() or dma_sync_single().
- */
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- return dma_map_single(dev, page_address(page) + offset,
- size, direction);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- dma_unmap_single(dev, dma_address, size, direction);
-}
-
-/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- char *virt;
-
- sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
- virt = sg_virt(sg);
- dma_cache_sync(dev, virt, sg->length, direction);
- }
-
- return nents;
-}
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
-
-}
-
-/**
- * dma_sync_single_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the DMA mapping,
- * you must call this function before doing so. At the next point you
- * give the DMA address back to the card, you must first perform a
- * dma_sync_single_for_device, and then the device again owns the
- * buffer.
- */
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- /*
- * No need to do anything since the CPU isn't supposed to
- * touch this memory after we flushed it at mapping- or
- * sync-for-device time.
- */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything, that's all the pci API can do */
- dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything, that's all the pci API can do */
- dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
-}
-
-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- /*
- * No need to do anything since the CPU isn't supposed to
- * touch this memory after we flushed it at mapping- or
- * sync-for-device time.
- */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i)
- dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif /* __ASM_AVR32_DMA_MAPPING_H */
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 50cdb5b10f0f..92cf1fb2b3e6 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -9,9 +9,14 @@
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
-#include <asm/addrspace.h>
+#include <asm/processor.h>
#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/addrspace.h>
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
@@ -93,60 +98,100 @@ static void __dma_free(struct device *dev, size_t size,
__free_page(page++);
}
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
+static void *avr32_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
struct page *page;
- void *ret = NULL;
+ dma_addr_t phys;
page = __dma_alloc(dev, size, handle, gfp);
- if (page)
- ret = phys_to_uncached(page_to_phys(page));
+ if (!page)
+ return NULL;
+ phys = page_to_phys(page);
- return ret;
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+ /* Now, map the page into P3 with write-combining turned on */
+ *handle = phys;
+ return __ioremap(phys, size, _PAGE_BUFFER);
+ } else {
+ return phys_to_uncached(phys);
+ }
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle)
+static void avr32_dma_free(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
{
- void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
struct page *page;
- pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
- cpu_addr, (unsigned long)handle, (unsigned)size);
- BUG_ON(!virt_addr_valid(addr));
- page = virt_to_page(addr);
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+ iounmap(cpu_addr);
+
+ page = phys_to_page(handle);
+ } else {
+ void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
+
+ pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
+ cpu_addr, (unsigned long)handle, (unsigned)size);
+
+ BUG_ON(!virt_addr_valid(addr));
+ page = virt_to_page(addr);
+ }
+
__dma_free(dev, size, page, handle);
}
-EXPORT_SYMBOL(dma_free_coherent);
-void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
+static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
- struct page *page;
- dma_addr_t phys;
+ void *cpu_addr = page_address(page) + offset;
- page = __dma_alloc(dev, size, handle, gfp);
- if (!page)
- return NULL;
+ dma_cache_sync(dev, cpu_addr, size, direction);
+ return virt_to_bus(cpu_addr);
+}
- phys = page_to_phys(page);
- *handle = phys;
+static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ char *virt;
- /* Now, map the page into P3 with write-combining turned on */
- return __ioremap(phys, size, _PAGE_BUFFER);
+ sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+ virt = sg_virt(sg);
+ dma_cache_sync(dev, virt, sg->length, direction);
+ }
+
+ return nents;
}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle)
+static void avr32_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
- struct page *page;
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+}
- iounmap(cpu_addr);
+static void avr32_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+ struct scatterlist *sg;
- page = phys_to_page(handle);
- __dma_free(dev, size, page, handle);
+ for_each_sg(sglist, sg, nents, i)
+ dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}
-EXPORT_SYMBOL(dma_free_writecombine);
+
+struct dma_map_ops avr32_dma_ops = {
+ .alloc = avr32_dma_alloc,
+ .free = avr32_dma_free,
+ .map_page = avr32_dma_map_page,
+ .map_sg = avr32_dma_map_sg,
+ .sync_single_for_device = avr32_dma_sync_single_for_device,
+ .sync_sg_for_device = avr32_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(avr32_dma_ops);
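avr32's dedicated dma_alloc_writecombine()/dma_free_writecombine() entry points disappear here because the generic API expresses write-combining as an attribute: the common header of this period wraps dma_alloc_attrs() roughly as below, which is how callers end up in avr32_dma_alloc() with DMA_ATTR_WRITE_COMBINE set:

static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	/* ->alloc == avr32_dma_alloc, which tests the attribute */
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}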
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index 054d9ec57d9d..3490570aaa82 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -8,36 +8,6 @@
#define _BLACKFIN_DMA_MAPPING_H
#include <asm/cacheflush.h>
-struct scatterlist;
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-
-/*
- * Now for the API extensions over the pci_ one
- */
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_supported(d, m) (1)
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
@@ -66,102 +36,11 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
__dma_sync(addr, size, dir);
}
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
-{
- _dma_sync((dma_addr_t)ptr, size, dir);
- return (dma_addr_t) ptr;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- return dma_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
-{
- dma_unmap_single(dev, dma_addr, size, dir);
-}
+extern struct dma_map_ops bfin_dma_ops;
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- BUG_ON(!valid_dma_direction(dir));
+ return &bfin_dma_ops;
}
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- _dma_sync(handle + offset, size, dir);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
-{
- dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
-{
- dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-}
-
-extern void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir);
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- _dma_sync((dma_addr_t)vaddr, size, dir);
-}
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index df437e52d9df..771afe6e4264 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -78,8 +78,8 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
spin_unlock_irqrestore(&dma_page_lock, flags);
}
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static void *bfin_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
@@ -92,15 +92,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void
-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
__free_dma_pages((unsigned long)vaddr, get_pages(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
* Streaming DMA mappings
@@ -112,9 +109,9 @@ void __dma_sync(dma_addr_t addr, size_t size,
}
EXPORT_SYMBOL(__dma_sync);
-int
-dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
- enum dma_data_direction direction)
+static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
@@ -126,10 +123,10 @@ dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
- int nelems, enum dma_data_direction direction)
+static void bfin_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg_list, int nelems,
+ enum dma_data_direction direction)
{
struct scatterlist *sg;
int i;
@@ -139,4 +136,31 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
+
+ _dma_sync(handle, size, dir);
+ return handle;
+}
+
+static inline void bfin_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ _dma_sync(handle, size, dir);
+}
+
+struct dma_map_ops bfin_dma_ops = {
+ .alloc = bfin_dma_alloc,
+ .free = bfin_dma_free,
+
+ .map_page = bfin_dma_map_page,
+ .map_sg = bfin_dma_map_sg,
+
+ .sync_single_for_device = bfin_dma_sync_single_for_device,
+ .sync_sg_for_device = bfin_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(bfin_dma_ops);
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 77ea09b8bce1..79049d432d3c 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -17,6 +17,7 @@ config C6X
select OF_EARLY_FLATTREE
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
+ select ARCH_NO_COHERENT_DMA_MMAP
config MMU
def_bool n
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index bbd7774e4d4e..6b5cd7b0cf32 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -12,104 +12,22 @@
#ifndef _ASM_C6X_DMA_MAPPING_H
#define _ASM_C6X_DMA_MAPPING_H
-#include <linux/dma-debug.h>
-#include <asm-generic/dma-coherent.h>
-
-#define dma_supported(d, m) 1
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- debug_dma_mapping_error(dev, dma_addr);
- return dma_addr == ~0;
-}
-
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction dir);
+#define DMA_ERROR_CODE ~0
-extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir);
+extern struct dma_map_ops c6x_dma_ops;
-extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction);
-
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction);
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- dma_addr_t handle;
-
- handle = dma_map_single(dev, page_address(page) + offset, size, dir);
-
- debug_dma_map_page(dev, page, offset, size, dir, handle, false);
-
- return handle;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- dma_unmap_single(dev, handle, size, dir);
-
- debug_dma_unmap_page(dev, handle, size, dir, false);
+ return &c6x_dma_ops;
}
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir);
-
-extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
- size_t size,
- enum dma_data_direction dir);
-
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir);
-
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir);
-
extern void coherent_mem_init(u32 start, u32 size);
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
-
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
+void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs);
+void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs);
#endif /* _ASM_C6X_DMA_MAPPING_H */
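c6x keeps its all-bits-set error convention by defining DMA_ERROR_CODE instead of an open-coded dma_mapping_error(): the generic helper consults ops->mapping_error when an architecture provides one, and otherwise falls back to comparing against DMA_ERROR_CODE, roughly:

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;	/* ~0 on c6x */
#else
	return 0;
#endif
}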
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index ab7b12de144d..8a80f3a250c0 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -36,110 +36,101 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size,
}
}
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
+static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
- dma_addr_t addr = virt_to_phys(ptr);
+ dma_addr_t handle = virt_to_phys(page_address(page) + offset);
- c6x_dma_sync(addr, size, dir);
-
- debug_dma_map_page(dev, virt_to_page(ptr),
- (unsigned long)ptr & ~PAGE_MASK, size,
- dir, addr, true);
- return addr;
+ c6x_dma_sync(handle, size, dir);
+ return handle;
}
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
+static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
{
c6x_dma_sync(handle, size, dir);
-
- debug_dma_unmap_page(dev, handle, size, dir, true);
}
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
- for_each_sg(sglist, sg, nents, i)
- sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
- dir);
-
- debug_dma_map_sg(dev, sglist, nents, nents, dir);
+ for_each_sg(sglist, sg, nents, i) {
+ sg->dma_address = sg_phys(sg);
+ c6x_dma_sync(sg->dma_address, sg->length, dir);
+ }
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
for_each_sg(sglist, sg, nents, i)
- dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
+ c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
- debug_dma_unmap_sg(dev, sglist, nents, dir);
}
-EXPORT_SYMBOL(dma_unmap_sg);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
+static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
c6x_dma_sync(handle, size, dir);
- debug_dma_sync_single_for_cpu(dev, handle, size, dir);
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
+static void c6x_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
c6x_dma_sync(handle, size, dir);
- debug_dma_sync_single_for_device(dev, handle, size, dir);
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static void c6x_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sglist, sg, nents, i)
- dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
sg->length, dir);
- debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static void c6x_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sglist, sg, nents, i)
- dma_sync_single_for_device(dev, sg_dma_address(sg),
+ c6x_dma_sync_single_for_device(dev, sg_dma_address(sg),
sg->length, dir);
- debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+struct dma_map_ops c6x_dma_ops = {
+ .alloc = c6x_dma_alloc,
+ .free = c6x_dma_free,
+ .map_page = c6x_dma_map_page,
+ .unmap_page = c6x_dma_unmap_page,
+ .map_sg = c6x_dma_map_sg,
+ .unmap_sg = c6x_dma_unmap_sg,
+ .sync_single_for_device = c6x_dma_sync_single_for_device,
+ .sync_single_for_cpu = c6x_dma_sync_single_for_cpu,
+ .sync_sg_for_device = c6x_dma_sync_sg_for_device,
+ .sync_sg_for_cpu = c6x_dma_sync_sg_for_cpu,
+};
+EXPORT_SYMBOL(c6x_dma_ops);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 4187e5180373..f7ee63af2541 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -73,8 +73,8 @@ static void __free_dma_pages(u32 addr, int order)
* Allocate DMA coherent memory space and return both the kernel
* virtual and DMA address for that space.
*/
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
+void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
u32 paddr;
int order;
@@ -94,13 +94,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return phys_to_virt(paddr);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
* Free DMA coherent memory as defined by the above mapping.
*/
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order;
@@ -111,7 +110,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
__free_dma_pages(virt_to_phys(vaddr), order);
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
* Initialise the coherent DMA memory allocator using the given uncached region.
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
index ee55578d9834..8d5efa58cce1 100644
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ b/arch/cris/arch-v32/drivers/pci/dma.c
@@ -16,21 +16,18 @@
#include <linux/gfp.h>
#include <asm/io.h>
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static void *v32_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
- int order = get_order(size);
+
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
- if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
- return ret;
-
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
gfp |= GFP_DMA;
- ret = (void *)__get_free_pages(gfp, order);
+ ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
@@ -39,12 +36,45 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static inline dma_addr_t v32_dma_map_page(struct device *dev,
+ struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- int order = get_order(size);
+ return page_to_phys(page) + offset;
+}
- if (!dma_release_from_coherent(dev, order, vaddr))
- free_pages((unsigned long)vaddr, order);
+static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ printk("Map sg\n");
+ return nents;
+}
+
+static inline int v32_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+ return 1;
}
+struct dma_map_ops v32_dma_ops = {
+ .alloc = v32_dma_alloc,
+ .free = v32_dma_free,
+ .map_page = v32_dma_map_page,
+ .map_sg = v32_dma_map_sg,
+ .dma_supported = v32_dma_supported,
+};
+EXPORT_SYMBOL(v32_dma_ops);
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 57f794ee6039..5a370178a0e9 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -1,156 +1,20 @@
-/* DMA mapping. Nothing tricky here, just virt_to_phys */
-
#ifndef _ASM_CRIS_DMA_MAPPING_H
#define _ASM_CRIS_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
#ifdef CONFIG_PCI
-#include <asm-generic/dma-coherent.h>
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+extern struct dma_map_ops v32_dma_ops;
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-#else
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- BUG();
- return NULL;
+ return &v32_dma_ops;
}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+#else
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- BUG();
+ BUG();
+ return NULL;
}
#endif
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- printk("Map sg\n");
- return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if(mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if(!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -158,15 +22,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-
#endif
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 03bfd6bf03e7..eefd9a4ed156 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -15,6 +15,7 @@ config FRV
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select HAVE_DEBUG_STACKOVERFLOW
+ select ARCH_NO_COHERENT_DMA_MMAP
config ZONE_DMA
bool
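ARCH_NO_COHERENT_DMA_MMAP replaces the hand-rolled dma_mmap_coherent()/dma_get_sgtable() stubs that returned -EINVAL, deleted from the frv header further down: an architecture that cannot map coherent memory into userspace now says so once in Kconfig, and the common code refuses the request. A sketch of the guard, assuming dma_common_mmap() takes roughly this shape after the series (the elided body is the normal remap_pfn_range() path):

int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;

#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
	/* set up vma->vm_page_prot and remap_pfn_range() the buffer */
#endif
	return ret;
}

The failure code becomes -ENXIO rather than the old -EINVAL; callers that only test for nonzero are unaffected.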
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index 2840adcd6d92..9a82bfa4303b 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -1,128 +1,17 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#include <linux/device.h>
-#include <linux/scatterlist.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
-
-/*
- * See Documentation/DMA-API.txt for the description of how the
- * following DMA API should work.
- */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
-
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction);
-
-static inline
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-
-static inline
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-extern
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction);
-
-static inline
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline
-int dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
+extern struct dma_map_ops frv_dma_ops;
-static inline
-int dma_set_mask(struct device *dev, u64 mask)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
+ return &frv_dma_ops;
}
static inline
@@ -132,19 +21,4 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
flush_write_buffers();
}
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
-
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/frv/include/asm/io.h b/arch/frv/include/asm/io.h
index 70dfbea8c8d7..8062fc73fad0 100644
--- a/arch/frv/include/asm/io.h
+++ b/arch/frv/include/asm/io.h
@@ -43,9 +43,20 @@ static inline unsigned long _swapl(unsigned long v)
//#define __iormb() asm volatile("membar")
//#define __iowmb() asm volatile("membar")
-#define __raw_readb __builtin_read8
-#define __raw_readw __builtin_read16
-#define __raw_readl __builtin_read32
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return __builtin_read8((volatile void __iomem *)addr);
+}
+
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return __builtin_read16((volatile void __iomem *)addr);
+}
+
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return __builtin_read32((volatile void __iomem *)addr);
+}
#define __raw_writeb(datum, addr) __builtin_write8(addr, datum)
#define __raw_writew(datum, addr) __builtin_write16(addr, datum)
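Turning the frv __raw_read*() macros into inline functions gives them real prototypes: the argument must be an __iomem pointer, const qualification is accepted, and the const is cast away only at the __builtin_read*() boundary. A hedged example of a caller the function form now checks properly; read_status() and its 0x10 offset are invented for illustration:

static u32 read_status(const volatile void __iomem *regs)
{
	/* accepted: __raw_readl() takes a const volatile __iomem
	 * pointer and drops the const internally */
	return __raw_readl(regs + 0x10);
}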
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
index 8eeea0d77aad..082be49b5df0 100644
--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
+++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -34,7 +34,8 @@ struct dma_alloc_record {
static DEFINE_SPINLOCK(dma_alloc_lock);
static LIST_HEAD(dma_alloc_list);
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
+static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
struct dma_alloc_record *new;
struct list_head *this = &dma_alloc_list;
@@ -84,9 +85,8 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
return NULL;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct dma_alloc_record *rec;
unsigned long flags;
@@ -105,22 +105,9 @@ void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_
BUG();
}
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
-
- return virt_to_bus(ptr);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
@@ -135,14 +122,49 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
+static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
BUG_ON(direction == DMA_NONE);
flush_dcache_page(page);
return (dma_addr_t) page_to_phys(page) + offset;
}
-EXPORT_SYMBOL(dma_map_page);
+static void frv_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+static void frv_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+
+static int frv_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+ return 1;
+}
+
+struct dma_map_ops frv_dma_ops = {
+ .alloc = frv_dma_alloc,
+ .free = frv_dma_free,
+ .map_page = frv_dma_map_page,
+ .map_sg = frv_dma_map_sg,
+ .sync_single_for_device = frv_dma_sync_single_for_device,
+ .sync_sg_for_device = frv_dma_sync_sg_for_device,
+ .dma_supported = frv_dma_supported,
+};
+EXPORT_SYMBOL(frv_dma_ops);
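dma_map_single() leaves the architecture entirely because the generic layer now synthesizes it from .map_page, as sketched after the cris hunks. One detail visible in the hunks themselves: the old frv dma_map_single() wrote back and invalidated the exact byte range with frv_cache_wback_inv(), whereas frv_dma_map_page() flushes with flush_dcache_page(), i.e. at page granularity. A hypothetical driver fragment exercising the retained callbacks; example_tx() is illustrative, not from the tree:

static void example_tx(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t bus;

	/* dispatches into frv_dma_map_page(), which flushes the page */
	bus = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	/* dispatches into frv_dma_sync_single_for_device() */
	dma_sync_single_for_device(dev, bus, len, DMA_TO_DEVICE);

	/* start the transfer at 'bus' */
}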
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 4d1f01dc46e5..316b7b65348d 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -18,7 +18,9 @@
#include <linux/scatterlist.h>
#include <asm/io.h>
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
+static void *frv_dma_alloc(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret;
@@ -29,29 +31,15 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
consistent_free(vaddr);
}
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
-
- return virt_to_bus(ptr);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
unsigned long dampr2;
void *vaddr;
@@ -79,14 +67,48 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
+static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
- BUG_ON(direction == DMA_NONE);
flush_dcache_page(page);
return (dma_addr_t) page_to_phys(page) + offset;
}
-EXPORT_SYMBOL(dma_map_page);
+static void frv_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+static void frv_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+
+static int frv_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+ return 1;
+}
+
+struct dma_map_ops frv_dma_ops = {
+ .alloc = frv_dma_alloc,
+ .free = frv_dma_free,
+ .map_page = frv_dma_map_page,
+ .map_sg = frv_dma_map_sg,
+ .sync_single_for_device = frv_dma_sync_single_for_device,
+ .sync_sg_for_device = frv_dma_sync_sg_for_device,
+ .dma_supported = frv_dma_supported,
+};
+EXPORT_SYMBOL(frv_dma_ops);
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index cd1f754c1336..986ea84caaed 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -15,7 +15,6 @@ config H8300
select OF_IRQ
select OF_EARLY_FLATTREE
select HAVE_MEMBLOCK
- select HAVE_DMA_ATTRS
select CLKSRC_OF
select H8300_TMR8
select HAVE_KERNEL_GZIP
diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h
index d9b5b806afe6..7ac7fadffed0 100644
--- a/arch/h8300/include/asm/dma-mapping.h
+++ b/arch/h8300/include/asm/dma-mapping.h
@@ -8,6 +8,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return &h8300_dma_map_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
#endif
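h8300 loses only the Kconfig select and the explicit include: it was already converted to dma_map_ops, and this series makes the ops-based wrappers unconditional by folding asm-generic/dma-mapping-common.h into <linux/dma-mapping.h>. Until now the generic header carried a fallback for unconverted architectures, roughly of this shape (a simplified sketch, not the verbatim old header):

#ifdef CONFIG_HAVE_DMA_ATTRS
#include <asm-generic/dma-mapping-common.h>	/* ops-based wrappers */
#else
/* fallback: attrs were silently ignored on unconverted architectures */
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag);
}
#endif

With every architecture converted, both HAVE_DMA_ATTRS and the dual path can go.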
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 4dc89d1f9c48..57298e7b4867 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -27,7 +27,6 @@ config HEXAGON
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
select GENERIC_CPU_DEVICES
- select HAVE_DMA_ATTRS
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 268fde8a4575..aa6203464520 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -49,8 +49,6 @@ extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
-#include <asm-generic/dma-mapping-common.h>
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index eb0249e37981..fb0515eb639b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -25,7 +25,6 @@ config IA64
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
select HAVE_FUNCTION_TRACER
- select HAVE_DMA_ATTRS
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 9beccf8010bd..d472805edfa9 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -25,8 +25,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
#define get_dma_ops(dev) platform_dma_get_ops(dev)
-#include <asm-generic/dma-mapping-common.h>
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 05aa53594d49..96c536194287 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -1,123 +1,17 @@
#ifndef _M68K_DMA_MAPPING_H
#define _M68K_DMA_MAPPING_H
-#include <asm/cache.h>
+extern struct dma_map_ops m68k_dma_ops;
-struct scatterlist;
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- return 0;
-}
-
-extern void *dma_alloc_coherent(struct device *, size_t,
- dma_addr_t *, gfp_t);
-extern void dma_free_coherent(struct device *, size_t,
- void *, dma_addr_t);
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- /* attrs is not supported and ignored */
- return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- /* attrs is not supported and ignored */
- dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ return &m68k_dma_ops;
}
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t flag)
-{
- return dma_alloc_coherent(dev, size, handle, flag);
-}
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *addr, dma_addr_t handle)
-{
- dma_free_coherent(dev, size, addr, handle);
-}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
/* we use coherent allocation, so not much to do here. */
}
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
- enum dma_data_direction);
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-extern dma_addr_t dma_map_page(struct device *, struct page *,
- unsigned long, size_t size,
- enum dma_data_direction);
-static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction dir)
-{
-}
-
-extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything for now */
- dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything for now */
- dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
-{
- return 0;
-}
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 564665f9af30..cbc78b4117b5 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -18,8 +18,8 @@
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t flag)
+static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t flag, struct dma_attrs *attrs)
{
struct page *page, **map;
pgprot_t pgprot;
@@ -61,8 +61,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return addr;
}
-void dma_free_coherent(struct device *dev, size_t size,
- void *addr, dma_addr_t handle)
+static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
vfree(addr);
@@ -72,8 +72,8 @@ void dma_free_coherent(struct device *dev, size_t size,
#include <asm/cacheflush.h>
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static void *m68k_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
/* ignore region specifiers */
@@ -90,19 +90,16 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
+static void m68k_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -118,10 +115,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
break;
}
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static void m68k_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nents, enum dma_data_direction dir)
{
int i;
struct scatterlist *sg;
@@ -131,31 +127,19 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
dir);
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
- enum dma_data_direction dir)
-{
- dma_addr_t handle = virt_to_bus(addr);
-
- dma_sync_single_for_device(dev, handle, size, dir);
- return handle;
-}
-EXPORT_SYMBOL(dma_map_single);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
dma_addr_t handle = page_to_phys(page) + offset;
dma_sync_single_for_device(dev, handle, size, dir);
return handle;
}
-EXPORT_SYMBOL(dma_map_page);
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction dir)
+static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
@@ -167,4 +151,13 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
}
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
+
+struct dma_map_ops m68k_dma_ops = {
+ .alloc = m68k_dma_alloc,
+ .free = m68k_dma_free,
+ .map_page = m68k_dma_map_page,
+ .map_sg = m68k_dma_map_sg,
+ .sync_single_for_device = m68k_dma_sync_single_for_device,
+ .sync_sg_for_device = m68k_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(m68k_dma_ops);
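One subtlety in the m68k conversion: m68k_dma_map_page() and m68k_dma_map_sg() still call dma_sync_single_for_device(), which is now the generic wrapper, so the call bounces through the ops table straight back into m68k_dma_sync_single_for_device(). Behaviour is unchanged, at the cost of one extra indirection:

/*
 * call chain after this patch (simplified):
 *
 *   m68k_dma_map_page()
 *     -> dma_sync_single_for_device()         generic inline wrapper
 *          -> ops->sync_single_for_device()   which is
 *               m68k_dma_sync_single_for_device()
 */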
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
index eb5cdec94be0..27af5d479ce6 100644
--- a/arch/metag/include/asm/dma-mapping.h
+++ b/arch/metag/include/asm/dma-mapping.h
@@ -1,177 +1,11 @@
#ifndef _ASM_METAG_DMA_MAPPING_H
#define _ASM_METAG_DMA_MAPPING_H
-#include <linux/mm.h>
+extern struct dma_map_ops metag_dma_ops;
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <linux/scatterlist.h>
-#include <asm/bug.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);
-
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(size == 0);
- dma_sync_for_device(ptr, size, direction);
- return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nents == 0 || sglist[0].length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- BUG_ON(!sg_page(sg));
-
- sg->dma_address = sg_phys(sg);
- dma_sync_for_device(sg_virt(sg), sg->length, direction);
- }
-
- return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
- direction);
- return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nhwentries == 0 || sglist[0].length == 0);
-
- for_each_sg(sglist, sg, nhwentries, i) {
- BUG_ON(!sg_page(sg));
-
- sg->dma_address = sg_phys(sg);
- dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
- }
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size,
- direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_for_device(phys_to_virt(dma_handle)+offset, size,
- direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i)
- dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i)
- dma_sync_for_device(sg_virt(sg), sg->length, direction);
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- return 0;
-}
-
-#define dma_supported(dev, mask) (1)
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
+ return &metag_dma_ops;
}
/*
@@ -184,11 +18,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
-/* drivers/base/dma-mapping.c */
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index c700d625067a..e12368d02155 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -171,8 +171,8 @@ out:
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
+static void *metag_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
struct page *page;
struct metag_vm_region *c;
@@ -263,13 +263,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
no_page:
return NULL;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
* free a page as defined by the above mapping.
*/
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct metag_vm_region *c;
unsigned long flags, addr;
@@ -329,16 +328,19 @@ no_area:
__func__, vaddr);
dump_stack();
}
-EXPORT_SYMBOL(dma_free_coherent);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
- int ret = -ENXIO;
-
unsigned long flags, user_size, kern_size;
struct metag_vm_region *c;
+ int ret = -ENXIO;
+
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -364,25 +366,6 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
-
-
/*
* Initialise the consistent memory allocation.
*/
@@ -423,7 +406,7 @@ early_initcall(dma_alloc_init);
/*
* make an area consistent to devices.
*/
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
{
/*
* Ensure any writes get through the write combiner. This is necessary
@@ -465,12 +448,11 @@ void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
wmb();
}
-EXPORT_SYMBOL(dma_sync_for_device);
/*
* make an area consistent to the core.
*/
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
{
/*
* Hardware L2 cache prefetch doesn't occur across 4K physical
@@ -497,4 +479,100 @@ void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
rmb();
}
-EXPORT_SYMBOL(dma_sync_for_cpu);
+
+static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+ dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
+ direction);
+ return page_to_phys(page) + offset;
+}
+
+static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+}
+
+static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg->dma_address = sg_phys(sg);
+ dma_sync_for_device(sg_virt(sg), sg->length, direction);
+ }
+
+ return nents;
+}
+
+
+static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nhwentries, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nhwentries, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg->dma_address = sg_phys(sg);
+ dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+ }
+}
+
+static void metag_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+}
+
+static void metag_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ dma_sync_for_device(sg_virt(sg), sg->length, direction);
+}
+
+struct dma_map_ops metag_dma_ops = {
+ .alloc = metag_dma_alloc,
+ .free = metag_dma_free,
+ .map_page = metag_dma_map_page,
+ .unmap_page = metag_dma_unmap_page,
+ .map_sg = metag_dma_map_sg,
+ .unmap_sg = metag_dma_unmap_sg,
+ .sync_single_for_device = metag_dma_sync_single_for_device,
+ .sync_single_for_cpu = metag_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = metag_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = metag_dma_sync_sg_for_device,
+ .mmap = metag_dma_mmap,
+};
+EXPORT_SYMBOL(metag_dma_ops);
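The metag .mmap callback folds the old dma_mmap_coherent()/dma_mmap_writecombine() pair into a single entry point keyed on DMA_ATTR_WRITE_COMBINE. A hypothetical caller requesting a write-combined userspace mapping through the attrs-based API of this era; example_mmap() is invented for illustration:

static int example_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

dma_mmap_writecombine() itself survives as a generic helper that does essentially this on the caller's behalf.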
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 5ecd0287a874..53b69deceb99 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -19,7 +19,6 @@ config MICROBLAZE
select HAVE_ARCH_KGDB
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 24b12970c9cf..1884783d15c0 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -44,8 +44,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return &dma_direct_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
static inline void __dma_sync(unsigned long paddr,
size_t size, enum dma_data_direction direction)
{
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 71683a853372..fbf3f6670b69 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -31,7 +31,6 @@ config MIPS
select RTC_LIB if !MACH_LOONGSON64
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS
select HAVE_DMA_API_DEBUG
select GENERIC_IRQ_PROBE
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index e604f760c4a0..12fa79e2f1b4 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -29,8 +29,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline void dma_mark_clean(void *addr, size_t size) {}
-#include <asm-generic/dma-mapping-common.h>
-
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index b0ebe59f73fd..ccdcfcbb24aa 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -73,7 +73,6 @@
#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* don't need these pages */
-#define MADV_FREE 5 /* free pages only if memory pressure */
/* common parameters: try to keep these consistent across architectures */
#define MADV_FREE 8 /* free pages only if memory pressure */
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 78ae5552fdb8..10607f0d2bcd 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -14,6 +14,7 @@ config MN10300
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select HAVE_DEBUG_STACKOVERFLOW
+ select ARCH_NO_COHERENT_DMA_MMAP
config AM33_2
def_bool n
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index a18abfc558eb..1dcd44757f32 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -11,154 +11,14 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
#include <asm/cache.h>
#include <asm/io.h>
-/*
- * See Documentation/DMA-API.txt for the description of how the
- * following DMA API should work.
- */
-
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
-
-extern void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
-
-static inline
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- mn10300_dcache_flush_inv();
- return virt_to_bus(ptr);
-}
-
-static inline
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nents == 0 || sglist[0].length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- BUG_ON(!sg_page(sg));
-
- sg->dma_address = sg_phys(sg);
- }
+extern struct dma_map_ops mn10300_dma_ops;
- mn10300_dcache_flush_inv();
- return nents;
-}
-
-static inline
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- return page_to_bus(page) + offset;
-}
-
-static inline
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
-}
-
-static inline
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- mn10300_dcache_flush_inv();
-}
-
-static inline
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- mn10300_dcache_flush_inv();
-}
-
-
-static inline
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
-{
- mn10300_dcache_flush_inv();
-}
-
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline
-int dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
- * guarantee allocations that must be within a tighter range than
- * GFP_DMA
- */
- if (mask < 0x00ffffff)
- return 0;
- return 1;
-}
-
-static inline
-int dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
- return 0;
+ return &mn10300_dma_ops;
}
static inline
@@ -168,19 +28,4 @@ void dma_cache_sync(void *vaddr, size_t size,
mn10300_dcache_flush_inv();
}
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
-
#endif
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index e244ebe637e1..8842394cb49a 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -20,8 +20,8 @@
static unsigned long pci_sram_allocated = 0xbc000000;
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int gfp)
+static void *mn10300_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
unsigned long addr;
void *ret;
@@ -61,10 +61,9 @@ done:
printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr & ~0x20000000;
@@ -73,4 +72,60 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages(addr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
+
+static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg->dma_address = sg_phys(sg);
+ }
+
+ mn10300_dcache_flush_inv();
+ return nents;
+}
+
+static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+ return page_to_bus(page) + offset;
+}
+
+static void mn10300_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ mn10300_dcache_flush_inv();
+}
+
+static void mn10300_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction)
+{
+ mn10300_dcache_flush_inv();
+}
+
+static int mn10300_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
+ * guarantee allocations that must be within a tighter range than
+ * GFP_DMA
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+ return 1;
+}
+
+struct dma_map_ops mn10300_dma_ops = {
+ .alloc = mn10300_dma_alloc,
+ .free = mn10300_dma_free,
+ .map_page = mn10300_dma_map_page,
+ .map_sg = mn10300_dma_map_sg,
+ .sync_single_for_device = mn10300_dma_sync_single_for_device,
+ .sync_sg_for_device = mn10300_dma_sync_sg_for_device,
+ .dma_supported = mn10300_dma_supported,
+};
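The mn10300 table deliberately leaves .unmap_page, .unmap_sg and the *_for_cpu sync hooks unset: the generic wrappers treat a NULL callback as a no-op, which matches the empty inline stubs deleted from the header above. The conversion also fixes the allocator's prototype in passing, turning the old plain-int gfp argument into a proper gfp_t. A simplified sketch of the NULL-callback behaviour (the real wrapper also runs the DMA-debug hooks):

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
}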
diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h
index b5567233f7f1..bec8ac8e6ad2 100644
--- a/arch/nios2/include/asm/dma-mapping.h
+++ b/arch/nios2/include/asm/dma-mapping.h
@@ -10,131 +10,20 @@
#ifndef _ASM_NIOS2_DMA_MAPPING_H
#define _ASM_NIOS2_DMA_MAPPING_H
-#include <linux/scatterlist.h>
-#include <linux/cache.h>
-#include <asm/cacheflush.h>
+extern struct dma_map_ops nios2_dma_ops;
-static inline void __dma_sync_for_device(void *vaddr, size_t size,
- enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- switch (direction) {
- case DMA_FROM_DEVICE:
- invalidate_dcache_range((unsigned long)vaddr,
- (unsigned long)(vaddr + size));
- break;
- case DMA_TO_DEVICE:
- /*
- * We just need to flush the caches here , but Nios2 flush
- * instruction will do both writeback and invalidate.
- */
- case DMA_BIDIRECTIONAL: /* flush and invalidate */
- flush_dcache_range((unsigned long)vaddr,
- (unsigned long)(vaddr + size));
- break;
- default:
- BUG();
- }
-}
-
-static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- switch (direction) {
- case DMA_BIDIRECTIONAL:
- case DMA_FROM_DEVICE:
- invalidate_dcache_range((unsigned long)vaddr,
- (unsigned long)(vaddr + size));
- break;
- case DMA_TO_DEVICE:
- break;
- default:
- BUG();
- }
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
- size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- __dma_sync_for_device(ptr, size, direction);
- return virt_to_phys(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
+ return &nios2_dma_ops;
}
/*
-* dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
-* do any flushing here.
-*/
+ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+ * do any flushing here.
+ */
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
}
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif /* _ASM_NIOS2_DMA_MAPPING_H */
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index ac5da7594f0b..90422c367ed3 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -20,9 +20,46 @@
#include <linux/cache.h>
#include <asm/cacheflush.h>
+static inline void __dma_sync_for_device(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ case DMA_TO_DEVICE:
+ /*
+ * We just need to flush the caches here, but Nios2 flush
+ * instruction will do both writeback and invalidate.
+ */
+ case DMA_BIDIRECTIONAL: /* flush and invalidate */
+ flush_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ default:
+ BUG();
+ }
+}
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ case DMA_TO_DEVICE:
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void *nios2_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
@@ -45,24 +82,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
free_pages(addr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
int i;
- BUG_ON(!valid_dma_direction(direction));
-
for_each_sg(sg, sg, nents, i) {
void *addr;
@@ -75,40 +109,32 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction)
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- void *addr;
-
- BUG_ON(!valid_dma_direction(direction));
+ void *addr = page_address(page) + offset;
- addr = page_address(page) + offset;
__dma_sync_for_device(addr, size, direction);
-
return page_to_phys(page) + offset;
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- BUG_ON(!valid_dma_direction(direction));
-
__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
-EXPORT_SYMBOL(dma_unmap_page);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
+static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
void *addr;
int i;
- BUG_ON(!valid_dma_direction(direction));
-
if (direction == DMA_TO_DEVICE)
return;
@@ -118,69 +144,54 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
__dma_sync_for_cpu(addr, sg->length, direction);
}
}
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static void nios2_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
- BUG_ON(!valid_dma_direction(direction));
-
__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static void nios2_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
- BUG_ON(!valid_dma_direction(direction));
-
__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void nios2_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
int i;
- BUG_ON(!valid_dma_direction(direction));
-
/* Make sure that gcc doesn't leave the empty loop body. */
for_each_sg(sg, sg, nelems, i)
__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
+static void nios2_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
int i;
- BUG_ON(!valid_dma_direction(direction));
-
/* Make sure that gcc doesn't leave the empty loop body. */
for_each_sg(sg, sg, nelems, i)
__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+struct dma_map_ops nios2_dma_ops = {
+ .alloc = nios2_dma_alloc,
+ .free = nios2_dma_free,
+ .map_page = nios2_dma_map_page,
+ .unmap_page = nios2_dma_unmap_page,
+ .map_sg = nios2_dma_map_sg,
+ .unmap_sg = nios2_dma_unmap_sg,
+ .sync_single_for_device = nios2_dma_sync_single_for_device,
+ .sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = nios2_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(nios2_dma_ops);
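With nios2_dma_ops registered, nothing calls these functions directly any
more: the generic wrappers in the common DMA header dispatch through
get_dma_ops() and perform the direction check once for every architecture,
which is why the per-function BUG_ON(!valid_dma_direction(...)) calls could
be dropped above. A condensed sketch of that dispatch path (simplified from
the asm-generic/dma-mapping-common.h of this series; debug hooks omitted):

        static inline dma_addr_t dma_map_single_attrs(struct device *dev,
                        void *ptr, size_t size, enum dma_data_direction dir,
                        struct dma_attrs *attrs)
        {
                struct dma_map_ops *ops = get_dma_ops(dev);

                /* One direction check, shared by all architectures. */
                BUG_ON(!valid_dma_direction(dir));
                return ops->map_page(dev, virt_to_page(ptr),
                                     offset_in_page(ptr), size, dir, attrs);
        }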
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 443f44de1020..e118c02cc79a 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -29,9 +29,6 @@ config OPENRISC
config MMU
def_bool y
-config HAVE_DMA_ATTRS
- def_bool y
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 413bfcf86384..1f260bccb368 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -42,6 +42,4 @@ static inline int dma_supported(struct device *dev, u64 dma_mask)
return dma_mask == DMA_BIT_MASK(32);
}
-#include <asm-generic/dma-mapping-common.h>
-
#endif /* __ASM_OPENRISC_DMA_MAPPING_H */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 7c34cafdf301..14f655cf542e 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -29,6 +29,7 @@ config PARISC
select TTY # Needed for pdc_cons.c
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_NO_COHERENT_DMA_MMAP
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index d8d60a57183f..16e024602737 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -1,30 +1,11 @@
#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
-/* See Documentation/DMA-API-HOWTO.txt */
-struct hppa_dma_ops {
- int (*dma_supported)(struct device *dev, u64 mask);
- void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
- void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
- void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
- dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
- void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
- void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
- void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
- void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
-};
-
/*
-** We could live without the hppa_dma_ops indirection if we didn't want
-** to support 4 different coherent dma models with one binary (they will
-** someday be loadable modules):
+** We need to support 4 different coherent dma models with one binary:
+**
** I/O MMU consistent method dma_sync behavior
** ============= ====================== =======================
** a) PA-7x00LC uncachable host memory flush/purge
@@ -40,158 +21,22 @@ struct hppa_dma_ops {
*/
#ifdef CONFIG_PA11
-extern struct hppa_dma_ops pcxl_dma_ops;
-extern struct hppa_dma_ops pcx_dma_ops;
+extern struct dma_map_ops pcxl_dma_ops;
+extern struct dma_map_ops pcx_dma_ops;
#endif
-extern struct hppa_dma_ops *hppa_dma_ops;
-
-#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
-#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
-{
- return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
-}
-
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
-{
- return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
-}
-
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- return hppa_dma_ops->map_single(dev, ptr, size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- return hppa_dma_ops->map_sg(dev, sg, nents, direction);
-}
+extern struct dma_map_ops *hppa_dma_ops;
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- dma_unmap_single(dev, dma_address, size, direction);
-}
-
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_single_for_cpu)
- hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_single_for_device)
- hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_single_for_cpu)
- hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_single_for_device)
- hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_sg_for_cpu)
- hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_sg_for_device)
- hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- return hppa_dma_ops->dma_supported(dev, mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if(!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
+ return hppa_dma_ops;
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
- if(hppa_dma_ops->dma_sync_single_for_cpu)
+ if (hppa_dma_ops->sync_single_for_cpu)
flush_kernel_dcache_range((unsigned long)vaddr, size);
}
@@ -238,22 +83,4 @@ struct parisc_device;
void * sba_get_iommu(struct parisc_device *dev);
#endif
-/* At the moment, we panic on error for IOMMU resource exaustion */
-#define dma_mapping_error(dev, x) 0
-
-/* This API cannot be supported on PA-RISC */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
-
#endif
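With pcxl_dma_ops and pcx_dma_ops turned into plain struct dma_map_ops,
choosing between the coherency models listed in the comment above remains a
one-time assignment at boot. A hedged sketch of the idea (the real selection
lives in the parisc setup code and keys off the detected CPU type; the
condensed switch below is illustrative, not the exact upstream body):

        void __init dma_ops_init(void)
        {
                switch (boot_cpu_data.cpu_type) {
                case pcxl2:
                case pcxl:
                        /* PA7200/PA7300LC: uncacheable consistent memory */
                        hppa_dma_ops = &pcxl_dma_ops;
                        break;
                default:
                        /* other PA1.1 machines: non-consistent memory only */
                        hppa_dma_ops = &pcx_dma_ops;
                        break;
                }
        }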
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index cf830d465f75..f3db7d8eb0c2 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -43,7 +43,6 @@
#define MADV_SPACEAVAIL 5 /* insure that resources are reserved */
#define MADV_VPS_PURGE 6 /* Purge pages from VM page cache */
#define MADV_VPS_INHERIT 7 /* Inherit parents page size */
-#define MADV_FREE 8 /* free pages only if memory pressure */
/* common/generic parameters */
#define MADV_FREE 8 /* free pages only if memory pressure */
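The arch-specific definition can go because MADV_FREE now sits with the
common parameters at the same value. Userspace consumes it like any other
madvise hint; a minimal, hedged usage sketch (assumes libc headers that
already carry MADV_FREE):

        #include <stddef.h>
        #include <sys/mman.h>

        int main(void)
        {
                size_t len = 1 << 20;
                void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (buf == MAP_FAILED)
                        return 1;
                /* ... fill and use buf ... */

                /* Lazy free: the kernel may reclaim these pages under
                 * memory pressure; writing to them again cancels the
                 * hint and keeps the data. */
                madvise(buf, len, MADV_FREE);
                return 0;
        }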
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index dba508fe1683..f8150669b8c6 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -40,7 +40,7 @@
#include <asm/parisc-device.h>
/* See comments in include/asm-parisc/pci.h */
-struct hppa_dma_ops *hppa_dma_ops __read_mostly;
+struct dma_map_ops *hppa_dma_ops __read_mostly;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index b9402c9b3454..a27e4928bf73 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -413,7 +413,8 @@ pcxl_dma_init(void)
__initcall(pcxl_dma_init);
-static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+static void *pa11_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
{
unsigned long vaddr;
unsigned long paddr;
@@ -439,7 +440,8 @@ static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_ad
return (void *)vaddr;
}
-static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order;
@@ -450,15 +452,20 @@ static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vad
free_pages((unsigned long)__va(dma_handle), order);
}
-static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
+static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
+ void *addr = page_address(page) + offset;
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) addr, size);
return virt_to_phys(addr);
}
-static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
BUG_ON(direction == DMA_NONE);
@@ -475,7 +482,9 @@ static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, siz
return;
}
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
@@ -492,7 +501,9 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
return nents;
}
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
@@ -509,18 +520,24 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
return;
}
-static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+static void pa11_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+ size);
}
-static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+static void pa11_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+ size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
@@ -545,32 +562,28 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
-struct hppa_dma_ops pcxl_dma_ops = {
+struct dma_map_ops pcxl_dma_ops = {
.dma_supported = pa11_dma_supported,
- .alloc_consistent = pa11_dma_alloc_consistent,
- .alloc_noncoherent = pa11_dma_alloc_consistent,
- .free_consistent = pa11_dma_free_consistent,
- .map_single = pa11_dma_map_single,
- .unmap_single = pa11_dma_unmap_single,
+ .alloc = pa11_dma_alloc,
+ .free = pa11_dma_free,
+ .map_page = pa11_dma_map_page,
+ .unmap_page = pa11_dma_unmap_page,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
- .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
- .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+ .sync_single_for_device = pa11_dma_sync_single_for_device,
+ .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = pa11_dma_sync_sg_for_device,
};
-static void *fail_alloc_consistent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return NULL;
-}
-
-static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
{
void *addr;
+ if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ return NULL;
+
addr = (void *)__get_free_pages(flag, get_order(size));
if (addr)
*dma_handle = (dma_addr_t)virt_to_phys(addr);
@@ -578,24 +591,23 @@ static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
return addr;
}
-static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t iova)
+static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t iova, struct dma_attrs *attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
return;
}
-struct hppa_dma_ops pcx_dma_ops = {
+struct dma_map_ops pcx_dma_ops = {
.dma_supported = pa11_dma_supported,
- .alloc_consistent = fail_alloc_consistent,
- .alloc_noncoherent = pa11_dma_alloc_noncoherent,
- .free_consistent = pa11_dma_free_noncoherent,
- .map_single = pa11_dma_map_single,
- .unmap_single = pa11_dma_unmap_single,
+ .alloc = pcx_dma_alloc,
+ .free = pcx_dma_free,
+ .map_page = pa11_dma_map_page,
+ .unmap_page = pa11_dma_unmap_page,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
- .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
- .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+ .sync_single_for_device = pa11_dma_sync_single_for_device,
+ .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = pa11_dma_sync_sg_for_device,
};
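Note how pcx_dma_alloc now refuses plain coherent allocations and only
succeeds when the caller passed DMA_ATTR_NON_CONSISTENT: the old
alloc_noncoherent entry point is gone, but the capability survives through
the attrs argument. In this series the generic dma_alloc_noncoherent()
becomes a thin wrapper that sets exactly that attribute; a hedged sketch of
its shape:

        static inline void *dma_alloc_noncoherent(struct device *dev,
                        size_t size, dma_addr_t *dma_handle, gfp_t gfp)
        {
                DEFINE_DMA_ATTRS(attrs);

                dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
                return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
        }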
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 94f6c5089e0c..e4824fd04bb7 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -108,7 +108,6 @@ config PPC
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_OPROFILE
select HAVE_DEBUG_KMEMLEAK
@@ -158,6 +157,7 @@ config PPC
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_HAS_DEVMEM_IS_ALLOWED
select HAVE_ARCH_SECCOMP_FILTER
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 7f522c021dc3..77816acd4fd9 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -125,8 +125,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);
-#include <asm-generic/dma-mapping-common.h>
-
extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 493e72f64b35..b4407d0add27 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -191,7 +191,7 @@ struct fadump_crash_info_header {
u64 elfcorehdr_addr;
u32 crashing_cpu;
struct pt_regs regs;
- struct cpumask cpu_online_mask;
+ struct cpumask online_mask;
};
/* Crash memory ranges */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ba336930d448..794f22adf99d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -136,12 +136,18 @@ endif
obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o
obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o
-# Disable GCOV in odd or sensitive code
+# Disable GCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_prom_init.o := n
+UBSAN_SANITIZE_prom_init.o := n
GCOV_PROFILE_ftrace.o := n
+UBSAN_SANITIZE_ftrace.o := n
GCOV_PROFILE_machine_kexec_64.o := n
+UBSAN_SANITIZE_machine_kexec_64.o := n
GCOV_PROFILE_machine_kexec_32.o := n
+UBSAN_SANITIZE_machine_kexec_32.o := n
GCOV_PROFILE_kprobes.o := n
+UBSAN_SANITIZE_kprobes.o := n
+UBSAN_SANITIZE_vdso.o := n
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_ALTIVEC) += vector.o
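The new UBSAN_SANITIZE := n knobs mirror the GCOV_PROFILE := n lines next
to them: code that runs before the kernel is fully up (prom_init, the kexec
trampolines, kprobes, the vDSO) must not call into the UBSAN runtime, so
these objects opt out of the ARCH_HAS_UBSAN_SANITIZE_ALL instrumentation.
What that instrumentation buys elsewhere is easy to see in a stand-alone,
hedged userspace demo:

        /* Build: gcc -fsanitize=undefined demo.c && ./a.out
         * UBSAN reports the signed overflow at runtime instead of
         * letting it wrap silently. */
        #include <limits.h>
        #include <stdio.h>

        int main(void)
        {
                int x = INT_MAX;

                x += 1;         /* runtime error: signed integer overflow */
                printf("%d\n", x);
                return 0;
        }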
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 26d091a1a54c..3cb3b02a13dd 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -415,7 +415,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
else
ppc_save_regs(&fdh->regs);
- fdh->cpu_online_mask = *cpu_online_mask;
+ fdh->online_mask = *cpu_online_mask;
/* Call ibm,os-term rtas call to trigger firmware assisted dump */
rtas_os_term((char *)str);
@@ -646,7 +646,7 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
}
/* Lower 4 bytes of reg_value contains logical cpu id */
cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
- if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
+ if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
SKIP_TO_NEXT_CPU(reg_entry);
continue;
}
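The rename is not cosmetic. Until now cpu_online_mask was a const pointer
variable, which a structure member of the same name can coexist with; the
cpumask rework in this series turns it into a macro, and macros expand
inside structure definitions too. A hedged illustration of the clash being
avoided (macro body as assumed from the reworked cpumask code):

        #define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)

        struct fadump_crash_info_header {
                /* the member name below would expand into the cast
                 * above and no longer compile: */
                struct cpumask cpu_online_mask;
        };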
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 6abffb7a8cd9..cbabd143acae 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -15,6 +15,7 @@ targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 8c8f2ae43935..c710802b8fb6 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -8,6 +8,7 @@ targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 1278788d96e3..436062dbb6e2 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -3,6 +3,7 @@
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index dbeeb3a049f2..3be9c832dec1 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -579,7 +579,6 @@ config QDIO
menuconfig PCI
bool "PCI support"
- select HAVE_DMA_ATTRS
select PCI_MSI
select IOMMU_SUPPORT
help
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index b3fd54d93dd2..e64bfcb9702f 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -23,8 +23,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
-#include <asm-generic/dma-mapping-common.h>
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 6c391a5d3e5c..e13da05505dc 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,7 +11,6 @@ config SUPERH
select HAVE_GENERIC_DMA_COHERENT
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_PERF_EVENTS
select HAVE_DEBUG_BUGVERBOSE
select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index a3745a3fe029..e11cf0c8206b 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -11,8 +11,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#define DMA_ERROR_CODE 0
-#include <asm-generic/dma-mapping-common.h>
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3203e42190dd..57ffaf285c2f 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,7 +26,6 @@ config SPARC
select RTC_CLASS
select RTC_DRV_M48T59
select RTC_SYSTOHC
- select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index a21da597b0b5..1180ae254154 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -37,21 +37,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dma_ops;
}
-#define HAVE_ARCH_DMA_SET_MASK 1
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EINVAL;
- *dev->dma_mask = mask;
- return 0;
- }
-#endif
- return -EINVAL;
-}
-
-#include <asm-generic/dma-mapping-common.h>
-
#endif
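sparc can drop its HAVE_ARCH_DMA_SET_MASK override because the generic
fallback does the equivalent work once dma_supported() is answered through
the ops table; callers that check the error code should note it changes
from -EINVAL to -EIO. A hedged sketch of the common-header fallback of this
era:

        #ifndef HAVE_ARCH_DMA_SET_MASK
        static inline int dma_set_mask(struct device *dev, u64 mask)
        {
                if (!dev->dma_mask || !dma_supported(dev, mask))
                        return -EIO;

                *dev->dma_mask = mask;
                return 0;
        }
        #endif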
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 6bfbe8b71e7e..de4a4fff9323 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -5,7 +5,6 @@ config TILE
def_bool y
select HAVE_PERF_EVENTS
select USE_PMC if PERF_EVENTS
- select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_KVM if !TILEGX
select GENERIC_FIND_FIRST_BIT
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 96ac6cce4a32..01ceb4a895b0 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -73,37 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
}
#define HAVE_ARCH_DMA_SET_MASK 1
-
-#include <asm-generic/dma-mapping-common.h>
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- /*
- * For PCI devices with 64-bit DMA addressing capability, promote
- * the dma_ops to hybrid, with the consistent memory DMA space limited
- * to 32-bit. For 32-bit capable devices, limit the streaming DMA
- * address range to max_direct_dma_addr.
- */
- if (dma_ops == gx_pci_dma_map_ops ||
- dma_ops == gx_hybrid_pci_dma_map_ops ||
- dma_ops == gx_legacy_pci_dma_map_ops) {
- if (mask == DMA_BIT_MASK(64) &&
- dma_ops == gx_legacy_pci_dma_map_ops)
- set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
- else if (mask > dev->archdata.max_direct_dma_addr)
- mask = dev->archdata.max_direct_dma_addr;
- }
-
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
+int dma_set_mask(struct device *dev, u64 mask);
/*
* dma_alloc_noncoherent() is #defined to return coherent memory,
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09b58703ac26..b6bc0547a4f6 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
+int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ /*
+ * For PCI devices with 64-bit DMA addressing capability, promote
+ * the dma_ops to hybrid, with the consistent memory DMA space limited
+ * to 32-bit. For 32-bit capable devices, limit the streaming DMA
+ * address range to max_direct_dma_addr.
+ */
+ if (dma_ops == gx_pci_dma_map_ops ||
+ dma_ops == gx_hybrid_pci_dma_map_ops ||
+ dma_ops == gx_legacy_pci_dma_map_ops) {
+ if (mask == DMA_BIT_MASK(64) &&
+ dma_ops == gx_legacy_pci_dma_map_ops)
+ set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+ else if (mask > dev->archdata.max_direct_dma_addr)
+ mask = dev->archdata.max_direct_dma_addr;
+ }
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
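Moving dma_set_mask() out of line changes nothing for callers, and the
EXPORT_SYMBOL keeps modules working. A hedged sketch of the usual driver
pattern that exercises the promotion logic above (hypothetical driver, real
API):

        static int example_probe(struct pci_dev *pdev)
        {
                /* Prefer 64-bit DMA (on tilegx this may promote the
                 * device to the hybrid ops), fall back to 32-bit,
                 * give up otherwise. */
                if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
                    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                        return -EIO;

                return 0;
        }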
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 877342640b6e..e5602ee9c610 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -5,7 +5,6 @@ config UNICORE32
select ARCH_MIGHT_HAVE_PC_SERIO
select HAVE_MEMBLOCK
select HAVE_GENERIC_DMA_COHERENT
- select HAVE_DMA_ATTRS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select GENERIC_ATOMIC64
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index 8140e053ccd3..4749854afd03 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return &swiotlb_dma_map_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (dev && dev->dma_mask)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9bd3cc03d51d..330e738ccfc1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,7 @@ config X86
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_MMIO_FLUSH
select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -99,7 +100,6 @@ config X86
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 2ee62dba0373..bbe1a62efc02 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -60,6 +60,7 @@ clean-files += cpustr.h
KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
$(obj)/bzImage: asflags-y := $(SVGA_MODE)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 0a291cdfaf77..f9ce75d80101 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -33,6 +33,7 @@ KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
LDFLAGS := -m elf_$(UTS_MACHINE)
LDFLAGS_vmlinux := -T
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 265c0ed68118..c854541d93ff 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -4,6 +4,7 @@
KBUILD_CFLAGS += $(DISABLE_LTO)
KASAN_SANITIZE := n
+UBSAN_SANITIZE := n
VDSO64-$(CONFIG_X86_64) := y
VDSOX32-$(CONFIG_X86_X32_ABI) := y
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 953b7263f844..3a27b93e6261 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -46,8 +46,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *hwdev, u64 mask);
-#include <asm-generic/dma-mapping-common.h>
-
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 819ab3f9c9c7..ba7fbba9831b 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -385,6 +385,7 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
return image->fops->cleanup(image->image_loader_data);
}
+#ifdef CONFIG_KEXEC_VERIFY_SIG
int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
unsigned long kernel_len)
{
@@ -395,6 +396,7 @@ int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
return image->fops->verify_sig(kernel, kernel_len);
}
+#endif
/*
* Apply purgatory relocations.
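The new #ifdef matches the only caller: kernel/kexec_file.c invokes this
hook solely under CONFIG_KEXEC_VERIFY_SIG, and, as of this series, the
fops->verify_sig member it dereferences is guarded the same way. A
condensed, hedged sketch of the call site:

        #ifdef CONFIG_KEXEC_VERIFY_SIG
                ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
                                                   image->kernel_buf_len);
                if (ret) {
                        pr_debug("kernel signature verification failed.\n");
                        goto out;
                }
        #endif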
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 2730d775ef9a..3e75fcf6b836 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -70,3 +70,4 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
-I$(srctree)/arch/x86/boot
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 82044f732323..e9df1567d778 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,7 +15,6 @@ config XTENSA
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if !MMU
select HAVE_IRQ_TIME_ACCOUNTING
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 66c9ba261e30..3fc1170a6488 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -13,8 +13,6 @@
#include <asm/cache.h>
#include <asm/io.h>
-#include <asm-generic/dma-coherent.h>
-
#include <linux/mm.h>
#include <linux/scatterlist.h>
@@ -30,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return &xtensa_dma_map_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index d030594ed22b..9e079d49e7f2 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -86,7 +86,6 @@
#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* don't need these pages */
-#define MADV_FREE 5 /* free pages only if memory pressure */
/* common parameters: try to keep these consistent across architectures */
#define MADV_FREE 8 /* free pages only if memory pressure */