author     Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 11:32:27 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-01-31 11:32:27 -0800
commit     2382dc9a3eca644147be83dd2cd0dd64dc9e3e8c (patch)
tree       71a152721a9b9b11875bf5ea053edb72c9b3a94e /arch/powerpc
parent     28bc6fb9596fe1e577d09fc17ee6e1bb051c6ba3 (diff)
parent     04f56534786c885f578c24461bcd782fe9a787cf (diff)
Merge tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping
Pull dma mapping updates from Christoph Hellwig:
 "Except for a runtime warning fix from Christian, this is all about
  consolidation of the generic no-IOMMU code, as well as the glue code
  for swiotlb. All the code is based on the x86 implementation, with
  hooks to allow all architectures that aren't cache coherent to use
  it. The x86 conversion itself has been deferred because the x86
  maintainers were a little busy in the last months"

* tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping: (57 commits)
  MAINTAINERS: add the iommu list for swiotlb and xen-swiotlb
  arm64: use swiotlb_alloc and swiotlb_free
  arm64: replace ZONE_DMA with ZONE_DMA32
  mips: use swiotlb_{alloc,free}
  mips/netlogic: remove swiotlb support
  tile: use generic swiotlb_ops
  tile: replace ZONE_DMA with ZONE_DMA32
  unicore32: use generic swiotlb_ops
  ia64: remove an ifdef around the content of pci-dma.c
  ia64: clean up swiotlb support
  ia64: use generic swiotlb_ops
  ia64: replace ZONE_DMA with ZONE_DMA32
  swiotlb: remove various exports
  swiotlb: refactor coherent buffer allocation
  swiotlb: refactor coherent buffer freeing
  swiotlb: wire up ->dma_supported in swiotlb_dma_ops
  swiotlb: add common swiotlb_map_ops
  swiotlb: rename swiotlb_free to swiotlb_exit
  x86: rename swiotlb_dma_ops
  powerpc: rename swiotlb_dma_ops
  ...
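The series leans on the per-device struct dma_map_ops indirection: platform code installs an ops table on each device, and the generic dma_map_*() helpers dispatch through it. Below is a minimal sketch of that flow against the 4.15-era API; example_dev_init() and example_map() are hypothetical names, while set_dma_ops(), get_dma_ops() and dma_map_single() are the real interfaces:

#include <linux/dma-mapping.h>

extern const struct dma_map_ops dma_nommu_ops;	/* renamed by this series */

/* Hypothetical platform hook: wire a device to the no-IOMMU ops. */
static void example_dev_init(struct device *dev)
{
	set_dma_ops(dev, &dma_nommu_ops);
}

/* Hypothetical driver path: dma_map_single() dispatches to
 * get_dma_ops(dev)->map_page() on whatever table was installed. */
static dma_addr_t example_map(struct device *dev, void *buf, size_t len)
{
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}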
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                       |  1
-rw-r--r--  arch/powerpc/include/asm/dma-direct.h      | 29
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h     | 36
-rw-r--r--  arch/powerpc/include/asm/swiotlb.h         |  4
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c            |  2
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c          | 12
-rw-r--r--  arch/powerpc/kernel/dma.c                  | 73
-rw-r--r--  arch/powerpc/kernel/pci-common.c           |  2
-rw-r--r--  arch/powerpc/kernel/setup-common.c         |  2
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c        | 28
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c      |  2
-rw-r--r--  arch/powerpc/platforms/pasemi/setup.c      |  2
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c  |  4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c     |  2
-rw-r--r--  arch/powerpc/platforms/pseries/vio.c       |  2
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c           |  4
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c              |  4
17 files changed, 103 insertions(+), 106 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 2ed525a44734..e92432ae9737 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -139,6 +139,7 @@ config PPC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if PPC64
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select ARCH_HAS_SG_CHAIN
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
new file mode 100644
index 000000000000..a5b59c765426
--- /dev/null
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_POWERPC_DMA_DIRECT_H
+#define ASM_POWERPC_DMA_DIRECT_H 1
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+#ifdef CONFIG_SWIOTLB
+ struct dev_archdata *sd = &dev->archdata;
+
+ if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
+ return false;
+#endif
+
+ if (!dev->dma_mask)
+ return false;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + get_dma_offset(dev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr - get_dma_offset(dev);
+}
+#endif /* ASM_POWERPC_DMA_DIRECT_H */
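The new header supplies the helpers that the consolidated direct-mapping code expects from an architecture. A hedged usage sketch, not part of the patch: translate a CPU physical address with phys_to_dma(), then proceed only if dma_capable() says the device can reach it. example_direct_map() is a hypothetical illustration; bounce buffering is left to the caller.

#include <linux/dma-direct.h>	/* pulls in asm/dma-direct.h on 4.16+ */

static dma_addr_t example_direct_map(struct device *dev,
				     phys_addr_t paddr, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma(dev, paddr);

	/* A swiotlb-backed caller would bounce instead of failing here. */
	if (!dma_capable(dev, dma_addr, size))
		return 0;

	return dma_addr;
}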
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 5a6cbe11db6f..8fa394520af6 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -19,13 +19,13 @@
#include <asm/swiotlb.h>
/* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs);
-extern void __dma_direct_free_coherent(struct device *dev, size_t size,
+extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs);
-extern int dma_direct_mmap_coherent(struct device *dev,
+extern int dma_nommu_mmap_coherent(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle,
size_t size, unsigned long attrs);
@@ -73,7 +73,7 @@ static inline unsigned long device_to_mask(struct device *dev)
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
-extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_nommu_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
@@ -107,39 +107,11 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
dev->archdata.dma_offset = off;
}
-/* this will be removed soon */
-#define flush_write_buffers()
-
#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-#ifdef CONFIG_SWIOTLB
- struct dev_archdata *sd = &dev->archdata;
-
- if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
- return false;
-#endif
-
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
- return paddr + get_dma_offset(dev);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
- return daddr - get_dma_offset(dev);
-}
-
#define ARCH_HAS_DMA_MMAP_COHERENT
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 01d45a5fd00b..f65ecf57b66c 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -13,9 +13,7 @@
#include <linux/swiotlb.h>
-extern const struct dma_map_ops swiotlb_dma_ops;
-
-static inline void dma_mark_clean(void *addr, size_t size) {}
+extern const struct dma_map_ops powerpc_swiotlb_dma_ops;
extern unsigned int ppc_swiotlb_enable;
int __init swiotlb_setup_bus_notifier(void);
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 66f33e7f8d40..f9fe2080ceb9 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -114,7 +114,7 @@ int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = dma_iommu_map_sg,
.unmap_sg = dma_iommu_unmap_sg,
.dma_supported = dma_iommu_dma_supported,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index d0ea7860e02b..88f3963ca30f 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -46,10 +46,10 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
* map_page, and unmap_page on highmem, use normal dma_ops
* for everything else.
*/
-const struct dma_map_ops swiotlb_dma_ops = {
- .alloc = __dma_direct_alloc_coherent,
- .free = __dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+const struct dma_map_ops powerpc_swiotlb_dma_ops = {
+ .alloc = __dma_nommu_alloc_coherent,
+ .free = __dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
@@ -89,7 +89,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
/* May need to bounce if the device can't address all of DRAM */
if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
- set_dma_ops(dev, &swiotlb_dma_ops);
+ set_dma_ops(dev, &powerpc_swiotlb_dma_ops);
return NOTIFY_DONE;
}
@@ -121,7 +121,7 @@ static int __init check_swiotlb_enabled(void)
if (ppc_swiotlb_enable)
swiotlb_print_info();
else
- swiotlb_free();
+ swiotlb_exit();
return 0;
}
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 4194bbbbdb10..da20569de9d4 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -33,14 +33,14 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
struct dev_archdata __maybe_unused *sd = &dev->archdata;
#ifdef CONFIG_SWIOTLB
- if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
+ if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops)
pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif
return pfn;
}
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
+static int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
@@ -62,7 +62,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
#endif
}
-void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
@@ -105,9 +105,6 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
};
#endif /* CONFIG_FSL_SOC */
- /* ignore region specifiers */
- flag &= ~(__GFP_HIGHMEM);
-
page = alloc_pages_node(node, flag, get_order(size));
if (page == NULL)
return NULL;
@@ -119,7 +116,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
#endif
}
-void __dma_direct_free_coherent(struct device *dev, size_t size,
+void __dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
@@ -130,7 +127,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size,
#endif
}
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
@@ -139,8 +136,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
/* The coherent mask may be smaller than the real mask, check if
* we can really use the direct ops
*/
- if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
- return __dma_direct_alloc_coherent(dev, size, dma_handle,
+ if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
+ return __dma_nommu_alloc_coherent(dev, size, dma_handle,
flag, attrs);
/* Ok we can't ... do we have an iommu ? If not, fail */
@@ -154,15 +151,15 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dev_to_node(dev));
}
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
struct iommu_table *iommu;
- /* See comments in dma_direct_alloc_coherent() */
- if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
- return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
+ /* See comments in dma_nommu_alloc_coherent() */
+ if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
+ return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
attrs);
/* Maybe we used an iommu ... */
iommu = get_iommu_table_base(dev);
@@ -175,7 +172,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
iommu_free_coherent(iommu, size, vaddr, dma_handle);
}
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
{
@@ -193,7 +190,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
vma->vm_page_prot);
}
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
@@ -213,13 +210,13 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
}
-static u64 dma_direct_get_required_mask(struct device *dev)
+static u64 dma_nommu_get_required_mask(struct device *dev)
{
u64 end, mask;
@@ -231,7 +228,7 @@ static u64 dma_direct_get_required_mask(struct device *dev)
return mask;
}
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
@@ -246,7 +243,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
return page_to_phys(page) + offset + get_dma_offset(dev);
}
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
@@ -255,7 +252,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
}
#ifdef CONFIG_NOT_COHERENT_CACHE
-static inline void dma_direct_sync_sg(struct device *dev,
+static inline void dma_nommu_sync_sg(struct device *dev,
struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
@@ -266,7 +263,7 @@ static inline void dma_direct_sync_sg(struct device *dev,
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
-static inline void dma_direct_sync_single(struct device *dev,
+static inline void dma_nommu_sync_single(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
@@ -274,24 +271,24 @@ static inline void dma_direct_sync_single(struct device *dev,
}
#endif
-const struct dma_map_ops dma_direct_ops = {
- .alloc = dma_direct_alloc_coherent,
- .free = dma_direct_free_coherent,
- .mmap = dma_direct_mmap_coherent,
- .map_sg = dma_direct_map_sg,
- .unmap_sg = dma_direct_unmap_sg,
- .dma_supported = dma_direct_dma_supported,
- .map_page = dma_direct_map_page,
- .unmap_page = dma_direct_unmap_page,
- .get_required_mask = dma_direct_get_required_mask,
+const struct dma_map_ops dma_nommu_ops = {
+ .alloc = dma_nommu_alloc_coherent,
+ .free = dma_nommu_free_coherent,
+ .mmap = dma_nommu_mmap_coherent,
+ .map_sg = dma_nommu_map_sg,
+ .unmap_sg = dma_nommu_unmap_sg,
+ .dma_supported = dma_nommu_dma_supported,
+ .map_page = dma_nommu_map_page,
+ .unmap_page = dma_nommu_unmap_page,
+ .get_required_mask = dma_nommu_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
- .sync_single_for_cpu = dma_direct_sync_single,
- .sync_single_for_device = dma_direct_sync_single,
- .sync_sg_for_cpu = dma_direct_sync_sg,
- .sync_sg_for_device = dma_direct_sync_sg,
+ .sync_single_for_cpu = dma_nommu_sync_single,
+ .sync_single_for_device = dma_nommu_sync_single,
+ .sync_sg_for_cpu = dma_nommu_sync_sg,
+ .sync_sg_for_device = dma_nommu_sync_sg,
#endif
};
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
@@ -302,7 +299,7 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
* is no dma_op->set_coherent_mask() so we have to do
* things the hard way:
*/
- if (get_dma_ops(dev) != &dma_direct_ops ||
+ if (get_dma_ops(dev) != &dma_nommu_ops ||
get_iommu_table_base(dev) == NULL ||
!dma_iommu_dma_supported(dev, mask))
return -EIO;
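From a driver's point of view nothing changes with the rename: dma_alloc_coherent() and the sync helpers still dispatch through the installed ops table, reaching the dma_nommu_*() implementations above on devices using the no-IOMMU path. A hedged sketch of that consumer side (hypothetical example_* names; illustrative only):

#include <linux/gfp.h>
#include <linux/dma-mapping.h>

static void *example_coherent_alloc(struct device *dev, size_t size,
				    dma_addr_t *handle)
{
	/* Ends up in get_dma_ops(dev)->alloc(), i.e. dma_nommu_alloc_coherent()
	 * here, which tries the linear path and falls back to the IOMMU. */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void example_sync_to_device(struct device *dev, dma_addr_t handle,
				   size_t len)
{
	/* On CONFIG_NOT_COHERENT_CACHE platforms this reaches
	 * dma_nommu_sync_single(), which performs the cache maintenance. */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
}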
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 0ac7aa346c69..590f4d0a6cb1 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -60,7 +60,7 @@ resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);
-static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+static const struct dma_map_ops *pci_dma_ops = &dma_nommu_ops;
void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 8fd3a70047f1..3f33869c6486 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -780,7 +780,7 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
{
pdev->archdata.dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->archdata.dma_mask;
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
}
static __init void print_system_info(void)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 4b91ad08eefd..12352a58072a 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -541,7 +541,7 @@ static struct cbe_iommu *cell_iommu_for_node(int nid)
return NULL;
}
-static unsigned long cell_dma_direct_offset;
+static unsigned long cell_dma_nommu_offset;
static unsigned long dma_iommu_fixed_base;
@@ -580,7 +580,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
device_to_mask(dev), flag,
dev_to_node(dev));
else
- return dma_direct_ops.alloc(dev, size, dma_handle, flag,
+ return dma_nommu_ops.alloc(dev, size, dma_handle, flag,
attrs);
}
@@ -592,7 +592,7 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size,
iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
dma_handle);
else
- dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
+ dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs);
}
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
@@ -601,7 +601,7 @@ static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- return dma_direct_ops.map_page(dev, page, offset, size,
+ return dma_nommu_ops.map_page(dev, page, offset, size,
direction, attrs);
else
return iommu_map_page(dev, cell_get_iommu_table(dev), page,
@@ -614,7 +614,7 @@ static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
+ dma_nommu_ops.unmap_page(dev, dma_addr, size, direction,
attrs);
else
iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
@@ -626,7 +626,7 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
+ return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs);
else
return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
nents, device_to_mask(dev),
@@ -638,7 +638,7 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
unsigned long attrs)
{
if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
- dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
+ dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs);
else
ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
direction, attrs);
@@ -661,8 +661,8 @@ static void cell_dma_dev_setup(struct device *dev)
{
if (get_pci_dma_ops() == &dma_iommu_ops)
set_iommu_table_base(dev, cell_get_iommu_table(dev));
- else if (get_pci_dma_ops() == &dma_direct_ops)
- set_dma_offset(dev, cell_dma_direct_offset);
+ else if (get_pci_dma_ops() == &dma_nommu_ops)
+ set_dma_offset(dev, cell_dma_nommu_offset);
else
BUG();
}
@@ -810,14 +810,14 @@ static int __init cell_iommu_init_disabled(void)
unsigned long base = 0, size;
/* When no iommu is present, we use direct DMA ops */
- set_pci_dma_ops(&dma_direct_ops);
+ set_pci_dma_ops(&dma_nommu_ops);
/* First make sure all IOC translation is turned off */
cell_disable_iommus();
/* If we have no Axon, we set up the spider DMA magic offset */
if (of_find_node_by_name(NULL, "axon") == NULL)
- cell_dma_direct_offset = SPIDER_DMA_OFFSET;
+ cell_dma_nommu_offset = SPIDER_DMA_OFFSET;
/* Now we need to check to see where the memory is mapped
* in PCI space. We assume that all busses use the same dma
@@ -851,13 +851,13 @@ static int __init cell_iommu_init_disabled(void)
return -ENODEV;
}
- cell_dma_direct_offset += base;
+ cell_dma_nommu_offset += base;
- if (cell_dma_direct_offset != 0)
+ if (cell_dma_nommu_offset != 0)
cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
printk("iommu: disabled, direct DMA offset is 0x%lx\n",
- cell_dma_direct_offset);
+ cell_dma_nommu_offset);
return 0;
}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 7fec04de27fc..78b80cbd9768 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
*/
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
- dev->dev.dma_ops = &dma_direct_ops;
+ dev->dev.dma_ops = &dma_nommu_ops;
/*
* Set the coherent DMA mask to prevent the iommu
* being used unnecessarily
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index c4a3e93dc324..d0b8ae53660d 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
return 0;
/* We use the direct ops for localbus */
- dev->dma_ops = &dma_direct_ops;
+ dev->dma_ops = &dma_nommu_ops;
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 749055553064..9582aeb1fe4c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1850,7 +1850,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
if (bypass) {
dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
} else {
/*
* If the device can't set the TCE bypass bit but still wants
@@ -1868,7 +1868,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
return rc;
/* 4GB offset bypasses 32-bit space */
set_dma_offset(&pdev->dev, (1ULL << 32));
- set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_ops(&pdev->dev, &dma_nommu_ops);
} else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) {
/*
* Fail the request if a DMA mask between 32 and 64 bits
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 69921f72e2da..eaa11334fc8c 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1231,7 +1231,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
if (dma_offset != 0) {
dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
set_dma_offset(dev, dma_offset);
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
ddw_enabled = true;
}
}
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index d86938260a86..49e04ec19238 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -618,7 +618,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
static const struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
- .mmap = dma_direct_mmap_coherent,
+ .mmap = dma_nommu_mmap_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 3573d54b2770..a6198d4f0f03 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -402,7 +402,7 @@ static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
*/
if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) {
dev_info(dev, "Using 64-bit DMA iommu bypass\n");
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
} else {
dev_info(dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(dev, &dma_iommu_ops);
@@ -446,7 +446,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
controller_ops->dma_bus_setup = NULL;
/* Setup pci_dma ops */
- set_pci_dma_ops(&dma_direct_ops);
+ set_pci_dma_ops(&dma_nommu_ops);
}
#ifdef CONFIG_PM
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 22d98057f773..61e07c78d64f 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -118,7 +118,7 @@ static void setup_swiotlb_ops(struct pci_controller *hose)
{
if (ppc_swiotlb_enable) {
hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
- set_pci_dma_ops(&swiotlb_dma_ops);
+ set_pci_dma_ops(&powerpc_swiotlb_dma_ops);
}
}
#else
@@ -135,7 +135,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
* mapping that allows addressing any RAM address from across PCI.
*/
if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
- set_dma_ops(dev, &dma_direct_ops);
+ set_dma_ops(dev, &dma_nommu_ops);
set_dma_offset(dev, pci64_dma_offset);
}