Diffstat (limited to 'arch/mips/mm/dma-noncoherent.c'):
 arch/mips/mm/dma-noncoherent.c | 51 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 28 insertions(+), 23 deletions(-)
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 563c2c0d0c81..38d3d9143b47 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -5,8 +5,7 @@
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
*/
#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/dma-contiguous.h>
+#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <asm/cache.h>
@@ -55,22 +54,34 @@ void *arch_dma_set_uncached(void *addr, size_t size)
return (void *)(__pa(addr) + UNCAC_BASE);
}
-static inline void dma_sync_virt(void *addr, size_t size,
+static inline void dma_sync_virt_for_device(void *addr, size_t size,
enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
dma_cache_wback((unsigned long)addr, size);
break;
-
case DMA_FROM_DEVICE:
dma_cache_inv((unsigned long)addr, size);
break;
-
case DMA_BIDIRECTIONAL:
dma_cache_wback_inv((unsigned long)addr, size);
break;
+ default:
+ BUG();
+ }
+}
+static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE:
+ break;
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ dma_cache_inv((unsigned long)addr, size);
+ break;
default:
BUG();
}
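
The hunk above splits dma_sync_virt() into dma_sync_virt_for_device() and dma_sync_virt_for_cpu(): before DMA the cache is written back and/or invalidated according to the transfer direction, while after DMA only an invalidate is needed, and only for DMA_FROM_DEVICE and DMA_BIDIRECTIONAL. As a rough, illustrative sketch of where these helpers take effect (not part of this patch; mydev_rx_one and rx_buf are made-up names, only the DMA API calls are real), a streaming receive in a driver would look like the following, with the map/unmap calls landing in the arch_sync_dma_for_device()/arch_sync_dma_for_cpu() hooks further down in this file on a non-coherent MIPS system:

/*
 * Illustrative sketch only (not part of this patch): a streaming RX
 * mapping through the generic DMA API.
 */
#include <linux/dma-mapping.h>

static int mydev_rx_one(struct device *dev, void *rx_buf, size_t len)
{
	dma_addr_t dma;

	/* Before DMA: dma-direct invokes arch_sync_dma_for_device(). */
	dma = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... let the device DMA into rx_buf and wait for completion ... */

	/*
	 * After DMA: dma-direct invokes arch_sync_dma_for_cpu(), which on
	 * MIPS only invalidates, and only when cpu_needs_post_dma_flush().
	 */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}
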
@@ -82,7 +93,7 @@ static inline void dma_sync_virt(void *addr, size_t size,
* configured then the bulk of this loop gets optimized out.
*/
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, bool for_device)
{
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
unsigned long offset = paddr & ~PAGE_MASK;
@@ -90,18 +101,20 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
do {
size_t len = left;
+ void *addr;
if (PageHighMem(page)) {
- void *addr;
-
if (offset + len > PAGE_SIZE)
len = PAGE_SIZE - offset;
+ }
+
+ addr = kmap_atomic(page);
+ if (for_device)
+ dma_sync_virt_for_device(addr + offset, len, dir);
+ else
+ dma_sync_virt_for_cpu(addr + offset, len, dir);
+ kunmap_atomic(addr);
- addr = kmap_atomic(page);
- dma_sync_virt(addr + offset, len, dir);
- kunmap_atomic(addr);
- } else
- dma_sync_virt(page_address(page) + offset, size, dir);
offset = 0;
page++;
left -= len;
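
Pieced together from the hunks above, dma_sync_phys() with this change applied reads roughly as below (a readable reconstruction for reference, not a quote of the resulting file; the left/while(left) parts come from unchanged context outside the hunk). Every page, highmem or not, now goes through kmap_atomic() and is dispatched to the for_device or for_cpu helper; the old lowmem branch, which passed the remaining size rather than the per-page len, goes away.

/* Reconstruction of the post-patch loop, for readability only. */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		/* kmap_atomic() maps one page, so clamp highmem chunks. */
		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}
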
@@ -111,7 +124,7 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
- dma_sync_phys(paddr, size, dir);
+ dma_sync_phys(paddr, size, dir, true);
}
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
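
For orientation (not part of this patch): arch_sync_dma_for_device() is the hook the generic dma-direct code invokes when a streaming buffer is handed to a non-coherent device. A simplified, assumed illustration of that call path (example_map_page is a made-up name, not the real kernel/dma/direct.c code) might look like:

/*
 * Assumed, simplified illustration of how dma-direct reaches the arch
 * hook; not a copy of kernel/dma/direct.c.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

static dma_addr_t example_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	/* Non-coherent device: write back/invalidate before the transfer. */
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	return phys_to_dma(dev, phys);
}
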
@@ -119,18 +132,10 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
{
if (cpu_needs_post_dma_flush())
- dma_sync_phys(paddr, size, dir);
+ dma_sync_phys(paddr, size, dir, false);
}
#endif
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- dma_sync_virt(vaddr, size, direction);
-}
-
#ifdef CONFIG_DMA_PERDEV_COHERENT
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)