// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/dma-noncoherent.h>
#include <linux/cache.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * Apply the cache maintenance routine @fn to the physical range
 * [@paddr, @paddr + @size). Highmem pages have no permanent kernel
 * mapping, so they are processed one page at a time through a
 * temporary kmap_atomic() window; lowmem ranges are contiguous in
 * the kernel linear mapping and are handled with a single call.
 */
static inline void cache_op(phys_addr_t paddr, size_t size,
		void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			/* Clamp this chunk to the end of the current page. */
			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			/*
			 * Lowmem: the linear mapping covers the buffer, so
			 * flush the entire range [paddr, paddr + size) in
			 * one call; @left then drops to zero below and the
			 * loop terminates.
			 */
			start = (unsigned long)phys_to_virt(paddr);
			fn(start, start + size);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

/*
 * Hand the buffer to a non-coherent device: writeback (clean) dirty
 * CPU cache lines so the device reads current data from memory.
 * DMA_FROM_DEVICE needs nothing here; the cache is invalidated when
 * ownership returns to the CPU.
 */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		break;
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_wb_range);
		break;
	default:
		BUG();
	}
}

/*
 * Hand the buffer back to the CPU: invalidate any stale cache lines
 * covering it so the CPU sees what the device wrote. DMA_TO_DEVICE
 * needs nothing here; the cache was cleaned before the transfer.
 */
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_inval_range);
		break;
	default:
		BUG();
	}
}

/*
 * Prepare a page for use as a coherent (uncached) DMA buffer:
 * writeback and invalidate any cached copies so that later uncached
 * accesses observe consistent data.
 */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_op(page_to_phys(page), size, cpu_dma_wbinval_range);
}
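
/*
 * Illustrative call flow (editorial sketch, not part of the original
 * file): how the generic DMA-direct layer is expected to reach the
 * hooks above when a driver streams a buffer to or from a
 * non-coherent device. The intermediate caller names follow
 * kernel/dma/direct.c of the same era and should be treated as an
 * assumption.
 *
 *   dma_map_single(dev, buf, len, DMA_TO_DEVICE)
 *     dma_direct_map_page(...)
 *       arch_sync_dma_for_device(paddr, len, DMA_TO_DEVICE)
 *         cache_op(paddr, len, cpu_dma_wb_range)      // clean to RAM
 *
 *   dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE)
 *     dma_direct_unmap_page(...)
 *       arch_sync_dma_for_cpu(paddr, len, DMA_FROM_DEVICE)
 *         cache_op(paddr, len, cpu_dma_inval_range)   // drop stale lines
 *
 *   dma_alloc_coherent(dev, size, &handle, GFP_KERNEL)
 *     ...
 *       arch_dma_prep_coherent(page, size)
 *         cache_op(page_to_phys(page), size, cpu_dma_wbinval_range)
 */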