 arch/nds32/include/asm/cacheflush.h | 11 +++++++----
 include/asm-generic/cacheflush.h    | 33 ++++++++++++++++++++++++++++++++-
 2 files changed, 39 insertions(+), 5 deletions(-)
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index d9ac7e6408ef..caddded56e77 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -9,7 +9,11 @@
 #define PG_dcache_dirty PG_arch_1
 
 void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
+
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#define flush_icache_page flush_icache_page
+
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size);
 #define flush_dcache_mmap_lock(mapping)   xa_lock_irq(&(mapping)->i_pages)
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 #else
-#include <asm-generic/cacheflush.h>
-#undef flush_icache_range
-#undef flush_icache_page
-#undef flush_icache_user_range
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len);
+#define flush_icache_user_range flush_icache_user_range
+
+#include <asm-generic/cacheflush.h>
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
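
The nds32 change above depends on the self-referential "#define foo foo" idiom: the macro expands to itself, so every existing caller compiles unchanged, while the #ifndef guards added to <asm-generic/cacheflush.h> below now see the symbol as already provided and skip the empty generic stub. A minimal standalone sketch of the idiom (my_flush is a hypothetical symbol used only for illustration, not part of the patch):

/* override-sketch.c - build with: cc -c override-sketch.c */

/* What an architecture header does: supply a real version and
 * mark it present with a macro that expands to itself. */
static inline void my_flush(void)
{
	/* arch-specific cache maintenance would go here */
}
#define my_flush my_flush	/* self-expansion: callers are unaffected */

/* What the generic header now does: emit the no-op fallback only
 * when the architecture did not define the macro above. */
#ifndef my_flush
static inline void my_flush(void)
{
}
#endif

void caller(void)
{
	my_flush();	/* resolves to the "arch" version */
}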
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index a950a22c4890..cac7404b2bdd 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
+#ifndef flush_cache_all
 static inline void flush_cache_all(void)
 {
 }
+#endif
 
+#ifndef flush_cache_mm
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_dup_mm
 static inline void flush_cache_dup_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_range
 static inline void flush_cache_range(struct vm_area_struct *vma,
 				     unsigned long start,
 				     unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_page
 static inline void flush_cache_page(struct vm_area_struct *vma,
 				    unsigned long vmaddr,
 				    unsigned long pfn)
 {
 }
+#endif
 
+#ifndef flush_dcache_page
 static inline void flush_dcache_page(struct page *page)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_lock
 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_unlock
 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_icache_range
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_icache_page
 static inline void flush_icache_page(struct vm_area_struct *vma,
 				     struct page *page)
 {
 }
+#endif
 
+#ifndef flush_icache_user_range
 static inline void flush_icache_user_range(struct vm_area_struct *vma,
 					   struct page *page,
 					   unsigned long addr, int len)
 {
 }
+#endif
 
+#ifndef flush_cache_vmap
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
 		memcpy(dst, src, len); \
 		flush_icache_user_range(vma, page, vaddr, len); \
 	} while (0)
+#endif
+
+#ifndef copy_from_user_page
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
+#endif
 
 #endif /* __ASM_CACHEFLUSH_H */
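
Because every generic stub is now guarded, include order matters: an architecture must define its override macros before pulling in <asm-generic/cacheflush.h>, which is why the nds32 hunk moves the #include below the flush_icache_user_range() declaration. A hedged sketch of a minimal architecture header under this scheme; the "foo" architecture and its header guard are hypothetical, while flush_dcache_page() is a real hook from the generic header:

/* arch/foo/include/asm/cacheflush.h (hypothetical example) */
#ifndef __ASM_FOO_CACHEFLUSH_H
#define __ASM_FOO_CACHEFLUSH_H

#include <linux/mm.h>

/* Override one hook with a real implementation... */
void flush_dcache_page(struct page *page);
#define flush_dcache_page flush_dcache_page

/*
 * ...and inherit no-op defaults for everything left undefined.
 * This include must come after the #define above; otherwise the
 * guarded generic stub would still be emitted and clash with the
 * extern prototype.
 */
#include <asm-generic/cacheflush.h>

#endif /* __ASM_FOO_CACHEFLUSH_H */

Compared with the old scheme visible in the removed nds32 lines (include the generic header first, then #undef each stub before redeclaring it), the order-based approach needs no #undef bookkeeping and never defines two conflicting versions of the same symbol.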