path: root/arch/csky/abiv2/inc/abi/cacheflush.h
author     Guo Ren <guoren@linux.alibaba.com>  2020-01-31 20:33:10 +0800
committer  Guo Ren <guoren@linux.alibaba.com>  2020-02-21 15:43:24 +0800
commit     997153b9a75c08d545ad45e6f8ceb432435d2425 (patch)
tree       830915f73d05e7d094d2f293a712e05c0da1e6f5 /arch/csky/abiv2/inc/abi/cacheflush.h
parent     cc1f6563a92ced0889775d0587316d725b6e1a68 (diff)
csky: Add flush_icache_mm to defer flush icache all
Some CPUs don't support the icache.va instruction for maintaining the icache across all SMP cores. Using icache.all plus an IPI costs a lot in performance, and the deferred mechanism reduces the number of calls to the icache flush-all functions.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
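To make the deferral concrete, below is a minimal sketch of the pattern the message describes. It is not the literal csky implementation: the per-mm cpumask icache_stale_mask and the helper local_icache_inv_all() are assumed names used here for illustration only.

void flush_icache_mm_range(struct mm_struct *mm,
			   unsigned long start, unsigned long end)
{
	unsigned int cpu = get_cpu();

	/* Make the new instructions visible on the local core right away. */
	cache_wbinv_range(start, end);

	/*
	 * Instead of broadcasting icache.all with an IPI, mark every other
	 * core's icache as stale; each core pays for a flush only if and
	 * when it actually switches to this mm.
	 */
	cpumask_setall(&mm->context.icache_stale_mask);		/* assumed field */
	cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);
	put_cpu();
}

/* Hooked into the context-switch path of each core. */
void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		local_icache_inv_all(NULL);	/* assumed flush-all helper */
	}
}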
Diffstat (limited to 'arch/csky/abiv2/inc/abi/cacheflush.h')
-rw-r--r--  arch/csky/abiv2/inc/abi/cacheflush.h  14
1 file changed, 11 insertions, 3 deletions
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index 28b7c3233175..a565e00c3f70 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -31,15 +31,23 @@ static inline void flush_dcache_page(struct page *page)
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
+void flush_icache_mm_range(struct mm_struct *mm,
+			   unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
+
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	memcpy(dst, src, len); \
-	if (vma->vm_flags & VM_EXEC) \
-		cache_wbinv_range((unsigned long)dst, \
-				(unsigned long)dst + len); \
+	if (vma->vm_flags & VM_EXEC) { \
+		dcache_wb_range((unsigned long)dst, \
+				(unsigned long)dst + len); \
+		flush_icache_mm_range(current->mm, \
+				(unsigned long)dst, \
+				(unsigned long)dst + len); \
+	} \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
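A note on the copy_to_user_page() hunk above: the new code splits the two jobs that cache_wbinv_range() used to do in one shot. dcache_wb_range() writes the copied bytes back to memory, while instruction-cache coherence is handed to flush_icache_mm_range(), which routes it through the deferral described in the commit message; a debugger write into an executable mapping (copy_to_user_page() is reached via access_process_vm(), e.g. from ptrace) therefore no longer forces an immediate whole-icache invalidate on every core. For completeness, a hedged sketch of the remaining hook, with the MMU-switch details reduced to a placeholder:

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next)
		setup_pgd(next->pgd);	/* placeholder for the real MMU/ASID switch */

	/* Catch up on any icache invalidate this core deferred for next. */
	flush_icache_deferred(next);
}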