author		Guo Ren <guoren@linux.alibaba.com>	2020-01-25 00:37:09 +0800
committer	Guo Ren <guoren@linux.alibaba.com>	2020-02-21 15:43:24 +0800
commit		a1176734132c630b50908c36563e05fb3599682c (patch)
tree		cb054b34a4fb5e0a58c7c5318a5a0ec06432c88a /arch/csky/abiv2
parent		761b4f694cb90b63ca2739ac8a8a176342636e5e (diff)
csky: Remove unnecessary flush_icache_* implementation
The abiv2 CPUs all use PIPT caches, so there is no need to implement the flush_icache_page function. The flush_icache_user_range function has never been used, so just remove it. The flush_cache_range function is also unnecessary for a PIPT cache when a TLB mapping changes.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
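For quick reference, below is a condensed sketch of the icache-related hooks that remain in abiv2's cacheflush.h after this change (taken from the diff further down; not the complete header, surrounding includes and guards omitted):

/* On a PIPT cache the icache only needs an explicit writeback/invalidate
 * of the range that was actually modified (flush_icache_range), so the
 * per-VMA and per-page hooks can be no-ops.
 */
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)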
Diffstat (limited to 'arch/csky/abiv2')
-rw-r--r--	arch/csky/abiv2/cacheflush.c		23
-rw-r--r--	arch/csky/abiv2/inc/abi/cacheflush.h	13
2 files changed, 2 insertions, 34 deletions
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 5bb887b275e1..f64b415f6fde 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -6,29 +6,6 @@
#include <linux/mm.h>
#include <asm/cache.h>
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
- unsigned long start;
-
- start = (unsigned long) kmap_atomic(page);
-
- cache_wbinv_range(start, start + PAGE_SIZE);
-
- kunmap_atomic((void *)start);
-}
-
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
- unsigned long vaddr, int len)
-{
- unsigned long kaddr;
-
- kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
-
- cache_wbinv_range(kaddr, kaddr + len);
-
- kunmap_atomic((void *)kaddr);
-}
-
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *pte)
{
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index b8db5e0b2fe3..62a9031fffd8 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -13,25 +13,16 @@
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
-
-#define flush_cache_range(vma, start, end) \
- do { \
- if (vma->vm_flags & VM_EXEC) \
- icache_inv_all(); \
- } while (0)
-
+#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
- unsigned long vaddr, int len);
-
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)