Diffstat (limited to 'arch/xtensa/include/asm/cacheflush.h')
 -rw-r--r--  arch/xtensa/include/asm/cacheflush.h | 190
 1 file changed, 57 insertions(+), 133 deletions(-)
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 127cd48883c4..a2b6bb5429f5 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -1,18 +1,14 @@
/*
- * include/asm-xtensa/cacheflush.h
- *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * (C) 2001 - 2007 Tensilica Inc.
+ * (C) 2001 - 2013 Tensilica Inc.
*/
#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H
-#ifdef __KERNEL__
-
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -41,6 +37,7 @@
* specials for cache aliasing:
*
* __flush_invalidate_dcache_page_alias(vaddr,paddr)
+ * __invalidate_dcache_page_alias(vaddr,paddr)
* __invalidate_icache_page_alias(vaddr,paddr)
*/
@@ -51,7 +48,6 @@ extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);
-
#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_invalidate_dcache_all(void);
extern void __flush_dcache_page(unsigned long);
@@ -59,17 +55,25 @@ extern void __flush_dcache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
-# define __flush_dcache_range(p,s) do { } while(0)
-# define __flush_dcache_page(p) do { } while(0)
-# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
+static inline void __flush_dcache_page(unsigned long va)
+{
+}
+static inline void __flush_dcache_range(unsigned long va, unsigned long sz)
+{
+}
+# define __flush_invalidate_dcache_all() __invalidate_dcache_all()
+# define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p)
# define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s)
#endif
#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long);
+extern void __invalidate_dcache_page_alias(unsigned long, unsigned long);
#else
static inline void __flush_invalidate_dcache_page_alias(unsigned long virt,
unsigned long phys) { }
+static inline void __invalidate_dcache_page_alias(unsigned long virt,
+ unsigned long phys) { }
#endif
#if defined(CONFIG_MMU) && (ICACHE_WAY_SIZE > PAGE_SIZE)
extern void __invalidate_icache_page_alias(unsigned long, unsigned long);
@@ -84,12 +88,30 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
*
* Pages can get remapped. Because this might change the 'color' of that page,
* we have to flush the cache before the PTE is changed.
- * (see also Documentation/cachetlb.txt)
+ * (see also Documentation/core-api/cachetlb.rst)
*/
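
The 'color' here is a page's index within one D-cache way: when DCACHE_WAY_SIZE exceeds PAGE_SIZE, two virtual mappings of the same physical page can land in different cache sets. A minimal illustrative sketch of the idea (not part of the patch; xtensa's real helpers are the DCACHE_ALIAS* macros in asm/page.h, and old_vaddr/new_vaddr/paddr are hypothetical names):

/* illustrative only: a page's cache color is its index within a D-cache way */
#define CACHE_COLOR(va)		(((va) & (DCACHE_WAY_SIZE - 1)) >> PAGE_SHIFT)

/* hypothetical caller: flush through the old alias before the PTE changes color */
if (CACHE_COLOR(old_vaddr) != CACHE_COLOR(new_vaddr))
	__flush_invalidate_dcache_page_alias(old_vaddr & PAGE_MASK, paddr);
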
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+#if defined(CONFIG_MMU) && \
+ ((DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP))
-#define flush_cache_all() \
+#ifdef CONFIG_SMP
+void flush_cache_all(void);
+void flush_cache_range(struct vm_area_struct*, ulong, ulong);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_page(struct vm_area_struct*,
+ unsigned long, unsigned long);
+#define flush_cache_all flush_cache_all
+#define flush_cache_range flush_cache_range
+#define flush_icache_range flush_icache_range
+#define flush_cache_page flush_cache_page
+#else
+#define flush_cache_all local_flush_cache_all
+#define flush_cache_range local_flush_cache_range
+#define flush_icache_range local_flush_icache_range
+#define flush_cache_page local_flush_cache_page
+#endif
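
With CONFIG_SMP the flush operations are declared as real functions so the maintenance can be broadcast to every core; without it they simply alias the local variants. A minimal sketch of how such an SMP wrapper can fan out to the local flush, assuming the generic on_each_cpu() helper (the actual xtensa implementation lives elsewhere in the architecture code and may differ):

#include <linux/smp.h>

static void ipi_flush_cache_all(void *unused)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	/* run the local flush on every online CPU and wait for completion */
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}
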
+
+#define local_flush_cache_all() \
do { \
__flush_invalidate_dcache_all(); \
__invalidate_icache_all(); \
@@ -98,51 +120,47 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
#define flush_cache_mm(mm) flush_cache_all()
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-#define flush_cache_vmap(start,end) flush_cache_all()
-#define flush_cache_vunmap(start,end) flush_cache_all()
-
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page*);
-extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
-extern void flush_cache_page(struct vm_area_struct*,
- unsigned long, unsigned long);
+#define flush_cache_vmap(start,end) flush_cache_all()
+#define flush_cache_vmap_early(start,end) do { } while (0)
+#define flush_cache_vunmap(start,end) flush_cache_all()
-#else
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
-#define flush_cache_vmap(start,end) do { } while (0)
-#define flush_cache_vunmap(start,end) do { } while (0)
+void local_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void local_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn);
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
+#else
-#define flush_cache_page(vma,addr,pfn) do { } while (0)
-#define flush_cache_range(vma,start,end) do { } while (0)
+#define flush_icache_range local_flush_icache_range
#endif
+#define flush_icache_user_range flush_icache_range
+
/* Ensure consistency between data and instruction cache. */
-#define flush_icache_range(start,end) \
+#define local_flush_icache_range(start, end) \
do { \
__flush_dcache_range(start, (end) - (start)); \
__invalidate_icache_range(start,(end) - (start)); \
} while (0)
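
local_flush_icache_range() first writes back any dirty D-cache lines covering the range and then invalidates the matching I-cache lines, so instructions that were just written as data are fetched correctly. A hedged usage sketch with hypothetical buffer and length names:

/* hypothetical example: make freshly written instructions visible for execution */
memcpy(code_buf, new_insns, len);
flush_icache_range((unsigned long)code_buf,
		   (unsigned long)code_buf + len);
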
-/* This is not required, see Documentation/cachetlb.txt */
-#define flush_icache_page(vma,page) do { } while (0)
-
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+#if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
extern void copy_to_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
extern void copy_from_user_page(struct vm_area_struct*, struct page*,
unsigned long, void*, const void*, unsigned long);
+#define copy_to_user_page copy_to_user_page
+#define copy_from_user_page copy_from_user_page
#else
@@ -158,100 +176,6 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
#endif
-#define XTENSA_CACHEBLK_LOG2 29
-#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2)
-#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2)
-
-#if XCHAL_HAVE_CACHEATTR
-static inline u32 xtensa_get_cacheattr(void)
-{
- u32 r;
- asm volatile(" rsr %0, cacheattr" : "=a"(r));
- return r;
-}
-
-static inline u32 xtensa_get_dtlb1(u32 addr)
-{
- u32 r = addr & XTENSA_CACHEBLK_MASK;
- return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
- & 0xF);
-}
-#else
-static inline u32 xtensa_get_dtlb1(u32 addr)
-{
- u32 r;
- asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
- asm volatile(" dsync");
- return r;
-}
-
-static inline u32 xtensa_get_cacheattr(void)
-{
- u32 r = 0;
- u32 a = 0;
- do {
- a -= XTENSA_CACHEBLK_SIZE;
- r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
- } while (a);
- return r;
-}
-#endif
-
-static inline int xtensa_need_flush_dma_source(u32 addr)
-{
- return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
-}
-
-static inline int xtensa_need_invalidate_dma_destination(u32 addr)
-{
- return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
-}
-
-static inline void flush_dcache_unaligned(u32 addr, u32 size)
-{
- u32 cnt;
- if (size) {
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt--) {
- asm volatile(" dhwb %0, 0" : : "a"(addr));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dsync");
- }
-}
-
-static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
-{
- int cnt;
- if (size) {
- asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt-- > 0) {
- asm volatile(" dhi %0, %1" : : "a"(addr),
- "n"(XCHAL_DCACHE_LINESIZE));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dhwbi %0, %1" : : "a"(addr),
- "n"(XCHAL_DCACHE_LINESIZE));
- asm volatile(" dsync");
- }
-}
-
-static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
-{
- u32 cnt;
- if (size) {
- cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
- + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
- while (cnt--) {
- asm volatile(" dhwbi %0, 0" : : "a"(addr));
- addr += XCHAL_DCACHE_LINESIZE;
- }
- asm volatile(" dsync");
- }
-}
+#include <asm-generic/cacheflush.h>
-#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
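
The "#define flush_cache_all flush_cache_all" style self-definitions above let the final include of asm-generic/cacheflush.h supply no-op defaults only for the hooks this architecture did not override. A simplified sketch of that fallback pattern, assuming the conventional #ifndef guards used by the generic header:

/* simplified sketch of the asm-generic fallback pattern */
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
	/* no-op when the architecture does not provide its own version */
}
#endif

#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif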