Diffstat (limited to 'arch/xtensa/include/asm/cacheflush.h')
-rw-r--r--	arch/xtensa/include/asm/cacheflush.h	43
1 file changed, 21 insertions(+), 22 deletions(-)
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 397d6a1a4224..a2b6bb5429f5 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -88,7 +88,7 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
  *
  * Pages can get remapped. Because this might change the 'color' of that page,
  * we have to flush the cache before the PTE is changed.
- * (see also Documentation/cachetlb.txt)
+ * (see also Documentation/core-api/cachetlb.rst)
  */
 
 #if defined(CONFIG_MMU) && \
@@ -100,6 +100,10 @@ void flush_cache_range(struct vm_area_struct*, ulong, ulong);
 void flush_icache_range(unsigned long start, unsigned long end);
 void flush_cache_page(struct vm_area_struct*,
 		unsigned long, unsigned long);
+#define flush_cache_all flush_cache_all
+#define flush_cache_range flush_cache_range
+#define flush_icache_range flush_icache_range
+#define flush_cache_page flush_cache_page
 #else
 #define flush_cache_all local_flush_cache_all
 #define flush_cache_range local_flush_cache_range
@@ -116,11 +120,18 @@ void flush_cache_page(struct vm_area_struct*,
 #define flush_cache_mm(mm)		flush_cache_all()
 #define flush_cache_dup_mm(mm)		flush_cache_mm(mm)
 
-#define flush_cache_vmap(start,end)	flush_cache_all()
-#define flush_cache_vunmap(start,end)	flush_cache_all()
+#define flush_cache_vmap(start,end)		flush_cache_all()
+#define flush_cache_vmap_early(start,end)	do { } while (0)
+#define flush_cache_vunmap(start,end)		flush_cache_all()
+
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page*);
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 void local_flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
@@ -129,22 +140,12 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 
 #else
 
-#define flush_cache_all()				do { } while (0)
-#define flush_cache_mm(mm)				do { } while (0)
-#define flush_cache_dup_mm(mm)				do { } while (0)
-
-#define flush_cache_vmap(start,end)			do { } while (0)
-#define flush_cache_vunmap(start,end)			do { } while (0)
-
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
-#define flush_dcache_page(page)				do { } while (0)
-
 #define flush_icache_range local_flush_icache_range
-#define flush_cache_page(vma, addr, pfn)		do { } while (0)
-#define flush_cache_range(vma, start, end)		do { } while (0)
 
 #endif
 
+#define flush_icache_user_range flush_icache_range
+
 /* Ensure consistency between data and instruction cache. */
 #define local_flush_icache_range(start, end)				\
 	do {								\
@@ -152,18 +153,14 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 		__invalidate_icache_range(start,(end) - (start));	\
 	} while (0)
 
-/* This is not required, see Documentation/cachetlb.txt */
-#define	flush_icache_page(vma,page)			do { } while (0)
-
-#define flush_dcache_mmap_lock(mapping)			do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)		do { } while (0)
-
 #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 extern void copy_to_user_page(struct vm_area_struct*, struct page*,
 		unsigned long, void*, const void*, unsigned long);
 extern void copy_from_user_page(struct vm_area_struct*, struct page*,
 		unsigned long, void*, const void*, unsigned long);
+#define copy_to_user_page copy_to_user_page
+#define copy_from_user_page copy_from_user_page
 
 #else
 
@@ -179,4 +176,6 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
 
 #endif
 
+#include <asm-generic/cacheflush.h>
+
 #endif /* _XTENSA_CACHEFLUSH_H */
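
Note: the `#define flush_cache_all flush_cache_all` style additions above are what make the new `#include <asm-generic/cacheflush.h>` at the bottom safe. The generic header supplies a no-op fallback for a hook only when no macro with that hook's name is already defined, so each self-referential define marks a hook that xtensa implements itself, and the locally defined no-op stubs removed by this diff can come from the generic header instead. Below is a minimal stand-alone sketch of that mechanism; it is not kernel code, and the demo bodies and main() are invented for illustration.

#include <stdio.h>

/* "arch" side: a real implementation exists, so claim the hook by
 * defining a macro with the hook's own name (it expands to itself). */
void flush_cache_all(void);
#define flush_cache_all flush_cache_all

/* "generic" side: mimic the asm-generic style, where every fallback
 * is wrapped in #ifndef so it never clobbers an arch implementation. */
#ifndef flush_cache_all
#define flush_cache_all() do { } while (0)	/* skipped: arch claimed it */
#endif

#ifndef flush_cache_mm
#define flush_cache_mm(mm) do { } while (0)	/* arch stayed silent: no-op */
#endif

void flush_cache_all(void)
{
	puts("arch-specific flush_cache_all() ran");
}

int main(void)
{
	flush_cache_all();	/* resolves to the arch function */
	flush_cache_mm(NULL);	/* resolves to the generic no-op */
	return 0;
}

The same marker pattern explains the added copy_to_user_page/copy_from_user_page defines and the flush_dcache_folio define: flush_dcache_page itself becomes a small inline that forwards to flush_dcache_folio(page_folio(page)), as shown in the hunk above.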
