Diffstat (limited to 'arch/mips/include/asm/cacheflush.h')
-rw-r--r--  arch/mips/include/asm/cacheflush.h | 68
1 file changed, 41 insertions, 27 deletions
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 69468ded2828..5099c1b65a58 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -25,10 +25,24 @@
*
* MIPS specific flush operations:
*
- * - flush_cache_sigtramp() flush signal trampoline
* - flush_icache_all() flush the entire instruction cache
* - flush_data_cache_page() flushes a page from the data cache
+ * - __flush_icache_user_range(start, end) flushes range of user instructions
*/
+
+ /*
+ * This flag is used to indicate that the page pointed to by a pte
+ * is dirty and requires cleaning before returning it to the user.
+ */
+#define PG_dcache_dirty PG_arch_1
+
+#define folio_test_dcache_dirty(folio) \
+ test_bit(PG_dcache_dirty, &(folio)->flags.f)
+#define folio_set_dcache_dirty(folio) \
+ set_bit(PG_dcache_dirty, &(folio)->flags.f)
+#define folio_clear_dcache_dirty(folio) \
+ clear_bit(PG_dcache_dirty, &(folio)->flags.f)
+
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
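The PG_dcache_dirty bit and its accessors move to the top of the header and become folio-based. A rough sketch of how the flag is meant to be consumed, loosely modelled on the deferred flush done by the MIPS cache code; the function name and the unconditional flush call below are illustrative assumptions, not code from this patch:

#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Hypothetical consumer of PG_dcache_dirty: a folio that was only marked
 * dirty by flush_dcache_folio() (the !cpu_has_ic_fills_f_dc branch, see
 * below) still needs its real writeback before user space can observe the
 * data through another alias. */
static void example_settle_dcache(struct folio *folio)
{
	if (folio_test_dcache_dirty(folio)) {
		__flush_dcache_folio_pages(folio, folio_page(folio, 0),
					   folio_nr_pages(folio));
		folio_clear_dcache_dirty(folio);
	}
}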
@@ -36,14 +50,27 @@ extern void (*flush_cache_mm)(struct mm_struct *mm);
extern void (*flush_cache_range)(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
-extern void __flush_dcache_page(struct page *page);
+void __flush_dcache_folio_pages(struct folio *folio, struct page *page, unsigned int nr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_folio(struct folio *folio)
+{
+ if (cpu_has_dc_aliases)
+ __flush_dcache_folio_pages(folio, folio_page(folio, 0),
+ folio_nr_pages(folio));
+ else if (!cpu_has_ic_fills_f_dc)
+ folio_set_dcache_dirty(folio);
+}
+#define flush_dcache_folio flush_dcache_folio
+
static inline void flush_dcache_page(struct page *page)
{
- if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
- __flush_dcache_page(page);
+ struct folio *folio = page_folio(page);
+ if (cpu_has_dc_aliases)
+ __flush_dcache_folio_pages(folio, page, 1);
+ else if (!cpu_has_ic_fills_f_dc)
+ folio_set_dcache_dirty(folio);
}
#define flush_dcache_mmap_lock(mapping) do { } while (0)
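For callers the usual rule from Documentation/core-api/cachetlb.rst is unchanged: code that writes into a page-cache folio behind user space's back calls flush_dcache_folio() afterwards. A minimal, hypothetical caller (helper name and buffer handling are made up for illustration, and len is assumed to fit in the first page of the folio):

#include <linux/highmem.h>
#include <linux/string.h>

static void example_fill_folio(struct folio *folio, const void *src, size_t len)
{
	void *dst = kmap_local_folio(folio, 0);

	memcpy(dst, src, len);
	kunmap_local(dst);

	/* On MIPS this flushes immediately when D-cache aliases exist,
	 * or only sets PG_dcache_dirty for a deferred flush otherwise. */
	flush_dcache_folio(folio);
}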
@@ -58,13 +85,12 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
__flush_anon_page(page, vmaddr);
}
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
-}
-
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*__flush_icache_user_range)(unsigned long start,
+ unsigned long end);
+extern void (*__local_flush_icache_user_range)(unsigned long start,
+ unsigned long end);
extern void (*__flush_cache_vmap)(void);
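The new __flush_icache_user_range() pointers sit next to flush_icache_range(), separating user-address flushes from the kernel-address case. A hedged sketch of the kernel-address case only; the function and buffer are invented for the example:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Illustrative only: after storing instructions into a kernel buffer that
 * will be executed (e.g. a generated trampoline), write back the D-cache
 * and invalidate the I-cache over that range. */
static void example_install_insns(void *dst, const u32 *insns, size_t bytes)
{
	memcpy(dst, insns, bytes);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + bytes);
}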
@@ -74,6 +100,8 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
__flush_cache_vmap();
}
+#define flush_cache_vmap_early(start, end) do { } while (0)
+
extern void (*__flush_cache_vunmap)(void);
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
@@ -90,36 +118,22 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len);
-extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
-extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
-/*
- * This flag is used to indicate that the page pointed to by a pte
- * is dirty and requires cleaning before returning it to the user.
- */
-#define PG_dcache_dirty PG_arch_1
-
-#define Page_dcache_dirty(page) \
- test_bit(PG_dcache_dirty, &(page)->flags)
-#define SetPageDcacheDirty(page) \
- set_bit(PG_dcache_dirty, &(page)->flags)
-#define ClearPageDcacheDirty(page) \
- clear_bit(PG_dcache_dirty, &(page)->flags)
-
/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
+extern void *kmap_noncoherent(struct page *page, unsigned long addr);
-#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
+static inline void kunmap_noncoherent(void)
{
- BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
+ kunmap_coherent();
}
+#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
/*
* For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
* cache writeback and invalidate operation.
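flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are the helpers this comment refers to; their documented role (Documentation/core-api/cachetlb.rst) is to bracket I/O on memory the kernel also touches through a vmap() alias. A hypothetical driver-side sketch, with the actual I/O submission left out:

#include <linux/highmem.h>

static void example_before_write_io(void *vmap_buf, int len)
{
	/* make data written through the vmap alias visible to the
	 * underlying physical pages before the device reads them */
	flush_kernel_vmap_range(vmap_buf, len);
}

static void example_after_read_io(void *vmap_buf, int len)
{
	/* discard possibly stale lines for the vmap alias once the
	 * device has filled the underlying pages */
	invalidate_kernel_vmap_range(vmap_buf, len);
}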