Diffstat (limited to 'arch/riscv/include/asm/tlbflush.h')
| -rw-r--r-- | arch/riscv/include/asm/tlbflush.h | 65 |
1 files changed, 41 insertions, 24 deletions
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 394cfbccdcd9..eed0abc40514 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -9,6 +9,10 @@
 
 #include <linux/mm_types.h>
 #include <asm/smp.h>
+#include <asm/errata_list.h>
+
+#define FLUSH_TLB_MAX_SIZE	((unsigned long)-1)
+#define FLUSH_TLB_NO_ASID	((unsigned long)-1)
 
 #ifdef CONFIG_MMU
 static inline void local_flush_tlb_all(void)
@@ -16,41 +20,54 @@ static inline void local_flush_tlb_all(void)
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
 }
 
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	if (asid != FLUSH_TLB_NO_ASID)
+		ALT_SFENCE_VMA_ASID(asid);
+	else
+		local_flush_tlb_all();
+}
+
 /* Flush one page from local TLB */
 static inline void local_flush_tlb_page(unsigned long addr)
 {
-	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
+	ALT_SFENCE_VMA_ADDR(addr);
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+					     unsigned long asid)
+{
+	if (asid != FLUSH_TLB_NO_ASID)
+		ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
+	else
+		local_flush_tlb_page(addr);
 }
-#else /* CONFIG_MMU */
-#define local_flush_tlb_all()			do { } while (0)
-#define local_flush_tlb_page(addr)		do { } while (0)
-#endif /* CONFIG_MMU */
 
-#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end, unsigned int page_size);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
-#else /* CONFIG_SMP && CONFIG_MMU */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			 unsigned long end);
+void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			 unsigned long end);
+#endif
 
-#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
+bool arch_tlbbatch_should_defer(struct mm_struct *mm);
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+			       struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	local_flush_tlb_all();
-}
-
-#define flush_tlb_mm(mm) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
-
-/* Flush a range of kernel pages */
-static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
-{
-	flush_tlb_all();
-}
+extern unsigned long tlb_flush_all_threshold;
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all()			do { } while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_TLBFLUSH_H */
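The new helpers in this diff share one convention: an ASID argument equal to FLUSH_TLB_NO_ASID (an all-ones value) means "no ASID supplied", and the helper falls back to the broader non-ASID flush. The standalone C sketch below only illustrates that sentinel-dispatch pattern outside the kernel; the stub flush_* functions and the main() driver are hypothetical stand-ins for the ALT_SFENCE_VMA_* alternatives and are not part of the header shown above.

#include <stdio.h>

/* Sentinel mirroring FLUSH_TLB_NO_ASID in the diff: an all-ones value
 * means "no ASID supplied, fall back to the non-ASID flush". */
#define FLUSH_TLB_NO_ASID ((unsigned long)-1)

/* Hypothetical stand-ins for the sfence.vma variants; they only log. */
static void flush_all(void)                    { puts("sfence.vma (all)"); }
static void flush_all_asid(unsigned long asid) { printf("sfence.vma asid=%lu\n", asid); }
static void flush_page(unsigned long addr)     { printf("sfence.vma addr=%#lx\n", addr); }
static void flush_page_asid(unsigned long addr, unsigned long asid)
{
	printf("sfence.vma addr=%#lx asid=%lu\n", addr, asid);
}

/* Same dispatch shape as local_flush_tlb_all_asid() in the diff:
 * ASID-scoped flush when an ASID is given, global flush otherwise. */
static void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		flush_all_asid(asid);
	else
		flush_all();
}

/* Same dispatch shape as local_flush_tlb_page_asid() in the diff. */
static void local_flush_tlb_page_asid(unsigned long addr, unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		flush_page_asid(addr, asid);
	else
		flush_page(addr);
}

int main(void)
{
	local_flush_tlb_page_asid(0x3000, 7);                 /* ASID-scoped page flush */
	local_flush_tlb_page_asid(0x3000, FLUSH_TLB_NO_ASID); /* falls back to plain page flush */
	local_flush_tlb_all_asid(FLUSH_TLB_NO_ASID);          /* falls back to a full flush */
	return 0;
}

Using a single all-ones sentinel keeps one call site for both the per-ASID and the global case, which matches how the kernel-side helpers above avoid duplicating their callers.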
