Diffstat (limited to 'arch/riscv/include/asm/tlbflush.h')
-rw-r--r--  arch/riscv/include/asm/tlbflush.h | 98
1 file changed, 46 insertions(+), 52 deletions(-)
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 54fee0cadb1e..eed0abc40514 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -1,15 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
  * Copyright (C) 2012 Regents of the University of California
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #ifndef _ASM_RISCV_TLBFLUSH_H
@@ -17,63 +9,65 @@
 
 #include <linux/mm_types.h>
 #include <asm/smp.h>
+#include <asm/errata_list.h>
 
-/*
- * Flush entire local TLB. 'sfence.vma' implicitly fences with the instruction
- * cache as well, so a 'fence.i' is not necessary.
- */
+#define FLUSH_TLB_MAX_SIZE	((unsigned long)-1)
+#define FLUSH_TLB_NO_ASID	((unsigned long)-1)
+
+#ifdef CONFIG_MMU
 static inline void local_flush_tlb_all(void)
 {
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
 }
 
-/* Flush one page from local TLB */
-static inline void local_flush_tlb_page(unsigned long addr)
+static inline void local_flush_tlb_all_asid(unsigned long asid)
 {
-	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
+	if (asid != FLUSH_TLB_NO_ASID)
+		ALT_SFENCE_VMA_ASID(asid);
+	else
+		local_flush_tlb_all();
 }
 
-#ifndef CONFIG_SMP
-
-#define flush_tlb_all() local_flush_tlb_all()
-#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
+/* Flush one page from local TLB */
+static inline void local_flush_tlb_page(unsigned long addr)
 {
-	local_flush_tlb_all();
+	ALT_SFENCE_VMA_ADDR(addr);
 }
 
-#define flush_tlb_mm(mm) flush_tlb_all()
-
-#else /* CONFIG_SMP */
-
-#include <asm/sbi.h>
-
-static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
-				     unsigned long size)
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+					     unsigned long asid)
 {
-	struct cpumask hmask;
-
-	cpumask_clear(&hmask);
-	riscv_cpuid_to_hartid_mask(cmask, &hmask);
-	sbi_remote_sfence_vma(hmask.bits, start, size);
+	if (asid != FLUSH_TLB_NO_ASID)
+		ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
+	else
+		local_flush_tlb_page(addr);
 }
 
-#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
-#define flush_tlb_range(vma, start, end) \
-	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-#define flush_tlb_mm(mm) \
-	remote_sfence_vma(mm_cpumask(mm), 0, -1)
-
-#endif /* CONFIG_SMP */
-
-/* Flush a range of kernel pages */
-static inline void flush_tlb_kernel_range(unsigned long start,
-	unsigned long end)
-{
-	flush_tlb_all();
-}
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end, unsigned int page_size);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			 unsigned long end);
+void flush_pud_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			 unsigned long end);
+#endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm);
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+			       struct mm_struct *mm, unsigned long start, unsigned long end);
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+
+extern unsigned long tlb_flush_all_threshold;
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all()			do { } while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_TLBFLUSH_H */
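Reviewer note: the new ASID-aware helpers wrap their fences in the ALT_SFENCE_VMA_* alternatives from <asm/errata_list.h>, so vendor errata can patch the encodings at boot. As a rough sketch (not part of the patch), on hardware with no relevant errata these alternatives are assumed to reduce to the plain sfence.vma forms below, where rs1 selects the virtual address and rs2 the ASID:

```c
/*
 * Illustrative sketch only: assumed no-errata expansion of the
 * ALT_SFENCE_VMA_ASID / ALT_SFENCE_VMA_ADDR_ASID alternatives.
 * Function names here are hypothetical, not kernel API.
 */
static inline void sfence_vma_asid(unsigned long asid)
{
	/* rs1 = x0: flush all addresses for this ASID */
	__asm__ __volatile__ ("sfence.vma x0, %0" : : "r" (asid) : "memory");
}

static inline void sfence_vma_addr_asid(unsigned long addr, unsigned long asid)
{
	/* rs1 = virtual address, rs2 = ASID: flush one page for one ASID */
	__asm__ __volatile__ ("sfence.vma %0, %1"
			      : : "r" (addr), "r" (asid) : "memory");
}
```

The FLUSH_TLB_NO_ASID sentinel keeps callers on the non-ASID path (a plain `sfence.vma` or `sfence.vma addr`) when no address-space identifier is in use, so the fallback behavior matches the old helpers.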
