author		Guo Ren <guoren@linux.alibaba.com>		2021-06-06 17:20:50 +0200
committer	Palmer Dabbelt <palmerdabbelt@google.com>	2021-06-30 20:55:39 -0700
commit		3f1e782998cdf6dac037588b99b10b787b00810a (patch)
tree		c8b733eb062e7763841a9411b8c4acd9506f0fe6 /arch/riscv/mm
parent		70c7605c08c5979e5148085903bfed5feac09406 (diff)
riscv: add ASID-based tlbflushing methods
Implement optimized versions of the TLB flushing routines for systems using ASIDs. These are kept behind the use_asid_allocator static branch so that existing systems not using ASIDs are unaffected.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
[hch: rebased on top of previous cleanups; use the same algorithm as the non-ASID based code for local vs. global flushes; keep functions as local as possible]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
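The "static branch" mentioned above is the kernel's jump-label mechanism: the key defaults to false, so on hardware without usable ASIDs the new code costs only a patched-out jump. A minimal sketch of the pattern this commit relies on follows; example_flush() is a hypothetical caller added purely for illustration, not part of the commit:

	#include <linux/jump_label.h>

	/* Defaults to false: the ASID path is compiled in but patched
	 * out (effectively a NOP) until the allocator flips the key. */
	DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

	/* Hypothetical caller, for illustration only. */
	static void example_flush(void)
	{
		if (static_branch_unlikely(&use_asid_allocator)) {
			/* jump patched in at runtime: ASID-aware flush */
		} else {
			/* default straight-line code: flush by address only */
		}
	}

The allocator setup code in context.c presumably flips the key with static_branch_enable() once it decides the hardware reports enough ASID bits to be worth using.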
Diffstat (limited to 'arch/riscv/mm')
 -rw-r--r--  arch/riscv/mm/context.c   |  2 +-
 -rw-r--r--  arch/riscv/mm/tlbflush.c  | 47 +++++++++++++++++++++++++++-------
 2 files changed, 41 insertions(+), 8 deletions(-)
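The two helpers added to tlbflush.c below exploit the optional operands of sfence.vma defined by the RISC-V privileged spec: rs1 names a virtual address, rs2 an ASID, and x0 in either position means "all" (so "sfence.vma x0, asid" flushes every non-global entry for one address space, while "sfence.vma addr, asid" flushes a single page in it). For contrast, the pre-existing non-ASID local helpers live in arch/riscv/include/asm/tlbflush.h and are therefore outside this path-filtered diff; they look roughly like this (assumed shape, not part of the patch):

	/* Assumed definitions from asm/tlbflush.h, shown for contrast:
	 * with no ASID operand, these hit all address spaces. */
	static inline void local_flush_tlb_all(void)
	{
		__asm__ __volatile__ ("sfence.vma" : : : "memory");
	}

	static inline void local_flush_tlb_page(unsigned long addr)
	{
		__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
	}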
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 25cb406737d4..ee3459cb6750 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -18,7 +18,7 @@
 
 #ifdef CONFIG_MMU
 
-static DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
+DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
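Dropping the "static" qualifier lets tlbflush.c reference the key. The full commit presumably also publishes a declaration, most likely in asm/mmu_context.h (which the tlbflush.c hunk below starts including, and which lies outside this path-filtered diff), along these lines:

	/* Assumed declaration, not visible in this filtered diff. */
	DECLARE_STATIC_KEY_FALSE(use_asid_allocator);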
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index b458949fa8df..64f8201237c2 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -4,6 +4,24 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <asm/sbi.h>
+#include <asm/mmu_context.h>
+
+static inline void local_flush_tlb_all_asid(unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma x0, %0"
+			:
+			: "r" (asid)
+			: "memory");
+}
+
+static inline void local_flush_tlb_page_asid(unsigned long addr,
+		unsigned long asid)
+{
+	__asm__ __volatile__ ("sfence.vma %0, %1"
+			:
+			: "r" (addr), "r" (asid)
+			: "memory");
+}
 
 void flush_tlb_all(void)
 {
@@ -16,21 +34,36 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
 	struct cpumask *cmask = mm_cpumask(mm);
 	struct cpumask hmask;
 	unsigned int cpuid;
+	bool broadcast;
 
 	if (cpumask_empty(cmask))
 		return;
 
 	cpuid = get_cpu();
+	/* check if the tlbflush needs to be sent to other CPUs */
+	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+	if (static_branch_unlikely(&use_asid_allocator)) {
+		unsigned long asid = atomic_long_read(&mm->context.id);
 
-	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
-		/* local cpu is the only cpu present in cpumask */
-		if (size <= stride)
+		if (broadcast) {
+			riscv_cpuid_to_hartid_mask(cmask, &hmask);
+			sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
+						   start, size, asid);
+		} else if (size <= stride) {
+			local_flush_tlb_page_asid(start, asid);
+		} else {
+			local_flush_tlb_all_asid(asid);
+		}
+	} else {
+		if (broadcast) {
+			riscv_cpuid_to_hartid_mask(cmask, &hmask);
+			sbi_remote_sfence_vma(cpumask_bits(&hmask),
+					      start, size);
+		} else if (size <= stride) {
 			local_flush_tlb_page(start);
-		else
+		} else {
 			local_flush_tlb_all();
-	} else {
-		riscv_cpuid_to_hartid_mask(cmask, &hmask);
-		sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
+		}
 	}
 
 	put_cpu();
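For context, here is a sketch of how the arch flush entry points presumably funnel into __sbi_tlb_flush_range(), inferred from the signature visible in the hunk header; these wrappers sit outside the hunks shown, so the exact parameter values are assumptions. The idea: a whole-mm flush passes size = -1, which is always greater than stride and so takes the flush-all path, while a single-page flush passes size = stride = PAGE_SIZE and takes the one-page path:

	/* Assumed callers, sketched from the hunk header's signature;
	 * not part of the diff above. */
	void flush_tlb_mm(struct mm_struct *mm)
	{
		/* size of -1: always "> stride", flushes the whole space */
		__sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
	}

	void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
	{
		/* size == stride: takes the single-page path */
		__sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
	}

Either way, the broadcast decision is made once up front, so the ASID and non-ASID branches stay structurally identical: remote flushes go through the SBI firmware call, local ones use a single sfence.vma.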