author     Yicong Yang <yangyicong@hisilicon.com>     2023-08-01 20:42:03 +0800
committer  Andrew Morton <akpm@linux-foundation.org>  2023-08-21 13:37:32 -0700
commit     6a718bd2ed4a589e7e50e7d028d24e087139dd03 (patch)
tree       8d6114bf19840fdbe7b7d8b7a3ab9b6e2c3681c7 /arch
parent     ebddd111fcd13fefd7350f77201dfc5605672909 (diff)
arm64: tlbflush: add some comments for TLB batched flushing
Add comments for arch_flush_tlb_batched_pending() and arch_tlbbatch_flush()
to illustrate why only a DSB is needed.

Link: https://lkml.kernel.org/r/20230801124203.62164-1-yangyicong@huawei.com
Cc: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Barry Song <21cnbao@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
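For context, a minimal sketch of the two flushing strategies the new comments contrast. This is illustrative only and not part of the patch: unmap_pages_unbatched() and unmap_pages_batched() are hypothetical helpers, while flush_tlb_page(), arch_tlbbatch_add_pending() and arch_tlbbatch_flush() are the existing arm64 interfaces this patch documents.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical helper: unbatched unmap, every page pays a full TLBI; DSB. */
static void unmap_pages_unbatched(struct vm_area_struct *vma,
				  unsigned long start, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++)
		flush_tlb_page(vma, start + i * PAGE_SIZE);	/* TLBI + DSB each time */
}

/* Hypothetical helper: batched unmap, a TLBI per page but only one DSB. */
static void unmap_pages_batched(struct arch_tlbflush_unmap_batch *batch,
				struct mm_struct *mm,
				unsigned long start, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++)
		arch_tlbbatch_add_pending(batch, mm, start + i * PAGE_SIZE);	/* TLBI only */
	arch_tlbbatch_flush(batch);	/* one dsb(ish) waits for all of them */
}

The second form is what the batched reclaim path amounts to: the cost of waiting on a DSB is paid once per batch rather than once per page.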
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 15 +
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 84a05a0bd2b6..55b50e1d4a84 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -304,11 +304,26 @@ static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *b
__flush_tlb_page_nosync(mm, uaddr);
}
+/*
+ * If mprotect/munmap/etc. occurs during TLB batched flushing, we need to
+ * synchronise all the TLBIs issued with a DSB to avoid the race mentioned in
+ * flush_tlb_batched_pending().
+ */
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
dsb(ish);
}
+/*
+ * To support batched TLB flushing when unmapping multiple pages, we only
+ * issue the TLBI for each page in arch_tlbbatch_add_pending() and wait for
+ * the completion at the end in arch_tlbbatch_flush(). Since a TLBI has
+ * already been issued for each page, only a DSB is needed to synchronise
+ * its effect on the other CPUs.
+ *
+ * This saves the time spent waiting on a DSB compared to issuing a
+ * TLBI;DSB sequence for each page.
+ */
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
dsb(ish);
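As a rough usage sketch of the first comment added above (the arch_flush_tlb_batched_pending() side): the mprotect/munmap path waits for any batched, not-yet-synchronised TLBIs before touching the PTEs. change_ptes_sketch() is a made-up stand-in for the real callers of flush_tlb_batched_pending() in the generic mm code; declarations and includes are omitted, only the ordering is the point.

/*
 * Illustrative sketch only -- not from this patch. change_ptes_sketch() is a
 * hypothetical stand-in for the mprotect/munmap-side callers of
 * flush_tlb_batched_pending().
 */
static void change_ptes_sketch(struct mm_struct *mm)
{
	/*
	 * Reclaim may have issued batched TLBIs for this mm without a DSB
	 * yet. Wait for them before modifying or reusing the PTEs; when a
	 * batched flush is pending, this reaches the dsb(ish) in
	 * arch_flush_tlb_batched_pending() above.
	 */
	flush_tlb_batched_pending(mm);

	/* ... now safe to change protections or unmap ... */
}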