Diffstat (limited to 'arch/arm64/include/asm/tlbflush.h')
-rw-r--r-- | arch/arm64/include/asm/tlbflush.h | 11 |
1 files changed, 0 insertions, 11 deletions
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index aa9efee17277..18a5dc0c9a54 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -323,17 +323,6 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 }
 
 /*
- * If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
- * all the previously issued TLBIs targeting mm have completed. But since we
- * can be executing on a remote CPU, a DSB cannot guarantee this like it can
- * for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
- */
-static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
-{
-	flush_tlb_mm(mm);
-}
-
-/*
  * To support TLB batched flush for multiple pages unmapping, we only send
  * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
  * completion at the end in arch_tlbbatch_flush(). Since we've already issued
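
The comment being removed explains why the arm64 hook had to flush the whole mm: the deferred TLBIs may have been issued from another CPU, so a local DSB cannot order against them the way it can in arch_tlbbatch_flush(). The sketch below is a minimal, self-contained illustration of that calling pattern. It is not kernel code: mm_struct, flush_tlb_mm() and the change_protection()-style caller are stubbed or hypothetical here, and only the shape of arch_flush_tlb_batched_pending() is taken from the diff.

/* Illustrative sketch only: stub types mimic the interfaces named in the
 * diff; the caller below is hypothetical. */
#include <stdbool.h>
#include <stdio.h>

struct mm_struct {
	bool tlb_flush_batched;	/* TLBIs issued but not yet waited for */
};

static void flush_tlb_mm(struct mm_struct *mm)
{
	/* stand-in for the real per-mm TLB invalidation */
	printf("flush entire mm\n");
	mm->tlb_flush_batched = false;
}

/* The hook removed by this patch: pending TLBIs may have been issued on a
 * remote CPU, so a local barrier cannot complete them; flushing the whole
 * mm is the only safe option. */
static void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

/* Hypothetical mprotect/munmap-style path: before touching the page tables
 * it must make sure earlier batched, still-pending invalidations cannot
 * leave stale TLB entries behind. */
static void change_protection(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched)
		arch_flush_tlb_batched_pending(mm);
	/* ... proceed to update the page tables ... */
}

int main(void)
{
	struct mm_struct mm = { .tlb_flush_batched = true };

	change_protection(&mm);
	return 0;
}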