author	Peter Zijlstra <peterz@infradead.org>	2018-09-20 10:54:04 +0200
committer	Ingo Molnar <mingo@kernel.org>	2019-04-03 10:33:01 +0200
commit	fa0aafb8acb684e68231ff0a547ed249f8dc31a5 (patch)
tree	f5ee01320b5d6700560258508fe0e31ceb6fac18 /include/asm-generic
parent	b3fa8ed4e48802e6ba0aa5f3283313a27dcbf46f (diff)
asm-generic/tlb: Remove tlb_flush_mmu_free()
As the comment notes, it is a potentially dangerous operation. Just use tlb_flush_mmu(), which will skip the (double) TLB invalidate if it really isn't needed anyway.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
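For readers tracking the API change, here is a minimal, hypothetical before/after sketch of the caller-side effect; the example_* functions are invented for illustration, and only tlb_flush_mmu_tlbonly(), tlb_flush_mmu_free() and tlb_flush_mmu() are the real interfaces this patch discusses:

	/* Before: the split sequence the removed comment warned about.
	 * The caller had to guarantee that no tlb_remove_page() ran
	 * between the invalidate and the free. */
	static void example_unmap_old(struct mmu_gather *tlb)
	{
		tlb_flush_mmu_tlbonly(tlb);	/* TLB invalidate only */
		tlb_flush_mmu_free(tlb);	/* free the queued pages */
	}

	/* After: tlb_flush_mmu() performs the invalidate (skipping it
	 * when it isn't needed, so there is no double flush to avoid)
	 * and then frees whatever pages are still batched. */
	static void example_unmap_new(struct mmu_gather *tlb)
	{
		tlb_flush_mmu(tlb);
	}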
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/tlb.h	| 10
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 2648a02a6b1b..ddd3d02be93d 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -67,16 +67,13 @@
* call before __tlb_remove_page*() to set the current page-size; implies a
* possible tlb_flush_mmu() call.
*
- * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly() / tlb_flush_mmu_free()
+ * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
*
* tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
* related state, like the range)
*
- * tlb_flush_mmu_free() - frees the queued pages; make absolutely
- * sure no additional tlb_remove_page()
- * calls happen between _tlbonly() and this.
- *
- * tlb_flush_mmu() - the above two calls.
+ * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ * whatever pages are still batched.
*
* - mmu_gather::fullmm
*
@@ -281,7 +278,6 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force);
-void tlb_flush_mmu_free(struct mmu_gather *tlb);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address,