author    Peter Zijlstra <peterz@infradead.org>    2018-08-22 17:30:13 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-08-22 18:22:04 -0700
commit    52a288c736669851f166544d4a0b93e1090d7e9b (patch)
tree      c65ef4f76102052b4ffe0ec40bec83578ea71c2f /mm
parent    815f0ddb346c196018d4d8f8f55c12b83da1de3f (diff)
x86/mm/tlb: Revert the recent lazy TLB patches
Revert commits:

  95b0e6357d3e x86/mm/tlb: Always use lazy TLB mode
  64482aafe55f x86/mm/tlb: Only send page table free TLB flush to lazy TLB CPUs
  ac0315896970 x86/mm/tlb: Make lazy TLB mode lazier
  61d0beb5796a x86/mm/tlb: Restructure switch_mm_irqs_off()
  2ff6ddf19c0e x86/mm/tlb: Leave lazy TLB mode at page table free time

In order to simplify the TLB invalidate fixes for x86 and unify the
parts that need backporting.  We'll try again later.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--    mm/memory.c    22
1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 19f47d7b9b86..d1dd43f8c1ce 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -326,20 +326,16 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+/*
+ * See the comment near struct mmu_table_batch.
+ */
+
static void tlb_remove_table_smp_sync(void *arg)
{
- struct mm_struct __maybe_unused *mm = arg;
- /*
- * On most architectures this does nothing. Simply delivering the
- * interrupt is enough to prevent races with software page table
- * walking like that done in get_user_pages_fast.
- *
- * See the comment near struct mmu_table_batch.
- */
- tlb_flush_remove_tables_local(mm);
+ /* Simply deliver the interrupt */
}
-static void tlb_remove_table_one(void *table, struct mmu_gather *tlb)
+static void tlb_remove_table_one(void *table)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
@@ -348,7 +344,7 @@ static void tlb_remove_table_one(void *table, struct mmu_gather *tlb)
* It is however sufficient for software page-table walkers that rely on
* IRQ disabling. See the comment near struct mmu_table_batch.
*/
- smp_call_function(tlb_remove_table_smp_sync, tlb->mm, 1);
+ smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
__tlb_remove_table(table);
}
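
For readers following the synchronization argument in the hunk above: the empty IPI handler is sufficient because lockless page-table walkers run with interrupts disabled, so tlb_remove_table_one() cannot finish its smp_call_function() broadcast while any such walk is in flight. A minimal sketch of the walker side, assuming the classic get_user_pages_fast() structure (the function name and body here are illustrative, not a quote of gup code):

#include <linux/irqflags.h>

/* Hypothetical walker, sketched to show the IRQs-off protocol. */
static int gup_fast_walk_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Walk the page tables locklessly here.  tlb_remove_table_one()
	 * cannot return from smp_call_function() until this CPU re-enables
	 * interrupts, so any table observed during the walk stays allocated.
	 */
	local_irq_restore(flags);
	return 0;
}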
@@ -369,8 +365,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
{
struct mmu_table_batch **batch = &tlb->batch;
- tlb_flush_remove_tables(tlb->mm);
-
if (*batch) {
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
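
The call_rcu_sched() path defers the actual free until a grace period in which every CPU has left any IRQs-off region, which synchronizes with the same walkers as the IPI fallback. For reference, the callback registered here looks roughly like this in the same era's mm/memory.c (a sketch; verify against your tree):

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	/* The grace period has elapsed; free every batched table. */
	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}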
@@ -393,7 +387,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
- tlb_remove_table_one(table, tlb);
+ tlb_remove_table_one(table);
return;
}
(*batch)->nr = 0;
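
For completeness, the batch these hunks manipulate is declared in include/asm-generic/tlb.h; a sketch of the era's definition (reproduced from memory, treat as approximate):

struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];	/* fills the rest of the page */
};

#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

When the GFP_NOWAIT allocation above fails, tlb_remove_table() falls back to tlb_remove_table_one(), trading the batched RCU free for a synchronous IPI broadcast; when it succeeds, one page batches roughly 500 table pointers on a 4K-page, 64-bit system before tlb_table_flush() is forced.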