author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2016-12-12 16:42:43 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 18:55:07 -0800
commit	692a68c1544d6be4ba7c6e929e9c7b2ba0447b91 (patch)
tree	a8d1ee7cb797cb64a260c229b8ce67926657166a /mm/memory.c
parent	07e326610e5634e5038fce32fff370949eb42101 (diff)
mm: remove the page size change check in tlb_remove_page
Now that we check for page size change early in the loop, we can partially revert e9d55e157034a ("mm: change the interface for __tlb_remove_page").

This simplifies the code considerably by removing the need to track the last address with which we adjusted the range. We also go back to the older way of filling the mmu_gather array, i.e., we add an entry and then check whether the gather batch is full.

Link: http://lkml.kernel.org/r/20161026084839.27299-6-aneesh.kumar@linux.vnet.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
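The "add an entry, then check whether the batch is full" pattern the message describes can be sketched in plain C. This is an illustrative sketch only: demo_batch, demo_add, and DEMO_BATCH_MAX are hypothetical stand-ins, not the kernel's mmu_gather types; the return value mirrors the post-patch contract of __tlb_remove_page_size(), where true means the caller must flush.

#include <stdbool.h>
#include <stddef.h>

#define DEMO_BATCH_MAX 8

struct demo_batch {
	size_t nr;                   /* entries currently queued */
	void *pages[DEMO_BATCH_MAX]; /* gathered pages awaiting a flush */
};

/*
 * Queue a page and report whether the batch just became full.  Because
 * the entry is added before the full check, the caller never has to
 * remember a leftover "pending" page when a flush is forced.
 */
static bool demo_add(struct demo_batch *batch, void *page)
{
	batch->pages[batch->nr++] = page;   /* add the entry first... */
	return batch->nr == DEMO_BATCH_MAX; /* ...then check for full */
}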
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	21
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index eae20eb66bfc..0a72f821ccdc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -300,15 +300,14 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
struct mmu_gather_batch *batch;
VM_BUG_ON(!tlb->end);
-
- if (!tlb->page_size)
- tlb->page_size = page_size;
- else {
- if (page_size != tlb->page_size)
- return true;
- }
+ VM_WARN_ON(tlb->page_size != page_size);
batch = tlb->active;
+ /*
+ * Add the page and check if we are full. If so
+ * force a flush.
+ */
+ batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return true;
@@ -316,7 +315,6 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
}
VM_BUG_ON_PAGE(batch->nr > batch->max, page);
- batch->pages[batch->nr++] = page;
return false;
}
@@ -1122,7 +1120,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
pte_t *start_pte;
pte_t *pte;
swp_entry_t entry;
- struct page *pending_page = NULL;
tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
again:
@@ -1177,7 +1174,6 @@ again:
print_bad_pte(vma, addr, ptent, page);
if (unlikely(__tlb_remove_page(tlb, page))) {
force_flush = 1;
- pending_page = page;
addr += PAGE_SIZE;
break;
}
@@ -1218,11 +1214,6 @@ again:
if (force_flush) {
force_flush = 0;
tlb_flush_mmu_free(tlb);
- if (pending_page) {
- /* remove the page with new size */
- __tlb_remove_pte_page(tlb, pending_page);
- pending_page = NULL;
- }
if (addr != end)
goto again;
}
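For context, the simplified caller flow that the zap_pte_range() hunks above converge on can be sketched as follows, reusing the hypothetical demo_batch/demo_add from the earlier sketch; demo_flush() stands in for tlb_flush_mmu_free(). The key point is that the page which filled the batch is already queued, so after the flush the loop simply resumes, with no pending_page to replay.

/* Hypothetical flush helper standing in for tlb_flush_mmu_free(). */
static void demo_flush(struct demo_batch *batch)
{
	/* the real code would free batch->pages[0..nr) here */
	batch->nr = 0;
}

static void demo_zap_range(struct demo_batch *batch, void **pages, size_t n)
{
	size_t i = 0;
again:
	while (i < n) {
		if (demo_add(batch, pages[i++]))
			break;  /* batch full: force a flush */
	}
	demo_flush(batch);
	if (i < n)
		goto again;     /* resume; nothing left over to replay */
}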