From b8aa9d9d95b3b4b60d42ac95f65d33a92527aef3 Mon Sep 17 00:00:00 2001
From: Wei Yang
Date: Thu, 6 Aug 2020 23:23:40 -0700
Subject: mm/mremap: it is sure to have enough space when extent meets
 requirement

Patch series "mm/mremap: cleanup move_page_tables() a little", v5.

move_page_tables() tries to move page tables by PMD or by PTE.  The root
constraint is that to move a PMD, both the old and the new range must be
PMD aligned.  But the current code calculates the old range and the new
range separately, which leads to some redundant checks and calculations.

This cleanup consolidates the range check in one place to reduce the
extra range handling.

This patch (of 3):

old_end is passed to these two functions to check whether there is
enough space to do the move, but this check has already been done before
they are invoked.  The two functions are only invoked when extent meets
the requirement, and the caller performs one check beforehand:

	if (extent > old_end - old_addr)
		extent = old_end - old_addr;

This implies that (old_end - old_addr) can never fail the check in these
two functions, so the old_end parameter and the in-function check can be
dropped.

Signed-off-by: Wei Yang
Signed-off-by: Andrew Morton
Tested-by: Dmitry Osipenko
Acked-by: Kirill A. Shutemov
Cc: Vlastimil Babka
Cc: Yang Shi
Cc: Thomas Hellstrom (VMware)
Cc: Anshuman Khandual
Cc: Sean Christopherson
Cc: Wei Yang
Cc: Peter Xu
Cc: Aneesh Kumar K.V
Cc: Matthew Wilcox
Cc: Thomas Hellstrom
Link: http://lkml.kernel.org/r/20200710092835.56368-1-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200710092835.56368-2-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200708095028.41706-1-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200708095028.41706-2-richard.weiyang@linux.alibaba.com
Signed-off-by: Linus Torvalds
---
 mm/mremap.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index 6b153dc05fe4..4f434062d154 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -193,15 +193,13 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
 #ifdef CONFIG_HAVE_MOVE_PMD
 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
-		  unsigned long new_addr, unsigned long old_end,
-		  pmd_t *old_pmd, pmd_t *new_pmd)
+		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
 	spinlock_t *old_ptl, *new_ptl;
 	struct mm_struct *mm = vma->vm_mm;
 	pmd_t pmd;
 
-	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
-	    || old_end - old_addr < PMD_SIZE)
+	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK))
 		return false;
 
 	/*
@@ -292,7 +290,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			if (need_rmap_locks)
 				take_rmap_locks(vma);
 			moved = move_huge_pmd(vma, old_addr, new_addr,
-					      old_end, old_pmd, new_pmd);
+					      old_pmd, new_pmd);
 			if (need_rmap_locks)
 				drop_rmap_locks(vma);
 			if (moved)
@@ -312,7 +310,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			if (need_rmap_locks)
 				take_rmap_locks(vma);
 			moved = move_normal_pmd(vma, old_addr, new_addr,
-					old_end, old_pmd, new_pmd);
+					old_pmd, new_pmd);
 			if (need_rmap_locks)
 				drop_rmap_locks(vma);
 			if (moved)
--
cgit
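
To see the invariant concretely, below is a minimal standalone userspace
sketch of the caller-side clamp described in the commit message.  It is a
simplified model, not kernel code: clamp_extent() and the PMD_SIZE value
are assumptions of this sketch, not the kernel's definitions.

	#include <assert.h>
	#include <stdio.h>

	#define PMD_SIZE (2UL << 20)	/* 2 MiB, a typical x86-64 value */

	/* Model of the caller-side clamp in move_page_tables(). */
	static unsigned long clamp_extent(unsigned long extent,
					  unsigned long old_addr,
					  unsigned long old_end)
	{
		if (extent > old_end - old_addr)
			extent = old_end - old_addr; /* extent <= remaining range */
		return extent;
	}

	int main(void)
	{
		unsigned long old_addr = 0x40000000UL;
		unsigned long old_end  = old_addr + 4 * PMD_SIZE;
		unsigned long extent   = clamp_extent(PMD_SIZE, old_addr, old_end);

		/*
		 * move_normal_pmd() is reached only when extent == PMD_SIZE.
		 * The clamp guarantees extent <= old_end - old_addr, so the
		 * dropped "old_end - old_addr < PMD_SIZE" check cannot fire.
		 */
		if (extent == PMD_SIZE)
			assert(old_end - old_addr >= PMD_SIZE);

		printf("extent=%#lx remaining=%#lx\n",
		       extent, old_end - old_addr);
		return 0;
	}

The same reasoning covers move_huge_pmd(): the caller only invokes it for
a full huge-page extent, which the clamp likewise bounds by
old_end - old_addr.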