author     Lorenzo Stoakes <lorenzo.stoakes@oracle.com>    2025-07-17 17:55:54 +0100
committer  Andrew Morton <akpm@linux-foundation.org>       2025-07-24 19:12:29 -0700
commit     e49e76c20ba10e04b257f522e5a086db43d8f1c1
tree       3010d35046c6fe8ff380a75876042eea80883d54
parent     f256a7a4ca1aad688773fec1bd082a200395a234
mm/mremap: cleanup post-processing stage of mremap
Separate out the uffd bits so it's clear what's happening.

Don't bother setting vrm->mmap_locked after unlocking, because after this
we are done anyway.

The only time we drop the mmap lock is on VMA shrink, at which point
vrm->new_len will be < vrm->old_len and the operation will not be
performed anyway, so move this code out of the if (vrm->mmap_locked)
block.

All addresses returned by mremap() are page-aligned, so the
offset_in_page() check on ret seems only to be incorrectly trying to
detect whether an error occurred - explicitly check for this.

No functional change intended.

Link: https://lkml.kernel.org/r/ebb8f29650b8e343fe98fefc67b3a61a24d1e0f1.1752770784.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
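A minimal illustration of the return-value check being made explicit (not part of the patch; the helper name mremap_res_is_error() is hypothetical, only IS_ERR_VALUE() from <linux/err.h> and offset_in_page() from <linux/mm.h> are real kernel interfaces):

/*
 * mremap() returns either a page-aligned address on success or a negated
 * errno encoded in the unsigned return value on failure.  The old
 * offset_in_page(res) test only caught errors because errno values such as
 * -ENOMEM have non-zero low bits; IS_ERR_VALUE() states that intent directly.
 */
#include <linux/err.h>	/* IS_ERR_VALUE(), MAX_ERRNO */
#include <linux/mm.h>	/* offset_in_page() */

/* Hypothetical helper, for illustration only. */
static inline bool mremap_res_is_error(unsigned long res)
{
	/* res encodes an errno in the -MAX_ERRNO..-1 range on failure. */
	return IS_ERR_VALUE(res);
}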
-rw-r--r--  mm/mremap.c  22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index 60eb0ac8634b..53447761e55d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1729,6 +1729,15 @@ static int check_prep_vma(struct vma_remap_struct *vrm)
 	return 0;
 }
 
+static void notify_uffd(struct vma_remap_struct *vrm, unsigned long to)
+{
+	struct mm_struct *mm = current->mm;
+
+	userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
+	mremap_userfaultfd_complete(vrm->uf, vrm->addr, to, vrm->old_len);
+	userfaultfd_unmap_complete(mm, vrm->uf_unmap);
+}
+
 static unsigned long do_mremap(struct vma_remap_struct *vrm)
 {
 	struct mm_struct *mm = current->mm;
@@ -1754,18 +1763,13 @@ static unsigned long do_mremap(struct vma_remap_struct *vrm)
 	res = vrm_implies_new_addr(vrm) ? mremap_to(vrm) : mremap_at(vrm);
 
 out:
-	if (vrm->mmap_locked) {
+	if (vrm->mmap_locked)
 		mmap_write_unlock(mm);
-		vrm->mmap_locked = false;
-
-		if (!offset_in_page(res) && vrm->mlocked && vrm->new_len > vrm->old_len)
-			mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);
-	}
 
-	userfaultfd_unmap_complete(mm, vrm->uf_unmap_early);
-	mremap_userfaultfd_complete(vrm->uf, vrm->addr, res, vrm->old_len);
-	userfaultfd_unmap_complete(mm, vrm->uf_unmap);
+	if (!IS_ERR_VALUE(res) && vrm->mlocked && vrm->new_len > vrm->old_len)
+		mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);
 
+	notify_uffd(vrm, res);
 	return res;
 }
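For readability, the post-patch tail of do_mremap(), reconstructed from the hunk above (context lines plus additions only), reads:

out:
	if (vrm->mmap_locked)
		mmap_write_unlock(mm);

	if (!IS_ERR_VALUE(res) && vrm->mlocked && vrm->new_len > vrm->old_len)
		mm_populate(vrm->new_addr + vrm->old_len, vrm->delta);

	notify_uffd(vrm, res);
	return res;
}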