summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorSuren Baghdasaryan <surenb@google.com>2023-02-27 09:36:21 -0800
committerAndrew Morton <akpm@linux-foundation.org>2023-04-05 20:02:59 -0700
commit457f67be5910a2b5f1fda8af06bfe4d3492a0a4f (patch)
treed28c0949fd9211d20dfbe3d50a1e8ed38470e05a
parenteeff9a5d47f89bc641034fea05501c8a6de131cb (diff)
mm: introduce vma detached flag
The per-VMA locking mechanism will search for a VMA under RCU protection and then, after locking it, has to ensure that the VMA was not removed from the VMA tree after we found it. To make this check efficient, introduce a vma->detached flag to mark VMAs which were removed from the VMA tree. Link: https://lkml.kernel.org/r/20230227173632.3292573-23-surenb@google.com Signed-off-by: Suren Baghdasaryan <surenb@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--include/linux/mm.h11
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--mm/mmap.c2
3 files changed, 16 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d6a2abc51e3d..d0f289bfef01 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -711,6 +711,14 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
}
+static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
+{
+ /* When detaching vma should be write-locked */
+ if (detached)
+ vma_assert_write_locked(vma);
+ vma->detached = detached;
+}
+
#else /* CONFIG_PER_VMA_LOCK */
static inline void vma_init_lock(struct vm_area_struct *vma) {}
@@ -721,6 +729,8 @@ static inline void vma_start_write(struct vm_area_struct *vma) {}
static inline bool vma_try_start_write(struct vm_area_struct *vma)
{ return true; }
static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
+static inline void vma_mark_detached(struct vm_area_struct *vma,
+ bool detached) {}
#endif /* CONFIG_PER_VMA_LOCK */
@@ -732,6 +742,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
vma->vm_mm = mm;
vma->vm_ops = &dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
+ vma_mark_detached(vma, false);
vma_init_lock(vma);
}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 843a45893991..3248ae45cb2e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -506,6 +506,9 @@ struct vm_area_struct {
#ifdef CONFIG_PER_VMA_LOCK
int vm_lock_seq;
struct rw_semaphore lock;
+
+ /* Flag to indicate areas detached from the mm->mm_mt tree */
+ bool detached;
#endif
/*
diff --git a/mm/mmap.c b/mm/mmap.c
index 18aed0ea6bd3..b42f58591b9a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -599,6 +599,7 @@ static inline void vma_complete(struct vma_prepare *vp,
if (vp->remove) {
again:
+ vma_mark_detached(vp->remove, true);
if (vp->file) {
uprobe_munmap(vp->remove, vp->remove->vm_start,
vp->remove->vm_end);
@@ -2276,6 +2277,7 @@ static inline int munmap_sidetree(struct vm_area_struct *vma,
if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
return -ENOMEM;
+ vma_mark_detached(vma, true);
if (vma->vm_flags & VM_LOCKED)
vma->vm_mm->locked_vm -= vma_pages(vma);