Diffstat (limited to 'mm/rmap.c')
 -rw-r--r--  mm/rmap.c | 3293
 1 file changed, 2261 insertions(+), 1032 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df4f71a..f955f02d570e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -20,32 +20,44 @@
 /*
  * Lock ordering in mm:
  *
- * inode->i_mutex	(while writing or truncating, not reading or faulting)
- *   mm->mmap_sem
- *     page->flags PG_locked (lock_page)
- *       mapping->i_mmap_mutex
- *         anon_vma->rwsem
- *           mm->page_table_lock or pte_lock
- *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
- *             swap_lock (in swap_duplicate, swap_info_get)
- *               mmlist_lock (in mmput, drain_mmlist and others)
- *               mapping->private_lock (in __set_page_dirty_buffers)
- *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
- *               bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
- *                 sb_lock (within inode_lock in fs/fs-writeback.c)
- *                 mapping->tree_lock (widely used, in set_page_dirty,
- *                           in arch-dependent flush_dcache_mmap_lock,
- *                           within bdi.wb->list_lock in __sync_single_inode)
- *
- * anon_vma->rwsem,mapping->i_mutex	(memory_failure, collect_procs_anon)
+ * inode->i_rwsem	(while writing or truncating, not reading or faulting)
+ *   mm->mmap_lock
+ *     mapping->invalidate_lock (in filemap_fault)
+ *       folio_lock
+ *         hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
+ *           vma_start_write
+ *             mapping->i_mmap_rwsem
+ *               anon_vma->rwsem
+ *                 mm->page_table_lock or pte_lock
+ *                   swap_lock (in swap_duplicate, swap_info_get)
+ *                     mmlist_lock (in mmput, drain_mmlist and others)
+ *                     mapping->private_lock (in block_dirty_folio)
+ *                       i_pages lock (widely used)
+ *                         lruvec->lru_lock (in folio_lruvec_lock_irq)
+ *                     inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *                     bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
+ *                       sb_lock (within inode_lock in fs/fs-writeback.c)
+ *                       i_pages lock (widely used, in set_page_dirty,
+ *                                 in arch-dependent flush_dcache_mmap_lock,
+ *                                 within bdi.wb->list_lock in __sync_single_inode)
+ *
+ * anon_vma->rwsem,mapping->i_mmap_rwsem	(memory_failure, collect_procs_anon)
  *   ->tasklist_lock
  *     pte map lock
+ *
+ * hugetlbfs PageHuge() take locks in this order:
+ *   hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
+ *     vma_lock (hugetlb specific lock for pmd_sharing)
+ *       mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
+ *         folio_lock
  */
 
 #include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/ksm.h>
@@ -56,10 +68,19 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
 #include <linux/backing-dev.h>
+#include <linux/page_idle.h>
+#include <linux/memremap.h>
+#include <linux/userfaultfd_k.h>
+#include <linux/mm_inline.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/migrate.h>
+
 #include "internal.h"
 
 static struct kmem_cache *anon_vma_cachep;
@@ -72,6 +93,9 @@ static inline struct anon_vma *anon_vma_alloc(void)
 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 	if (anon_vma) {
 		atomic_set(&anon_vma->refcount, 1);
+		anon_vma->num_children = 0;
+		anon_vma->num_active_vmas = 0;
+		anon_vma->parent = anon_vma;
 		/*
 		 * Initialise the anon_vma root to point to itself. If called
 		 * from fork, the root will be reset to the parents anon_vma.
@@ -87,15 +111,15 @@ static inline void anon_vma_free(struct anon_vma *anon_vma) VM_BUG_ON(atomic_read(&anon_vma->refcount)); /* - * Synchronize against page_lock_anon_vma_read() such that + * Synchronize against folio_lock_anon_vma_read() such that * we can safely hold the lock without the anon_vma getting * freed. * * Relies on the full mb implied by the atomic_dec_and_test() from * put_anon_vma() against the acquire barrier implied by - * down_read_trylock() from page_lock_anon_vma_read(). This orders: + * down_read_trylock() from folio_lock_anon_vma_read(). This orders: * - * page_lock_anon_vma_read() VS put_anon_vma() + * folio_lock_anon_vma_read() VS put_anon_vma() * down_read_trylock() atomic_dec_and_test() * LOCK MB * atomic_read() rwsem_is_locked() @@ -103,6 +127,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma) * LOCK should suffice since the actual taking of the lock must * happen _before_ what follows. */ + might_sleep(); if (rwsem_is_locked(&anon_vma->root->rwsem)) { anon_vma_lock_write(anon_vma); anon_vma_unlock_write(anon_vma); @@ -132,22 +157,23 @@ static void anon_vma_chain_link(struct vm_area_struct *vma, } /** - * anon_vma_prepare - attach an anon_vma to a memory region + * __anon_vma_prepare - attach an anon_vma to a memory region * @vma: the memory region in question * * This makes sure the memory mapping described by 'vma' has * an 'anon_vma' attached to it, so that we can associate the * anonymous pages mapped into it with that anon_vma. * - * The common case will be that we already have one, but if + * The common case will be that we already have one, which + * is handled inline by anon_vma_prepare(). But if * not we either need to find an adjacent mapping that we * can re-use the anon_vma from (very common when the only * reason for splitting a vma has been mprotect()), or we * allocate a new one. * * Anon-vma allocations are very subtle, because we may have - * optimistically looked up an anon_vma in page_lock_anon_vma_read() - * and that may actually touch the spinlock even in the newly + * optimistically looked up an anon_vma in folio_lock_anon_vma_read() + * and that may actually touch the rwsem even in the newly * allocated vma (it depends on RCU to make sure that the * anon_vma isn't actually destroyed). * @@ -155,49 +181,48 @@ static void anon_vma_chain_link(struct vm_area_struct *vma, * for the new allocation. At the same time, we do not want * to do any locking for the common case of already having * an anon_vma. - * - * This must be called with the mmap_sem held for reading. 
*/ -int anon_vma_prepare(struct vm_area_struct *vma) +int __anon_vma_prepare(struct vm_area_struct *vma) { - struct anon_vma *anon_vma = vma->anon_vma; + struct mm_struct *mm = vma->vm_mm; + struct anon_vma *anon_vma, *allocated; struct anon_vma_chain *avc; + mmap_assert_locked(mm); might_sleep(); - if (unlikely(!anon_vma)) { - struct mm_struct *mm = vma->vm_mm; - struct anon_vma *allocated; - avc = anon_vma_chain_alloc(GFP_KERNEL); - if (!avc) - goto out_enomem; + avc = anon_vma_chain_alloc(GFP_KERNEL); + if (!avc) + goto out_enomem; + + anon_vma = find_mergeable_anon_vma(vma); + allocated = NULL; + if (!anon_vma) { + anon_vma = anon_vma_alloc(); + if (unlikely(!anon_vma)) + goto out_enomem_free_avc; + anon_vma->num_children++; /* self-parent link for new root */ + allocated = anon_vma; + } - anon_vma = find_mergeable_anon_vma(vma); + anon_vma_lock_write(anon_vma); + /* page_table_lock to protect against threads */ + spin_lock(&mm->page_table_lock); + if (likely(!vma->anon_vma)) { + vma->anon_vma = anon_vma; + anon_vma_chain_link(vma, avc, anon_vma); + anon_vma->num_active_vmas++; allocated = NULL; - if (!anon_vma) { - anon_vma = anon_vma_alloc(); - if (unlikely(!anon_vma)) - goto out_enomem_free_avc; - allocated = anon_vma; - } + avc = NULL; + } + spin_unlock(&mm->page_table_lock); + anon_vma_unlock_write(anon_vma); - anon_vma_lock_write(anon_vma); - /* page_table_lock to protect against threads */ - spin_lock(&mm->page_table_lock); - if (likely(!vma->anon_vma)) { - vma->anon_vma = anon_vma; - anon_vma_chain_link(vma, avc, anon_vma); - allocated = NULL; - avc = NULL; - } - spin_unlock(&mm->page_table_lock); - anon_vma_unlock_write(anon_vma); + if (unlikely(allocated)) + put_anon_vma(allocated); + if (unlikely(avc)) + anon_vma_chain_free(avc); - if (unlikely(allocated)) - put_anon_vma(allocated); - if (unlikely(avc)) - anon_vma_chain_free(avc); - } return 0; out_enomem_free_avc: @@ -235,6 +260,21 @@ static inline void unlock_anon_vma_root(struct anon_vma *root) /* * Attach the anon_vmas from src to dst. * Returns 0 on success, -ENOMEM on failure. + * + * anon_vma_clone() is called by vma_expand(), vma_merge(), __split_vma(), + * copy_vma() and anon_vma_fork(). The first four want an exact copy of src, + * while the last one, anon_vma_fork(), may try to reuse an existing anon_vma to + * prevent endless growth of anon_vma. Since dst->anon_vma is set to NULL before + * call, we can identify this case by checking (!dst->anon_vma && + * src->anon_vma). + * + * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find + * and reuse existing anon_vma which has no vmas and only one child anon_vma. + * This prevents degradation of anon_vma hierarchy to endless linear chain in + * case of constantly forking task. On the other hand, an anon_vma with more + * than one child isn't reused even if there was no alive vma, thus rmap + * walker has a good chance of avoiding scanning the whole hierarchy when it + * searches where page is mapped. 
*/ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { @@ -244,7 +284,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { struct anon_vma *anon_vma; - avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); + avc = anon_vma_chain_alloc(GFP_NOWAIT); if (unlikely(!avc)) { unlock_anon_vma_root(root); root = NULL; @@ -255,11 +295,32 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) anon_vma = pavc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_chain_link(dst, avc, anon_vma); + + /* + * Reuse existing anon_vma if it has no vma and only one + * anon_vma child. + * + * Root anon_vma is never reused: + * it has self-parent reference and at least one child. + */ + if (!dst->anon_vma && src->anon_vma && + anon_vma->num_children < 2 && + anon_vma->num_active_vmas == 0) + dst->anon_vma = anon_vma; } + if (dst->anon_vma) + dst->anon_vma->num_active_vmas++; unlock_anon_vma_root(root); return 0; enomem_failure: + /* + * dst->anon_vma is dropped here otherwise its num_active_vmas can + * be incorrectly decremented in unlink_anon_vmas(). + * We can safely do this because callers of anon_vma_clone() don't care + * about dst->anon_vma if anon_vma_clone() failed. + */ + dst->anon_vma = NULL; unlink_anon_vmas(dst); return -ENOMEM; } @@ -273,31 +334,42 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) { struct anon_vma_chain *avc; struct anon_vma *anon_vma; + int error; /* Don't bother if the parent process has no anon_vma here. */ if (!pvma->anon_vma) return 0; + /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ + vma->anon_vma = NULL; + /* * First, attach the new VMA to the parent VMA's anon_vmas, * so rmap can find non-COWed pages in child processes. */ - if (anon_vma_clone(vma, pvma)) - return -ENOMEM; + error = anon_vma_clone(vma, pvma); + if (error) + return error; + + /* An existing anon_vma has been reused, all done then. */ + if (vma->anon_vma) + return 0; /* Then add our own anon_vma. */ anon_vma = anon_vma_alloc(); if (!anon_vma) goto out_error; + anon_vma->num_active_vmas++; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_error_free_anon_vma; /* - * The root anon_vma's spinlock is the lock actually used when we + * The root anon_vma's rwsem is the lock actually used when we * lock any of the anon_vmas in this anon_vma tree. */ anon_vma->root = pvma->anon_vma->root; + anon_vma->parent = pvma->anon_vma; /* * With refcounts, an anon_vma can stay around longer than the * process it belongs to. The root anon_vma needs to be pinned until @@ -308,6 +380,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) vma->anon_vma = anon_vma; anon_vma_lock_write(anon_vma); anon_vma_chain_link(vma, avc, anon_vma); + anon_vma->parent->num_children++; anon_vma_unlock_write(anon_vma); return 0; @@ -338,12 +411,23 @@ void unlink_anon_vmas(struct vm_area_struct *vma) * Leave empty anon_vmas on the list - we'll need * to free them outside the lock. */ - if (RB_EMPTY_ROOT(&anon_vma->rb_root)) + if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) { + anon_vma->parent->num_children--; continue; + } list_del(&avc->same_vma); anon_vma_chain_free(avc); } + if (vma->anon_vma) { + vma->anon_vma->num_active_vmas--; + + /* + * vma would still be needed after unlink, and anon_vma will be prepared + * when handle fault. 
+ */ + vma->anon_vma = NULL; + } unlock_anon_vma_root(root); /* @@ -354,6 +438,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma) list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; + VM_WARN_ON(anon_vma->num_children); + VM_WARN_ON(anon_vma->num_active_vmas); put_anon_vma(anon_vma); list_del(&avc->same_vma); @@ -367,22 +453,24 @@ static void anon_vma_ctor(void *data) init_rwsem(&anon_vma->rwsem); atomic_set(&anon_vma->refcount, 0); - anon_vma->rb_root = RB_ROOT; + anon_vma->rb_root = RB_ROOT_CACHED; } void __init anon_vma_init(void) { anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), - 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor); - anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC); + 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, + anon_vma_ctor); + anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, + SLAB_PANIC|SLAB_ACCOUNT); } /* * Getting a lock on a stable anon_vma from a page off the LRU is tricky! * - * Since there is no serialization what so ever against page_remove_rmap() - * the best this function can do is return a locked anon_vma that might - * have been relevant to this page. + * Since there is no serialization what so ever against folio_remove_rmap_*() + * the best this function can do is return a refcount increased anon_vma + * that might have been relevant to this page. * * The page might have been remapped to a different anon_vma or the anon_vma * returned may already be freed (and even reused). @@ -396,38 +484,44 @@ void __init anon_vma_init(void) * chain and verify that the page in question is indeed mapped in it * [ something equivalent to page_mapped_in_vma() ]. * - * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap() - * that the anon_vma pointer from page->mapping is valid if there is a - * mapcount, we can dereference the anon_vma after observing those. + * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from + * folio_remove_rmap_*() that the anon_vma pointer from page->mapping is valid + * if there is a mapcount, we can dereference the anon_vma after observing + * those. + * + * NOTE: the caller should hold folio lock when calling this. */ -struct anon_vma *page_get_anon_vma(struct page *page) +struct anon_vma *folio_get_anon_vma(const struct folio *folio) { struct anon_vma *anon_vma = NULL; unsigned long anon_mapping; + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + rcu_read_lock(); - anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); - if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) + anon_mapping = (unsigned long)READ_ONCE(folio->mapping); + if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON) goto out; - if (!page_mapped(page)) + if (!folio_mapped(folio)) goto out; - anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); + anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON); if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } /* - * If this page is still mapped, then its anon_vma cannot have been + * If this folio is still mapped, then its anon_vma cannot have been * freed. But if it has been unmapped, we have no security against the * anon_vma structure being freed and reused (for another anon_vma: - * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero() + * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero() * above cannot corrupt). 
*/ - if (!page_mapped(page)) { + if (!folio_mapped(folio)) { + rcu_read_unlock(); put_anon_vma(anon_vma); - anon_vma = NULL; + return NULL; } out: rcu_read_unlock(); @@ -436,50 +530,60 @@ out: } /* - * Similar to page_get_anon_vma() except it locks the anon_vma. + * Similar to folio_get_anon_vma() except it locks the anon_vma. * * Its a little more complex as it tries to keep the fast path to a single * atomic op -- the trylock. If we fail the trylock, we fall back to getting a - * reference like with page_get_anon_vma() and then block on the mutex. + * reference like with folio_get_anon_vma() and then block on the mutex + * on !rwc->try_lock case. */ -struct anon_vma *page_lock_anon_vma_read(struct page *page) +struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio, + struct rmap_walk_control *rwc) { struct anon_vma *anon_vma = NULL; struct anon_vma *root_anon_vma; unsigned long anon_mapping; + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + rcu_read_lock(); - anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping); - if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) + anon_mapping = (unsigned long)READ_ONCE(folio->mapping); + if ((anon_mapping & FOLIO_MAPPING_FLAGS) != FOLIO_MAPPING_ANON) goto out; - if (!page_mapped(page)) + if (!folio_mapped(folio)) goto out; - anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); - root_anon_vma = ACCESS_ONCE(anon_vma->root); + anon_vma = (struct anon_vma *) (anon_mapping - FOLIO_MAPPING_ANON); + root_anon_vma = READ_ONCE(anon_vma->root); if (down_read_trylock(&root_anon_vma->rwsem)) { /* - * If the page is still mapped, then this anon_vma is still + * If the folio is still mapped, then this anon_vma is still * its anon_vma, and holding the mutex ensures that it will * not go away, see anon_vma_free(). */ - if (!page_mapped(page)) { + if (!folio_mapped(folio)) { up_read(&root_anon_vma->rwsem); anon_vma = NULL; } goto out; } + if (rwc && rwc->try_lock) { + anon_vma = NULL; + rwc->contended = true; + goto out; + } + /* trylock failed, we got to sleep */ if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } - if (!page_mapped(page)) { + if (!folio_mapped(folio)) { + rcu_read_unlock(); put_anon_vma(anon_vma); - anon_vma = NULL; - goto out; + return NULL; } /* we pinned the anon_vma, its safe to sleep */ @@ -504,67 +608,189 @@ out: return anon_vma; } -void page_unlock_anon_vma_read(struct anon_vma *anon_vma) +#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH +/* + * Flush TLB entries for recently unmapped pages from remote CPUs. It is + * important if a PTE was dirty when it was unmapped that it's flushed + * before any IO is initiated on the page to prevent lost writes. Similarly, + * it must be flushed before freeing to prevent data leakage. + */ +void try_to_unmap_flush(void) +{ + struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; + + if (!tlb_ubc->flush_required) + return; + + arch_tlbbatch_flush(&tlb_ubc->arch); + tlb_ubc->flush_required = false; + tlb_ubc->writable = false; +} + +/* Flush iff there are potentially writable TLB entries that can race with IO */ +void try_to_unmap_flush_dirty(void) { - anon_vma_unlock_read(anon_vma); + struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; + + if (tlb_ubc->writable) + try_to_unmap_flush(); } /* - * At what user virtual address is page expected in @vma? + * Bits 0-14 of mm->tlb_flush_batched record pending generations. + * Bits 16-30 of mm->tlb_flush_batched bit record flushed generations. 
*/ -static inline unsigned long -__vma_address(struct page *page, struct vm_area_struct *vma) +#define TLB_FLUSH_BATCH_FLUSHED_SHIFT 16 +#define TLB_FLUSH_BATCH_PENDING_MASK \ + ((1 << (TLB_FLUSH_BATCH_FLUSHED_SHIFT - 1)) - 1) +#define TLB_FLUSH_BATCH_PENDING_LARGE \ + (TLB_FLUSH_BATCH_PENDING_MASK / 2) + +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, + unsigned long start, unsigned long end) { - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + struct tlbflush_unmap_batch *tlb_ubc = ¤t->tlb_ubc; + int batch; + bool writable = pte_dirty(pteval); + + if (!pte_accessible(mm, pteval)) + return; - if (unlikely(is_vm_hugetlb_page(vma))) - pgoff = page->index << huge_page_order(page_hstate(page)); + arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end); + tlb_ubc->flush_required = true; - return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + /* + * Ensure compiler does not re-order the setting of tlb_flush_batched + * before the PTE is cleared. + */ + barrier(); + batch = atomic_read(&mm->tlb_flush_batched); +retry: + if ((batch & TLB_FLUSH_BATCH_PENDING_MASK) > TLB_FLUSH_BATCH_PENDING_LARGE) { + /* + * Prevent `pending' from catching up with `flushed' because of + * overflow. Reset `pending' and `flushed' to be 1 and 0 if + * `pending' becomes large. + */ + if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) + goto retry; + } else { + atomic_inc(&mm->tlb_flush_batched); + } + + /* + * If the PTE was dirty then it's best to assume it's writable. The + * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() + * before the page is queued for IO. + */ + if (writable) + tlb_ubc->writable = true; } -inline unsigned long -vma_address(struct page *page, struct vm_area_struct *vma) +/* + * Returns true if the TLB flush should be deferred to the end of a batch of + * unmap operations to reduce IPIs. + */ +static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) { - unsigned long address = __vma_address(page, vma); - - /* page should be within @vma mapping range */ - VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); + if (!(flags & TTU_BATCH_FLUSH)) + return false; - return address; + return arch_tlbbatch_should_defer(mm); } /* - * At what user virtual address is page expected in vma? - * Caller should check the page is actually part of the vma. + * Reclaim unmaps pages under the PTL but do not flush the TLB prior to + * releasing the PTL if TLB flushes are batched. It's possible for a parallel + * operation such as mprotect or munmap to race between reclaim unmapping + * the page and flushing the page. If this race occurs, it potentially allows + * access to data via a stale TLB entry. Tracking all mm's that have TLB + * batching in flight would be expensive during reclaim so instead track + * whether TLB batching occurred in the past and if so then do a flush here + * if required. This will cost one additional flush per reclaim cycle paid + * by the first operation at risk such as mprotect and mumap. + * + * This must be called under the PTL so that an access to tlb_flush_batched + * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise + * via the PTL. 
*/ -unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) +void flush_tlb_batched_pending(struct mm_struct *mm) { - unsigned long address; - if (PageAnon(page)) { - struct anon_vma *page__anon_vma = page_anon_vma(page); + int batch = atomic_read(&mm->tlb_flush_batched); + int pending = batch & TLB_FLUSH_BATCH_PENDING_MASK; + int flushed = batch >> TLB_FLUSH_BATCH_FLUSHED_SHIFT; + + if (pending != flushed) { + flush_tlb_mm(mm); + /* + * If the new TLB flushing is pending during flushing, leave + * mm->tlb_flush_batched as is, to avoid losing flushing. + */ + atomic_cmpxchg(&mm->tlb_flush_batched, batch, + pending | (pending << TLB_FLUSH_BATCH_FLUSHED_SHIFT)); + } +} +#else +static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, + unsigned long start, unsigned long end) +{ +} + +static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) +{ + return false; +} +#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ + +/** + * page_address_in_vma - The virtual address of a page in this VMA. + * @folio: The folio containing the page. + * @page: The page within the folio. + * @vma: The VMA we need to know the address in. + * + * Calculates the user virtual address of this page in the specified VMA. + * It is the caller's responsibility to check the page is actually + * within the VMA. There may not currently be a PTE pointing at this + * page, but if a page fault occurs at this address, this is the page + * which will be accessed. + * + * Context: Caller should hold a reference to the folio. Caller should + * hold a lock (eg the i_mmap_lock or the mmap_lock) which keeps the + * VMA from being altered. + * + * Return: The virtual address corresponding to this page in the VMA. + */ +unsigned long page_address_in_vma(const struct folio *folio, + const struct page *page, const struct vm_area_struct *vma) +{ + if (folio_test_anon(folio)) { + struct anon_vma *anon_vma = folio_anon_vma(folio); /* * Note: swapoff's unuse_vma() is more efficient with this * check, and needs it to match anon_vma when KSM is active. */ - if (!vma->anon_vma || !page__anon_vma || - vma->anon_vma->root != page__anon_vma->root) + if (!vma->anon_vma || !anon_vma || + vma->anon_vma->root != anon_vma->root) return -EFAULT; - } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) { - if (!vma->vm_file || - vma->vm_file->f_mapping != page->mapping) - return -EFAULT; - } else + } else if (!vma->vm_file) { return -EFAULT; - address = __vma_address(page, vma); - if (unlikely(address < vma->vm_start || address >= vma->vm_end)) + } else if (vma->vm_file->f_mapping != folio->mapping) { return -EFAULT; - return address; + } + + /* KSM folios don't reach here because of the !anon_vma check */ + return vma_address(vma, page_pgoff(folio, page), 1); } +/* + * Returns the actual pmd_t* where we expect 'address' to be mapped from, or + * NULL if it doesn't exist. No guarantees / checks on what the pmd_t* + * represents. 
+ */ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd = NULL; @@ -572,680 +798,1378 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) if (!pgd_present(*pgd)) goto out; - pud = pud_offset(pgd, address); + p4d = p4d_offset(pgd, address); + if (!p4d_present(*p4d)) + goto out; + + pud = pud_offset(p4d, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); - if (!pmd_present(*pmd)) - pmd = NULL; out: return pmd; } +struct folio_referenced_arg { + int mapcount; + int referenced; + vm_flags_t vm_flags; + struct mem_cgroup *memcg; +}; + /* - * Check that @page is mapped at @address into @mm. - * - * If @sync is false, page_check_address may perform a racy check to avoid - * the page table lock when the pte is not present (helpful when reclaiming - * highly shared pages). - * - * On success returns with pte mapped and locked. + * arg: folio_referenced_arg will be passed */ -pte_t *__page_check_address(struct page *page, struct mm_struct *mm, - unsigned long address, spinlock_t **ptlp, int sync) +static bool folio_referenced_one(struct folio *folio, + struct vm_area_struct *vma, unsigned long address, void *arg) { - pmd_t *pmd; - pte_t *pte; - spinlock_t *ptl; + struct folio_referenced_arg *pra = arg; + DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); + int ptes = 0, referenced = 0; - if (unlikely(PageHuge(page))) { - pte = huge_pte_offset(mm, address); - ptl = &mm->page_table_lock; - goto check; - } + while (page_vma_mapped_walk(&pvmw)) { + address = pvmw.address; - pmd = mm_find_pmd(mm, address); - if (!pmd) - return NULL; + if (vma->vm_flags & VM_LOCKED) { + ptes++; + pra->mapcount--; - if (pmd_trans_huge(*pmd)) - return NULL; + /* Only mlock fully mapped pages */ + if (pvmw.pte && ptes != pvmw.nr_pages) + continue; - pte = pte_offset_map(pmd, address); - /* Make a quick check before getting the lock */ - if (!sync && !pte_present(*pte)) { - pte_unmap(pte); - return NULL; + /* + * All PTEs must be protected by page table lock in + * order to mlock the page. + * + * If page table boundary has been cross, current ptl + * only protect part of ptes. + */ + if (pvmw.flags & PVMW_PGTABLE_CROSSED) + continue; + + /* Restore the mlock which got missed */ + mlock_vma_folio(folio, vma); + page_vma_mapped_walk_done(&pvmw); + pra->vm_flags |= VM_LOCKED; + return false; /* To break the loop */ + } + + /* + * Skip the non-shared swapbacked folio mapped solely by + * the exiting or OOM-reaped process. This avoids redundant + * swap-out followed by an immediate unmap. + */ + if ((!atomic_read(&vma->vm_mm->mm_users) || + check_stable_address_space(vma->vm_mm)) && + folio_test_anon(folio) && folio_test_swapbacked(folio) && + !folio_maybe_mapped_shared(folio)) { + pra->referenced = -1; + page_vma_mapped_walk_done(&pvmw); + return false; + } + + if (lru_gen_enabled() && pvmw.pte) { + if (lru_gen_look_around(&pvmw)) + referenced++; + } else if (pvmw.pte) { + if (ptep_clear_flush_young_notify(vma, address, + pvmw.pte)) + referenced++; + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { + if (pmdp_clear_flush_young_notify(vma, address, + pvmw.pmd)) + referenced++; + } else { + /* unexpected pmd-mapped folio? 
*/ + WARN_ON_ONCE(1); + } + + pra->mapcount--; } - ptl = pte_lockptr(mm, pmd); -check: - spin_lock(ptl); - if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { - *ptlp = ptl; - return pte; + if (referenced) + folio_clear_idle(folio); + if (folio_test_clear_young(folio)) + referenced++; + + if (referenced) { + pra->referenced++; + pra->vm_flags |= vma->vm_flags & ~VM_LOCKED; } - pte_unmap_unlock(pte, ptl); - return NULL; + + if (!pra->mapcount) + return false; /* To break the loop */ + + return true; +} + +static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg) +{ + struct folio_referenced_arg *pra = arg; + struct mem_cgroup *memcg = pra->memcg; + + /* + * Ignore references from this mapping if it has no recency. If the + * folio has been used in another mapping, we will catch it; if this + * other mapping is already gone, the unmap path will have set the + * referenced flag or activated the folio in zap_pte_range(). + */ + if (!vma_has_recency(vma)) + return true; + + /* + * If we are reclaiming on behalf of a cgroup, skip counting on behalf + * of references from different cgroups. + */ + if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) + return true; + + return false; } /** - * page_mapped_in_vma - check whether a page is really mapped in a VMA - * @page: the page to test - * @vma: the VMA to test + * folio_referenced() - Test if the folio was referenced. + * @folio: The folio to test. + * @is_locked: Caller holds lock on the folio. + * @memcg: target memory cgroup + * @vm_flags: A combination of all the vma->vm_flags which referenced the folio. * - * Returns 1 if the page is mapped into the page tables of the VMA, 0 - * if the page is not mapped into the page tables of this VMA. Only - * valid for normal file or anonymous VMAs. + * Quick test_and_clear_referenced for all mappings of a folio, + * + * Return: The number of mappings which referenced the folio. Return -1 if + * the function bailed out due to rmap lock contention. */ -int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) +int folio_referenced(struct folio *folio, int is_locked, + struct mem_cgroup *memcg, vm_flags_t *vm_flags) { - unsigned long address; - pte_t *pte; - spinlock_t *ptl; + bool we_locked = false; + struct folio_referenced_arg pra = { + .mapcount = folio_mapcount(folio), + .memcg = memcg, + }; + struct rmap_walk_control rwc = { + .rmap_one = folio_referenced_one, + .arg = (void *)&pra, + .anon_lock = folio_lock_anon_vma_read, + .try_lock = true, + .invalid_vma = invalid_folio_referenced_vma, + }; - address = __vma_address(page, vma); - if (unlikely(address < vma->vm_start || address >= vma->vm_end)) + *vm_flags = 0; + if (!pra.mapcount) return 0; - pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); - if (!pte) /* the page is not in this mm */ + + if (!folio_raw_mapping(folio)) return 0; - pte_unmap_unlock(pte, ptl); - return 1; -} + if (!is_locked) { + we_locked = folio_trylock(folio); + if (!we_locked) + return 1; + } -/* - * Subfunctions of page_referenced: page_referenced_one called - * repeatedly from either page_referenced_anon or page_referenced_file. 
- */ -int page_referenced_one(struct page *page, struct vm_area_struct *vma, - unsigned long address, unsigned int *mapcount, - unsigned long *vm_flags) -{ - struct mm_struct *mm = vma->vm_mm; - int referenced = 0; + rmap_walk(folio, &rwc); + *vm_flags = pra.vm_flags; - if (unlikely(PageTransHuge(page))) { - pmd_t *pmd; + if (we_locked) + folio_unlock(folio); - spin_lock(&mm->page_table_lock); - /* - * rmap might return false positives; we must filter - * these out using page_check_address_pmd(). - */ - pmd = page_check_address_pmd(page, mm, address, - PAGE_CHECK_ADDRESS_PMD_FLAG); - if (!pmd) { - spin_unlock(&mm->page_table_lock); - goto out; - } + return rwc.contended ? -1 : pra.referenced; +} - if (vma->vm_flags & VM_LOCKED) { - spin_unlock(&mm->page_table_lock); - *mapcount = 0; /* break early from loop */ - *vm_flags |= VM_LOCKED; - goto out; - } +static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) +{ + int cleaned = 0; + struct vm_area_struct *vma = pvmw->vma; + struct mmu_notifier_range range; + unsigned long address = pvmw->address; - /* go ahead even if the pmd is pmd_trans_splitting() */ - if (pmdp_clear_flush_young_notify(vma, address, pmd)) - referenced++; - spin_unlock(&mm->page_table_lock); - } else { - pte_t *pte; - spinlock_t *ptl; + /* + * We have to assume the worse case ie pmd for invalidation. Note that + * the folio can not be freed from this function. + */ + mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, + vma->vm_mm, address, vma_address_end(pvmw)); + mmu_notifier_invalidate_range_start(&range); - /* - * rmap might return false positives; we must filter - * these out using page_check_address(). - */ - pte = page_check_address(page, mm, address, &ptl, 0); - if (!pte) - goto out; + while (page_vma_mapped_walk(pvmw)) { + int ret = 0; - if (vma->vm_flags & VM_LOCKED) { - pte_unmap_unlock(pte, ptl); - *mapcount = 0; /* break early from loop */ - *vm_flags |= VM_LOCKED; - goto out; - } + address = pvmw->address; + if (pvmw->pte) { + pte_t *pte = pvmw->pte; + pte_t entry = ptep_get(pte); - if (ptep_clear_flush_young_notify(vma, address, pte)) { /* - * Don't treat a reference through a sequentially read - * mapping as such. If the page has been used in - * another mapping, we will catch it; if this other - * mapping is already gone, the unmap path will have - * set PG_referenced or activated the page. + * PFN swap PTEs, such as device-exclusive ones, that + * actually map pages are clean and not writable from a + * CPU perspective. The MMU notifier takes care of any + * device aspects. */ - if (likely(!(vma->vm_flags & VM_SEQ_READ))) - referenced++; + if (!pte_present(entry)) + continue; + if (!pte_dirty(entry) && !pte_write(entry)) + continue; + + flush_cache_page(vma, address, pte_pfn(entry)); + entry = ptep_clear_flush(vma, address, pte); + entry = pte_wrprotect(entry); + entry = pte_mkclean(entry); + set_pte_at(vma->vm_mm, address, pte, entry); + ret = 1; + } else { +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + pmd_t *pmd = pvmw->pmd; + pmd_t entry = pmdp_get(pmd); + + /* + * Please see the comment above (!pte_present). + * A non present PMD is not writable from a CPU + * perspective. 
+ */ + if (!pmd_present(entry)) + continue; + if (!pmd_dirty(entry) && !pmd_write(entry)) + continue; + + flush_cache_range(vma, address, + address + HPAGE_PMD_SIZE); + entry = pmdp_invalidate(vma, address, pmd); + entry = pmd_wrprotect(entry); + entry = pmd_mkclean(entry); + set_pmd_at(vma->vm_mm, address, pmd, entry); + ret = 1; +#else + /* unexpected pmd-mapped folio? */ + WARN_ON_ONCE(1); +#endif } - pte_unmap_unlock(pte, ptl); + + if (ret) + cleaned++; } - (*mapcount)--; + mmu_notifier_invalidate_range_end(&range); - if (referenced) - *vm_flags |= vma->vm_flags; -out: - return referenced; + return cleaned; } -static int page_referenced_anon(struct page *page, - struct mem_cgroup *memcg, - unsigned long *vm_flags) +static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, void *arg) { - unsigned int mapcount; - struct anon_vma *anon_vma; - pgoff_t pgoff; - struct anon_vma_chain *avc; - int referenced = 0; + DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC); + int *cleaned = arg; - anon_vma = page_lock_anon_vma_read(page); - if (!anon_vma) - return referenced; + *cleaned += page_vma_mkclean_one(&pvmw); - mapcount = page_mapcount(page); - pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { - struct vm_area_struct *vma = avc->vma; - unsigned long address = vma_address(page, vma); - /* - * If we are reclaiming on behalf of a cgroup, skip - * counting on behalf of references from different - * cgroups - */ - if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) - continue; - referenced += page_referenced_one(page, vma, address, - &mapcount, vm_flags); - if (!mapcount) - break; - } + return true; +} - page_unlock_anon_vma_read(anon_vma); - return referenced; +static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) +{ + if (vma->vm_flags & VM_SHARED) + return false; + + return true; } -/** - * page_referenced_file - referenced check for object-based rmap - * @page: the page we're checking references on. - * @memcg: target memory control group - * @vm_flags: collect encountered vma->vm_flags who actually referenced the page - * - * For an object-based mapped page, find all the places it is mapped and - * check/clear the referenced flag. This is done by following the page->mapping - * pointer, then walking the chain of vmas it holds. It returns the number - * of references it found. - * - * This function is only called from page_referenced for object-based pages. - */ -static int page_referenced_file(struct page *page, - struct mem_cgroup *memcg, - unsigned long *vm_flags) +int folio_mkclean(struct folio *folio) { - unsigned int mapcount; - struct address_space *mapping = page->mapping; - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - struct vm_area_struct *vma; - int referenced = 0; + int cleaned = 0; + struct address_space *mapping; + struct rmap_walk_control rwc = { + .arg = (void *)&cleaned, + .rmap_one = page_mkclean_one, + .invalid_vma = invalid_mkclean_vma, + }; - /* - * The caller's checks on page->mapping and !PageAnon have made - * sure that this is a file page: the check for page->mapping - * excludes the case just before it gets set on an anon page. 
- */ - BUG_ON(PageAnon(page)); + BUG_ON(!folio_test_locked(folio)); - /* - * The page lock not only makes sure that page->mapping cannot - * suddenly be NULLified by truncation, it makes sure that the - * structure at mapping cannot be freed and reused yet, - * so we can safely take mapping->i_mmap_mutex. - */ - BUG_ON(!PageLocked(page)); + if (!folio_mapped(folio)) + return 0; - mutex_lock(&mapping->i_mmap_mutex); + mapping = folio_mapping(folio); + if (!mapping) + return 0; - /* - * i_mmap_mutex does not stabilize mapcount at all, but mapcount - * is more likely to be accurate if we note it after spinning. - */ - mapcount = page_mapcount(page); + rmap_walk(folio, &rwc); - vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { - unsigned long address = vma_address(page, vma); - /* - * If we are reclaiming on behalf of a cgroup, skip - * counting on behalf of references from different - * cgroups - */ - if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) - continue; - referenced += page_referenced_one(page, vma, address, - &mapcount, vm_flags); - if (!mapcount) - break; - } + return cleaned; +} +EXPORT_SYMBOL_GPL(folio_mkclean); + +struct wrprotect_file_state { + int cleaned; + pgoff_t pgoff; + unsigned long pfn; + unsigned long nr_pages; +}; - mutex_unlock(&mapping->i_mmap_mutex); - return referenced; +static bool mapping_wrprotect_range_one(struct folio *folio, + struct vm_area_struct *vma, unsigned long address, void *arg) +{ + struct wrprotect_file_state *state = (struct wrprotect_file_state *)arg; + struct page_vma_mapped_walk pvmw = { + .pfn = state->pfn, + .nr_pages = state->nr_pages, + .pgoff = state->pgoff, + .vma = vma, + .address = address, + .flags = PVMW_SYNC, + }; + + state->cleaned += page_vma_mkclean_one(&pvmw); + + return true; } +static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, + pgoff_t pgoff_start, unsigned long nr_pages, + struct rmap_walk_control *rwc, bool locked); + /** - * page_referenced - test if the page was referenced - * @page: the page to test - * @is_locked: caller holds lock on the page - * @memcg: target memory cgroup - * @vm_flags: collect encountered vma->vm_flags who actually referenced the page + * mapping_wrprotect_range() - Write-protect all mappings in a specified range. + * + * @mapping: The mapping whose reverse mapping should be traversed. + * @pgoff: The page offset at which @pfn is mapped within @mapping. + * @pfn: The PFN of the page mapped in @mapping at @pgoff. + * @nr_pages: The number of physically contiguous base pages spanned. + * + * Traverses the reverse mapping, finding all VMAs which contain a shared + * mapping of the pages in the specified range in @mapping, and write-protects + * them (that is, updates the page tables to mark the mappings read-only such + * that a write protection fault arises when the mappings are written to). * - * Quick test_and_clear_referenced for all mappings to a page, - * returns the number of ptes which referenced the page. + * The @pfn value need not refer to a folio, but rather can reference a kernel + * allocation which is mapped into userland. We therefore do not require that + * the page maps to a folio with a valid mapping or index field, rather the + * caller specifies these in @mapping and @pgoff. + * + * Return: the number of write-protected PTEs, or an error. 
*/ -int page_referenced(struct page *page, - int is_locked, - struct mem_cgroup *memcg, - unsigned long *vm_flags) +int mapping_wrprotect_range(struct address_space *mapping, pgoff_t pgoff, + unsigned long pfn, unsigned long nr_pages) { - int referenced = 0; - int we_locked = 0; + struct wrprotect_file_state state = { + .cleaned = 0, + .pgoff = pgoff, + .pfn = pfn, + .nr_pages = nr_pages, + }; + struct rmap_walk_control rwc = { + .arg = (void *)&state, + .rmap_one = mapping_wrprotect_range_one, + .invalid_vma = invalid_mkclean_vma, + }; - *vm_flags = 0; - if (page_mapped(page) && page_rmapping(page)) { - if (!is_locked && (!PageAnon(page) || PageKsm(page))) { - we_locked = trylock_page(page); - if (!we_locked) { - referenced++; - goto out; - } - } - if (unlikely(PageKsm(page))) - referenced += page_referenced_ksm(page, memcg, - vm_flags); - else if (PageAnon(page)) - referenced += page_referenced_anon(page, memcg, - vm_flags); - else if (page->mapping) - referenced += page_referenced_file(page, memcg, - vm_flags); - if (we_locked) - unlock_page(page); - - if (page_test_and_clear_young(page_to_pfn(page))) - referenced++; - } -out: - return referenced; -} + if (!mapping) + return 0; -static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, - unsigned long address) -{ - struct mm_struct *mm = vma->vm_mm; - pte_t *pte; - spinlock_t *ptl; - int ret = 0; + __rmap_walk_file(/* folio = */NULL, mapping, pgoff, nr_pages, &rwc, + /* locked = */false); - pte = page_check_address(page, mm, address, &ptl, 1); - if (!pte) - goto out; - - if (pte_dirty(*pte) || pte_write(*pte)) { - pte_t entry; + return state.cleaned; +} +EXPORT_SYMBOL_GPL(mapping_wrprotect_range); - flush_cache_page(vma, address, pte_pfn(*pte)); - entry = ptep_clear_flush(vma, address, pte); - entry = pte_wrprotect(entry); - entry = pte_mkclean(entry); - set_pte_at(mm, address, pte, entry); - ret = 1; - } +/** + * pfn_mkclean_range - Cleans the PTEs (including PMDs) mapped with range of + * [@pfn, @pfn + @nr_pages) at the specific offset (@pgoff) + * within the @vma of shared mappings. And since clean PTEs + * should also be readonly, write protects them too. + * @pfn: start pfn. + * @nr_pages: number of physically contiguous pages srarting with @pfn. + * @pgoff: page offset that the @pfn mapped with. + * @vma: vma that @pfn mapped within. + * + * Returns the number of cleaned PTEs (including PMDs). + */ +int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, + struct vm_area_struct *vma) +{ + struct page_vma_mapped_walk pvmw = { + .pfn = pfn, + .nr_pages = nr_pages, + .pgoff = pgoff, + .vma = vma, + .flags = PVMW_SYNC, + }; + + if (invalid_mkclean_vma(vma, NULL)) + return 0; - pte_unmap_unlock(pte, ptl); + pvmw.address = vma_address(vma, pgoff, nr_pages); + VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma); - if (ret) - mmu_notifier_invalidate_page(mm, address); -out: - return ret; + return page_vma_mkclean_one(&pvmw); } -static int page_mkclean_file(struct address_space *mapping, struct page *page) +static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped) { - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - struct vm_area_struct *vma; - int ret = 0; - - BUG_ON(PageAnon(page)); + int idx; - mutex_lock(&mapping->i_mmap_mutex); - vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { - if (vma->vm_flags & VM_SHARED) { - unsigned long address = vma_address(page, vma); - ret += page_mkclean_one(page, vma, address); + if (nr) { + idx = folio_test_anon(folio) ? 
NR_ANON_MAPPED : NR_FILE_MAPPED; + lruvec_stat_mod_folio(folio, idx, nr); + } + if (nr_pmdmapped) { + if (folio_test_anon(folio)) { + idx = NR_ANON_THPS; + lruvec_stat_mod_folio(folio, idx, nr_pmdmapped); + } else { + /* NR_*_PMDMAPPED are not maintained per-memcg */ + idx = folio_test_swapbacked(folio) ? + NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED; + __mod_node_page_state(folio_pgdat(folio), idx, + nr_pmdmapped); } } - mutex_unlock(&mapping->i_mmap_mutex); - return ret; } -int page_mkclean(struct page *page) +static __always_inline void __folio_add_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum pgtable_level level) { - int ret = 0; + atomic_t *mapped = &folio->_nr_pages_mapped; + const int orig_nr_pages = nr_pages; + int first = 0, nr = 0, nr_pmdmapped = 0; - BUG_ON(!PageLocked(page)); + __folio_rmap_sanity_checks(folio, page, nr_pages, level); - if (page_mapped(page)) { - struct address_space *mapping = page_mapping(page); - if (mapping) - ret = page_mkclean_file(mapping, page); - } + switch (level) { + case PGTABLE_LEVEL_PTE: + if (!folio_test_large(folio)) { + nr = atomic_inc_and_test(&folio->_mapcount); + break; + } - return ret; + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + nr = folio_add_return_large_mapcount(folio, orig_nr_pages, vma); + if (nr == orig_nr_pages) + /* Was completely unmapped. */ + nr = folio_large_nr_pages(folio); + else + nr = 0; + break; + } + + do { + first += atomic_inc_and_test(&page->_mapcount); + } while (page++, --nr_pages > 0); + + if (first && + atomic_add_return_relaxed(first, mapped) < ENTIRELY_MAPPED) + nr = first; + + folio_add_large_mapcount(folio, orig_nr_pages, vma); + break; + case PGTABLE_LEVEL_PMD: + case PGTABLE_LEVEL_PUD: + first = atomic_inc_and_test(&folio->_entire_mapcount); + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + if (level == PGTABLE_LEVEL_PMD && first) + nr_pmdmapped = folio_large_nr_pages(folio); + nr = folio_inc_return_large_mapcount(folio, vma); + if (nr == 1) + /* Was completely unmapped. */ + nr = folio_large_nr_pages(folio); + else + nr = 0; + break; + } + + if (first) { + nr = atomic_add_return_relaxed(ENTIRELY_MAPPED, mapped); + if (likely(nr < ENTIRELY_MAPPED + ENTIRELY_MAPPED)) { + nr_pages = folio_large_nr_pages(folio); + /* + * We only track PMD mappings of PMD-sized + * folios separately. + */ + if (level == PGTABLE_LEVEL_PMD) + nr_pmdmapped = nr_pages; + nr = nr_pages - (nr & FOLIO_PAGES_MAPPED); + /* Raced ahead of a remove and another add? */ + if (unlikely(nr < 0)) + nr = 0; + } else { + /* Raced ahead of a remove of ENTIRELY_MAPPED */ + nr = 0; + } + } + folio_inc_large_mapcount(folio, vma); + break; + default: + BUILD_BUG(); + } + __folio_mod_stat(folio, nr, nr_pmdmapped); } -EXPORT_SYMBOL_GPL(page_mkclean); /** - * page_move_anon_rmap - move a page to our anon_vma - * @page: the page to move to our anon_vma - * @vma: the vma the page belongs to - * @address: the user virtual address mapped + * folio_move_anon_rmap - move a folio to our anon_vma + * @folio: The folio to move to our anon_vma + * @vma: The vma the folio belongs to * - * When a page belongs exclusively to one process after a COW event, - * that page can be moved into the anon_vma that belongs to just that - * process, so the rmap code will not search the parent or sibling - * processes. + * When a folio belongs exclusively to one process after a COW event, + * that folio can be moved into the anon_vma that belongs to just that + * process, so the rmap code will not search the parent or sibling processes. 
*/ -void page_move_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address) +void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma) { - struct anon_vma *anon_vma = vma->anon_vma; + void *anon_vma = vma->anon_vma; - VM_BUG_ON(!PageLocked(page)); - VM_BUG_ON(!anon_vma); - VM_BUG_ON(page->index != linear_page_index(vma, address)); + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + VM_BUG_ON_VMA(!anon_vma, vma); - anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; - page->mapping = (struct address_space *) anon_vma; + anon_vma += FOLIO_MAPPING_ANON; + /* + * Ensure that anon_vma and the FOLIO_MAPPING_ANON bit are written + * simultaneously, so a concurrent reader (eg folio_referenced()'s + * folio_test_anon()) will not see one without the other. + */ + WRITE_ONCE(folio->mapping, anon_vma); } /** - * __page_set_anon_rmap - set up new anonymous rmap - * @page: Page to add to rmap - * @vma: VM area to add page to. - * @address: User virtual address of the mapping - * @exclusive: the page is exclusively owned by the current process + * __folio_set_anon - set up a new anonymous rmap for a folio + * @folio: The folio to set up the new anonymous rmap for. + * @vma: VM area to add the folio to. + * @address: User virtual address of the mapping + * @exclusive: Whether the folio is exclusive to the process. */ -static void __page_set_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address, int exclusive) +static void __folio_set_anon(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, bool exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); - if (PageAnon(page)) - return; - /* - * If the page isn't exclusively mapped into this vma, - * we must use the _oldest_ possible anon_vma for the - * page mapping! + * If the folio isn't exclusive to this vma, we must use the _oldest_ + * possible anon_vma for the folio mapping! */ if (!exclusive) anon_vma = anon_vma->root; - anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; - page->mapping = (struct address_space *) anon_vma; - page->index = linear_page_index(vma, address); + /* + * page_idle does a lockless/optimistic rmap scan on folio->mapping. + * Make sure the compiler doesn't split the stores of anon_vma and + * the FOLIO_MAPPING_ANON type identifier, otherwise the rmap code + * could mistake the mapping for a struct address_space and crash. + */ + anon_vma = (void *) anon_vma + FOLIO_MAPPING_ANON; + WRITE_ONCE(folio->mapping, (struct address_space *) anon_vma); + folio->index = linear_page_index(vma, address); } /** * __page_check_anon_rmap - sanity check anonymous rmap addition - * @page: the page to add the mapping to + * @folio: The folio containing @page. + * @page: the page to check the mapping of * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped */ -static void __page_check_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address) +static void __page_check_anon_rmap(const struct folio *folio, + const struct page *page, struct vm_area_struct *vma, + unsigned long address) { -#ifdef CONFIG_DEBUG_VM /* * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. * - * We have exclusion against page_add_anon_rmap because the caller - * always holds the page locked, except if called from page_dup_rmap, - * in which case the page is already known to be setup. 
+ * We have exclusion against folio_add_anon_rmap_*() because the caller + * always holds the page locked. * - * We have exclusion against page_add_new_anon_rmap because those pages + * We have exclusion against folio_add_new_anon_rmap because those pages * are initially only visible via the pagetables, and the pte is locked - * over the call to page_add_new_anon_rmap. + * over the call to folio_add_new_anon_rmap. */ - BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); - BUG_ON(page->index != linear_page_index(vma, address)); -#endif + VM_BUG_ON_FOLIO(folio_anon_vma(folio)->root != vma->anon_vma->root, + folio); + VM_BUG_ON_PAGE(page_pgoff(folio, page) != linear_page_index(vma, address), + page); +} + +static __always_inline void __folio_add_anon_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + unsigned long address, rmap_t flags, enum pgtable_level level) +{ + int i; + + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + + __folio_add_rmap(folio, page, nr_pages, vma, level); + + if (likely(!folio_test_ksm(folio))) + __page_check_anon_rmap(folio, page, vma, address); + + if (flags & RMAP_EXCLUSIVE) { + switch (level) { + case PGTABLE_LEVEL_PTE: + for (i = 0; i < nr_pages; i++) + SetPageAnonExclusive(page + i); + break; + case PGTABLE_LEVEL_PMD: + SetPageAnonExclusive(page); + break; + case PGTABLE_LEVEL_PUD: + /* + * Keep the compiler happy, we don't support anonymous + * PUD mappings. + */ + WARN_ON_ONCE(1); + break; + default: + BUILD_BUG(); + } + } + + VM_WARN_ON_FOLIO(!folio_test_large(folio) && PageAnonExclusive(page) && + atomic_read(&folio->_mapcount) > 0, folio); + for (i = 0; i < nr_pages; i++) { + struct page *cur_page = page + i; + + VM_WARN_ON_FOLIO(folio_test_large(folio) && + folio_entire_mapcount(folio) > 1 && + PageAnonExclusive(cur_page), folio); + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) + continue; + + /* + * While PTE-mapping a THP we have a PMD and a PTE + * mapping. + */ + VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 && + PageAnonExclusive(cur_page), folio); + } + + /* + * Only mlock it if the folio is fully mapped to the VMA. + * + * Partially mapped folios can be split on reclaim and part outside + * of mlocked VMA can be evicted or freed. + */ + if (folio_nr_pages(folio) == nr_pages) + mlock_vma_folio(folio, vma); } /** - * page_add_anon_rmap - add pte mapping to an anonymous page - * @page: the page to add the mapping to - * @vma: the vm area in which the mapping is added - * @address: the user virtual address mapped + * folio_add_anon_rmap_ptes - add PTE mappings to a page range of an anon folio + * @folio: The folio to add the mappings to + * @page: The first page to add + * @nr_pages: The number of pages which will be mapped + * @vma: The vm area in which the mappings are added + * @address: The user virtual address of the first page to map + * @flags: The rmap flags + * + * The page range of folio is defined by [first_page, first_page + nr_pages) * - * The caller needs to hold the pte lock, and the page must be locked in + * The caller needs to hold the page table lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting, - * and to ensure that PageAnon is not being upgraded racily to PageKsm - * (but PageKsm is never downgraded to PageAnon). + * and to ensure that an anon folio is not being upgraded racily to a KSM folio + * (but KSM folios are never downgraded). 
*/ -void page_add_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address) +void folio_add_anon_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma, unsigned long address, + rmap_t flags) { - do_page_add_anon_rmap(page, vma, address, 0); + __folio_add_anon_rmap(folio, page, nr_pages, vma, address, flags, + PGTABLE_LEVEL_PTE); } -/* - * Special version of the above for do_swap_page, which often runs - * into pages that are exclusively owned by the current process. - * Everybody else should continue to use page_add_anon_rmap above. +/** + * folio_add_anon_rmap_pmd - add a PMD mapping to a page range of an anon folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added + * @address: The user virtual address of the first page to map + * @flags: The rmap flags + * + * The page range of folio is defined by [first_page, first_page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock, and the page must be locked in + * the anon_vma case: to serialize mapping,index checking after setting. */ -void do_page_add_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address, int exclusive) +void folio_add_anon_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma, unsigned long address, rmap_t flags) { - int first = atomic_inc_and_test(&page->_mapcount); - if (first) { - if (!PageTransHuge(page)) - __inc_zone_page_state(page, NR_ANON_PAGES); - else - __inc_zone_page_state(page, - NR_ANON_TRANSPARENT_HUGEPAGES); - } - if (unlikely(PageKsm(page))) - return; - - VM_BUG_ON(!PageLocked(page)); - /* address might be in next vma when migration races vma_adjust */ - if (first) - __page_set_anon_rmap(page, vma, address, exclusive); - else - __page_check_anon_rmap(page, vma, address); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, + PGTABLE_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif } /** - * page_add_new_anon_rmap - add pte mapping to a new anonymous page - * @page: the page to add the mapping to + * folio_add_new_anon_rmap - Add mapping to a new anonymous folio. + * @folio: The folio to add the mapping to. * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped + * @flags: The rmap flags * - * Same as page_add_anon_rmap but must only be called on *new* pages. + * Like folio_add_anon_rmap_*() but must only be called on *new* folios. * This means the inc-and-test can be bypassed. - * Page does not have to be locked. + * The folio doesn't necessarily need to be locked while it's exclusive + * unless two threads map it concurrently. However, the folio must be + * locked if it's shared. + * + * If the folio is pmd-mappable, it is accounted as a THP. 
*/ -void page_add_new_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address) -{ - VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); - SetPageSwapBacked(page); - atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ - if (!PageTransHuge(page)) - __inc_zone_page_state(page, NR_ANON_PAGES); - else - __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); - __page_set_anon_rmap(page, vma, address, 1); - if (!mlocked_vma_newpage(vma, page)) { - SetPageActive(page); - lru_cache_add(page); - } else - add_page_to_unevictable_list(page); +void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, rmap_t flags) +{ + const bool exclusive = flags & RMAP_EXCLUSIVE; + int nr = 1, nr_pmdmapped = 0; + + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!exclusive && !folio_test_locked(folio), folio); + + /* + * VM_DROPPABLE mappings don't swap; instead they're just dropped when + * under memory pressure. + */ + if (!folio_test_swapbacked(folio) && !(vma->vm_flags & VM_DROPPABLE)) + __folio_set_swapbacked(folio); + __folio_set_anon(folio, vma, address, exclusive); + + if (likely(!folio_test_large(folio))) { + /* increment count (starts at -1) */ + atomic_set(&folio->_mapcount, 0); + if (exclusive) + SetPageAnonExclusive(&folio->page); + } else if (!folio_test_pmd_mappable(folio)) { + int i; + + nr = folio_large_nr_pages(folio); + for (i = 0; i < nr; i++) { + struct page *page = folio_page(folio, i); + + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + /* increment count (starts at -1) */ + atomic_set(&page->_mapcount, 0); + if (exclusive) + SetPageAnonExclusive(page); + } + + folio_set_large_mapcount(folio, nr, vma); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_set(&folio->_nr_pages_mapped, nr); + } else { + nr = folio_large_nr_pages(folio); + /* increment count (starts at -1) */ + atomic_set(&folio->_entire_mapcount, 0); + folio_set_large_mapcount(folio, 1, vma); + if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) + atomic_set(&folio->_nr_pages_mapped, ENTIRELY_MAPPED); + if (exclusive) + SetPageAnonExclusive(&folio->page); + nr_pmdmapped = nr; + } + + VM_WARN_ON_ONCE(address < vma->vm_start || + address + (nr << PAGE_SHIFT) > vma->vm_end); + + __folio_mod_stat(folio, nr, nr_pmdmapped); + mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); +} + +static __always_inline void __folio_add_file_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum pgtable_level level) +{ + VM_WARN_ON_FOLIO(folio_test_anon(folio), folio); + + __folio_add_rmap(folio, page, nr_pages, vma, level); + + /* + * Only mlock it if the folio is fully mapped to the VMA. + * + * Partially mapped folios can be split on reclaim and part outside + * of mlocked VMA can be evicted or freed. + */ + if (folio_nr_pages(folio) == nr_pages) + mlock_vma_folio(folio, vma); } /** - * page_add_file_rmap - add pte mapping to a file page - * @page: the page to add the mapping to + * folio_add_file_rmap_ptes - add PTE mappings to a page range of a folio + * @folio: The folio to add the mappings to + * @page: The first page to add + * @nr_pages: The number of pages that will be mapped using PTEs + * @vma: The vm area in which the mappings are added + * + * The page range of the folio is defined by [page, page + nr_pages) * - * The caller needs to hold the pte lock. + * The caller needs to hold the page table lock. 
*/ -void page_add_file_rmap(struct page *page) +void folio_add_file_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma) { - bool locked; - unsigned long flags; + __folio_add_file_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); +} - mem_cgroup_begin_update_page_stat(page, &locked, &flags); - if (atomic_inc_and_test(&page->_mapcount)) { - __inc_zone_page_state(page, NR_FILE_MAPPED); - mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED); - } - mem_cgroup_end_update_page_stat(page, &locked, &flags); +/** + * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_add_file_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif } /** - * page_remove_rmap - take down pte mapping from a page - * @page: page to remove mapping from + * folio_add_file_rmap_pud - add a PUD mapping to a page range of a folio + * @folio: The folio to add the mapping to + * @page: The first page to add + * @vma: The vm area in which the mapping is added * - * The caller needs to hold the pte lock. + * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) + * + * The caller needs to hold the page table lock. */ -void page_remove_rmap(struct page *page) +void folio_add_file_rmap_pud(struct folio *folio, struct page *page, + struct vm_area_struct *vma) { - bool anon = PageAnon(page); - bool locked; - unsigned long flags; +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + __folio_add_file_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD); +#else + WARN_ON_ONCE(true); +#endif +} - /* - * The anon case has no mem_cgroup page_stat to update; but may - * uncharge_page() below, where the lock ordering can deadlock if - * we hold the lock against page_stat move: so avoid it on anon. - */ - if (!anon) - mem_cgroup_begin_update_page_stat(page, &locked, &flags); +static __always_inline void __folio_remove_rmap(struct folio *folio, + struct page *page, int nr_pages, struct vm_area_struct *vma, + enum pgtable_level level) +{ + atomic_t *mapped = &folio->_nr_pages_mapped; + int last = 0, nr = 0, nr_pmdmapped = 0; + bool partially_mapped = false; - /* page still mapped by someone else? */ - if (!atomic_add_negative(-1, &page->_mapcount)) - goto out; + __folio_rmap_sanity_checks(folio, page, nr_pages, level); + + switch (level) { + case PGTABLE_LEVEL_PTE: + if (!folio_test_large(folio)) { + nr = atomic_add_negative(-1, &folio->_mapcount); + break; + } + + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + nr = folio_sub_return_large_mapcount(folio, nr_pages, vma); + if (!nr) { + /* Now completely unmapped. 
*/ + nr = folio_large_nr_pages(folio); + } else { + partially_mapped = nr < folio_large_nr_pages(folio) && + !folio_entire_mapcount(folio); + nr = 0; + } + break; + } + + folio_sub_large_mapcount(folio, nr_pages, vma); + do { + last += atomic_add_negative(-1, &page->_mapcount); + } while (page++, --nr_pages > 0); + + if (last && + atomic_sub_return_relaxed(last, mapped) < ENTIRELY_MAPPED) + nr = last; + + partially_mapped = nr && atomic_read(mapped); + break; + case PGTABLE_LEVEL_PMD: + case PGTABLE_LEVEL_PUD: + if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) { + last = atomic_add_negative(-1, &folio->_entire_mapcount); + if (level == PGTABLE_LEVEL_PMD && last) + nr_pmdmapped = folio_large_nr_pages(folio); + nr = folio_dec_return_large_mapcount(folio, vma); + if (!nr) { + /* Now completely unmapped. */ + nr = folio_large_nr_pages(folio); + } else { + partially_mapped = last && + nr < folio_large_nr_pages(folio); + nr = 0; + } + break; + } + + folio_dec_large_mapcount(folio, vma); + last = atomic_add_negative(-1, &folio->_entire_mapcount); + if (last) { + nr = atomic_sub_return_relaxed(ENTIRELY_MAPPED, mapped); + if (likely(nr < ENTIRELY_MAPPED)) { + nr_pages = folio_large_nr_pages(folio); + if (level == PGTABLE_LEVEL_PMD) + nr_pmdmapped = nr_pages; + nr = nr_pages - nr; + /* Raced ahead of another remove and an add? */ + if (unlikely(nr < 0)) + nr = 0; + } else { + /* An add of ENTIRELY_MAPPED raced ahead */ + nr = 0; + } + } + + partially_mapped = nr && nr < nr_pmdmapped; + break; + default: + BUILD_BUG(); + } /* - * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED - * and not charged by memcg for now. + * Queue anon large folio for deferred split if at least one page of + * the folio is unmapped and at least one page is still mapped. + * + * Check partially_mapped first to ensure it is a large folio. + * + * Device private folios do not support deferred splitting and + * shrinker based scanning of the folios to free. */ - if (unlikely(PageHuge(page))) - goto out; - if (anon) { - mem_cgroup_uncharge_page(page); - if (!PageTransHuge(page)) - __dec_zone_page_state(page, NR_ANON_PAGES); - else - __dec_zone_page_state(page, - NR_ANON_TRANSPARENT_HUGEPAGES); - } else { - __dec_zone_page_state(page, NR_FILE_MAPPED); - mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED); - mem_cgroup_end_update_page_stat(page, &locked, &flags); - } - if (unlikely(PageMlocked(page))) - clear_page_mlock(page); + if (partially_mapped && folio_test_anon(folio) && + !folio_test_partially_mapped(folio) && + !folio_is_device_private(folio)) + deferred_split_folio(folio, true); + + __folio_mod_stat(folio, -nr, -nr_pmdmapped); + /* - * It would be tidy to reset the PageAnon mapping here, - * but that might overwrite a racing page_add_anon_rmap - * which increments mapcount after us but sets mapping - * before us: so leave the reset to free_hot_cold_page, - * and remember that it's only reliable while mapped. - * Leaving it set also helps swapoff to reinstate ptes - * faster for those pages still in swapcache. + * It would be tidy to reset folio_test_anon mapping when fully + * unmapped, but that might overwrite a racing folio_add_anon_rmap_*() + * which increments mapcount after us but sets mapping before us: + * so leave the reset to free_pages_prepare, and remember that + * it's only reliable while mapped. 
*/ - return; -out: - if (!anon) - mem_cgroup_end_update_page_stat(page, &locked, &flags); + + munlock_vma_folio(folio, vma); +} + +/** + * folio_remove_rmap_ptes - remove PTE mappings from a page range of a folio + * @folio: The folio to remove the mappings from + * @page: The first page to remove + * @nr_pages: The number of pages that will be removed from the mapping + * @vma: The vm area from which the mappings are removed + * + * The page range of the folio is defined by [page, page + nr_pages) + * + * The caller needs to hold the page table lock. + */ +void folio_remove_rmap_ptes(struct folio *folio, struct page *page, + int nr_pages, struct vm_area_struct *vma) +{ + __folio_remove_rmap(folio, page, nr_pages, vma, PGTABLE_LEVEL_PTE); +} + +/** + * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio + * @folio: The folio to remove the mapping from + * @page: The first page to remove + * @vma: The vm area from which the mapping is removed + * + * The page range of the folio is defined by [page, page + HPAGE_PMD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_remove_rmap_pmd(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); +#else + WARN_ON_ONCE(true); +#endif +} + +/** + * folio_remove_rmap_pud - remove a PUD mapping from a page range of a folio + * @folio: The folio to remove the mapping from + * @page: The first page to remove + * @vma: The vm area from which the mapping is removed + * + * The page range of the folio is defined by [page, page + HPAGE_PUD_NR) + * + * The caller needs to hold the page table lock. + */ +void folio_remove_rmap_pud(struct folio *folio, struct page *page, + struct vm_area_struct *vma) +{ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + __folio_remove_rmap(folio, page, HPAGE_PUD_NR, vma, PGTABLE_LEVEL_PUD); +#else + WARN_ON_ONCE(true); +#endif +} + +static inline unsigned int folio_unmap_pte_batch(struct folio *folio, + struct page_vma_mapped_walk *pvmw, + enum ttu_flags flags, pte_t pte) +{ + unsigned long end_addr, addr = pvmw->address; + struct vm_area_struct *vma = pvmw->vma; + unsigned int max_nr; + + if (flags & TTU_HWPOISON) + return 1; + if (!folio_test_large(folio)) + return 1; + + /* We may only batch within a single VMA and a single page table. */ + end_addr = pmd_addr_end(addr, vma->vm_end); + max_nr = (end_addr - addr) >> PAGE_SHIFT; + + /* We only support lazyfree batching for now ... */ + if (!folio_test_anon(folio) || folio_test_swapbacked(folio)) + return 1; + if (pte_unused(pte)) + return 1; + + return folio_pte_batch(folio, pvmw->pte, pte, max_nr); } /* - * Subfunctions of try_to_unmap: try_to_unmap_one called - * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file. 
+ * @arg: enum ttu_flags will be passed to this argument
  */
-int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-		unsigned long address, enum ttu_flags flags)
+static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
+		     unsigned long address, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	pte_t *pte;
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
+	bool anon_exclusive, ret = true;
 	pte_t pteval;
-	spinlock_t *ptl;
-	int ret = SWAP_AGAIN;
+	struct page *subpage;
+	struct mmu_notifier_range range;
+	enum ttu_flags flags = (enum ttu_flags)(long)arg;
+	unsigned long nr_pages = 1, end_addr;
+	unsigned long pfn;
+	unsigned long hsz = 0;
+	int ptes = 0;
 
-	pte = page_check_address(page, mm, address, &ptl, 0);
-	if (!pte)
-		goto out;
+	/*
+	 * When racing against e.g. zap_pte_range() on another cpu,
+	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
+	 * try_to_unmap() may return before page_mapped() has become false,
+	 * if page table locking is skipped: use TTU_SYNC to wait for that.
+	 */
+	if (flags & TTU_SYNC)
+		pvmw.flags = PVMW_SYNC;
 
 	/*
-	 * If the page is mlock()d, we cannot swap it out.
-	 * If it's recently referenced (perhaps page_referenced
-	 * skipped over this mm) then we should reactivate it.
+	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
+	 * For hugetlb, it could be much worse if we need to do pud
+	 * invalidation in the case of pmd sharing.
+	 *
+	 * Note that the folio cannot be freed in this function, as the caller
+	 * of try_to_unmap() must hold a reference on the folio.
 	 */
-	if (!(flags & TTU_IGNORE_MLOCK)) {
-		if (vma->vm_flags & VM_LOCKED)
-			goto out_mlock;
+	range.end = vma_address_end(&pvmw);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
+				address, range.end);
+	if (folio_test_hugetlb(folio)) {
+		/*
+		 * If sharing is possible, start and end will be adjusted
+		 * accordingly.
+		 */
+		adjust_range_if_pmd_sharing_possible(vma, &range.start,
+						     &range.end);
 
-		if (TTU_ACTION(flags) == TTU_MUNLOCK)
-			goto out_unmap;
+		/* We need the huge page size for set_huge_pte_at() */
+		hsz = huge_page_size(hstate_vma(vma));
 	}
 
-	if (!(flags & TTU_IGNORE_ACCESS)) {
-		if (ptep_clear_flush_young_notify(vma, address, pte)) {
-			ret = SWAP_FAIL;
-			goto out_unmap;
+	mmu_notifier_invalidate_range_start(&range);
+
+	while (page_vma_mapped_walk(&pvmw)) {
+		/*
+		 * If the folio is in an mlock()d vma, we must not swap it out.
+		 */
+		if (!(flags & TTU_IGNORE_MLOCK) &&
+		    (vma->vm_flags & VM_LOCKED)) {
+			ptes++;
+
+			/*
+			 * Set 'ret' to indicate the page cannot be unmapped.
+			 *
+			 * Do not jump to walk_abort immediately, as additional
+			 * iterations might be required to detect a fully mapped
+			 * folio and mlock it.
+			 */
+			ret = false;
+
+			/* Only mlock fully mapped pages */
+			if (pvmw.pte && ptes != pvmw.nr_pages)
+				continue;
+
+			/*
+			 * All PTEs must be protected by the page table lock in
+			 * order to mlock the page.
+			 *
+			 * If a page table boundary has been crossed, the
+			 * current ptl only protects part of the ptes.
+			 */
+			if (pvmw.flags & PVMW_PGTABLE_CROSSED)
+				goto walk_done;
+
+			/* Restore the mlock which got missed */
+			mlock_vma_folio(folio, vma);
+			goto walk_done;
 		}
-	}
 
-	/* Nuke the page table entry.
*/ - flush_cache_page(vma, address, page_to_pfn(page)); - pteval = ptep_clear_flush(vma, address, pte); + if (!pvmw.pte) { + if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) { + if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio)) + goto walk_done; + /* + * unmap_huge_pmd_locked has either already marked + * the folio as swap-backed or decided to retain it + * due to GUP or speculative references. + */ + goto walk_abort; + } - /* Move the dirty bit to the physical page now the pte is gone. */ - if (pte_dirty(pteval)) - set_page_dirty(page); + if (flags & TTU_SPLIT_HUGE_PMD) { + /* + * We temporarily have to drop the PTL and + * restart so we can process the PTE-mapped THP. + */ + split_huge_pmd_locked(vma, pvmw.address, + pvmw.pmd, false); + flags &= ~TTU_SPLIT_HUGE_PMD; + page_vma_mapped_walk_restart(&pvmw); + continue; + } + } - /* Update high watermark before we lower rss */ - update_hiwater_rss(mm); + /* Unexpected PMD-mapped THP? */ + VM_BUG_ON_FOLIO(!pvmw.pte, folio); - if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { - if (!PageHuge(page)) { - if (PageAnon(page)) - dec_mm_counter(mm, MM_ANONPAGES); + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that + * actually map pages. + */ + pteval = ptep_get(pvmw.pte); + if (likely(pte_present(pteval))) { + pfn = pte_pfn(pteval); + } else { + const softleaf_t entry = softleaf_from_pte(pteval); + + pfn = softleaf_to_pfn(entry); + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + } + + subpage = folio_page(folio, pfn - folio_pfn(folio)); + address = pvmw.address; + anon_exclusive = folio_test_anon(folio) && + PageAnonExclusive(subpage); + + if (folio_test_hugetlb(folio)) { + bool anon = folio_test_anon(folio); + + /* + * The try_to_unmap() is only passed a hugetlb page + * in the case where the hugetlb page is poisoned. + */ + VM_BUG_ON_PAGE(!PageHWPoison(subpage), subpage); + /* + * huge_pmd_unshare may unmap an entire PMD page. + * There is no way of knowing exactly which PMDs may + * be cached for this mm, so we must flush them all. + * start/end were already adjusted above to cover this + * range. + */ + flush_cache_range(vma, range.start, range.end); + + /* + * To call huge_pmd_unshare, i_mmap_rwsem must be + * held in write mode. Caller needs to explicitly + * do this outside rmap routines. + * + * We also must hold hugetlb vma_lock in write mode. + * Lock order dictates acquiring vma_lock BEFORE + * i_mmap_rwsem. We can only try lock here and fail + * if unsuccessful. + */ + if (!anon) { + VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); + if (!hugetlb_vma_trylock_write(vma)) + goto walk_abort; + if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { + hugetlb_vma_unlock_write(vma); + flush_tlb_range(vma, + range.start, range.end); + /* + * The ref count of the PMD page was + * dropped which is part of the way map + * counting is done for shared PMDs. + * Return 'true' here. When there is + * no other sharing, huge_pmd_unshare + * returns false and we will unmap the + * actual page and drop map count + * to zero. + */ + goto walk_done; + } + hugetlb_vma_unlock_write(vma); + } + pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + } else if (likely(pte_present(pteval))) { + nr_pages = folio_unmap_pte_batch(folio, &pvmw, flags, pteval); + end_addr = address + nr_pages * PAGE_SIZE; + flush_cache_range(vma, address, end_addr); + + /* Nuke the page table entry. 
*/ + pteval = get_and_clear_ptes(mm, address, pvmw.pte, nr_pages); + /* + * We clear the PTE but do not flush so potentially + * a remote CPU could still be writing to the folio. + * If the entry was previously clean then the + * architecture must guarantee that a clear->dirty + * transition on a cached TLB entry is written through + * and traps if the PTE is unmapped. + */ + if (should_defer_flush(mm, flags)) + set_tlb_ubc_flush_pending(mm, pteval, address, end_addr); else - dec_mm_counter(mm, MM_FILEPAGES); + flush_tlb_range(vma, address, end_addr); + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + } else { + pte_clear(mm, address, pvmw.pte); } - set_pte_at(mm, address, pte, - swp_entry_to_pte(make_hwpoison_entry(page))); - } else if (PageAnon(page)) { - swp_entry_t entry = { .val = page_private(page) }; - if (PageSwapCache(page)) { + /* + * Now the pte is cleared. If this pte was uffd-wp armed, + * we may want to replace a none pte with a marker pte if + * it's file-backed, so we don't lose the tracking info. + */ + pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval); + + /* Update high watermark before we lower rss */ + update_hiwater_rss(mm); + + if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) { + pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); + if (folio_test_hugetlb(folio)) { + hugetlb_count_sub(folio_nr_pages(folio), mm); + set_huge_pte_at(mm, address, pvmw.pte, pteval, + hsz); + } else { + dec_mm_counter(mm, mm_counter(folio)); + set_pte_at(mm, address, pvmw.pte, pteval); + } + } else if (likely(pte_present(pteval)) && pte_unused(pteval) && + !userfaultfd_armed(vma)) { + /* + * The guest indicated that the page content is of no + * interest anymore. Simply discard the pte, vmscan + * will take care of the rest. + * A future reference will then fault in a new zero + * page. When userfaultfd is active, we must not drop + * this page though, as its main user (postcopy + * migration) will not expect userfaults on already + * copied pages. + */ + dec_mm_counter(mm, mm_counter(folio)); + } else if (folio_test_anon(folio)) { + swp_entry_t entry = page_swap_entry(subpage); + pte_t swp_pte; /* * Store the swap location in the pte. * See handle_pte_fault() ... */ + if (unlikely(folio_test_swapbacked(folio) != + folio_test_swapcache(folio))) { + WARN_ON_ONCE(1); + goto walk_abort; + } + + /* MADV_FREE page check */ + if (!folio_test_swapbacked(folio)) { + int ref_count, map_count; + + /* + * Synchronize with gup_pte_range(): + * - clear PTE; barrier; read refcount + * - inc refcount; barrier; read PTE + */ + smp_mb(); + + ref_count = folio_ref_count(folio); + map_count = folio_mapcount(folio); + + /* + * Order reads for page refcount and dirty flag + * (see comments in __remove_mapping()). + */ + smp_rmb(); + + if (folio_test_dirty(folio) && !(vma->vm_flags & VM_DROPPABLE)) { + /* + * redirtied either using the page table or a previously + * obtained GUP reference. + */ + set_ptes(mm, address, pvmw.pte, pteval, nr_pages); + folio_set_swapbacked(folio); + goto walk_abort; + } else if (ref_count != 1 + map_count) { + /* + * Additional reference. Could be a GUP reference or any + * speculative reference. GUP users must mark the folio + * dirty if there was a modification. This folio cannot be + * reclaimed right now either way, so act just like nothing + * happened. + * We'll come back here later and detect if the folio was + * dirtied when the additional reference is gone. 
+ */ + set_ptes(mm, address, pvmw.pte, pteval, nr_pages); + goto walk_abort; + } + add_mm_counter(mm, MM_ANONPAGES, -nr_pages); + goto discard; + } + if (swap_duplicate(entry) < 0) { - set_pte_at(mm, address, pte, pteval); - ret = SWAP_FAIL; - goto out_unmap; + set_pte_at(mm, address, pvmw.pte, pteval); + goto walk_abort; + } + + /* + * arch_unmap_one() is expected to be a NOP on + * architectures where we could have PFN swap PTEs, + * so we'll not check/care. + */ + if (arch_unmap_one(mm, vma, address, pteval) < 0) { + swap_free(entry); + set_pte_at(mm, address, pvmw.pte, pteval); + goto walk_abort; + } + + /* See folio_try_share_anon_rmap(): clear PTE first. */ + if (anon_exclusive && + folio_try_share_anon_rmap_pte(folio, subpage)) { + swap_free(entry); + set_pte_at(mm, address, pvmw.pte, pteval); + goto walk_abort; } if (list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); @@ -1255,535 +2179,840 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, } dec_mm_counter(mm, MM_ANONPAGES); inc_mm_counter(mm, MM_SWAPENTS); - } else if (IS_ENABLED(CONFIG_MIGRATION)) { + swp_pte = swp_entry_to_pte(entry); + if (anon_exclusive) + swp_pte = pte_swp_mkexclusive(swp_pte); + if (likely(pte_present(pteval))) { + if (pte_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } else { + if (pte_swp_soft_dirty(pteval)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + if (pte_swp_uffd_wp(pteval)) + swp_pte = pte_swp_mkuffd_wp(swp_pte); + } + set_pte_at(mm, address, pvmw.pte, swp_pte); + } else { /* - * Store the pfn of the page in a special migration - * pte. do_swap_page() will wait until the migration - * pte is removed and then restart fault handling. + * This is a locked file-backed folio, + * so it cannot be removed from the page + * cache and replaced by a new folio before + * mmu_notifier_invalidate_range_end, so no + * concurrent thread might update its page table + * to point at a new folio while a device is + * still using this folio. + * + * See Documentation/mm/mmu_notifier.rst */ - BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); - entry = make_migration_entry(page, pte_write(pteval)); + dec_mm_counter(mm, mm_counter_file(folio)); } - set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); - BUG_ON(pte_file(*pte)); - } else if (IS_ENABLED(CONFIG_MIGRATION) && - (TTU_ACTION(flags) == TTU_MIGRATION)) { - /* Establish migration entry for a file page */ - swp_entry_t entry; - entry = make_migration_entry(page, pte_write(pteval)); - set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); - } else - dec_mm_counter(mm, MM_FILEPAGES); - - page_remove_rmap(page); - page_cache_release(page); - -out_unmap: - pte_unmap_unlock(pte, ptl); - if (ret != SWAP_FAIL) - mmu_notifier_invalidate_page(mm, address); -out: - return ret; +discard: + if (unlikely(folio_test_hugetlb(folio))) { + hugetlb_remove_rmap(folio); + } else { + folio_remove_rmap_ptes(folio, subpage, nr_pages, vma); + } + if (vma->vm_flags & VM_LOCKED) + mlock_drain_local(); + folio_put_refs(folio, nr_pages); -out_mlock: - pte_unmap_unlock(pte, ptl); + /* + * If we are sure that we batched the entire folio and cleared + * all PTEs, we can just optimize and stop right here. 
+ */ + if (nr_pages == folio_nr_pages(folio)) + goto walk_done; + continue; +walk_abort: + ret = false; +walk_done: + page_vma_mapped_walk_done(&pvmw); + break; + } + mmu_notifier_invalidate_range_end(&range); - /* - * We need mmap_sem locking, Otherwise VM_LOCKED check makes - * unstable result and race. Plus, We can't wait here because - * we now hold anon_vma->rwsem or mapping->i_mmap_mutex. - * if trylock failed, the page remain in evictable lru and later - * vmscan could retry to move the page to unevictable lru if the - * page is actually mlocked. - */ - if (down_read_trylock(&vma->vm_mm->mmap_sem)) { - if (vma->vm_flags & VM_LOCKED) { - mlock_vma_page(page); - ret = SWAP_MLOCK; - } - up_read(&vma->vm_mm->mmap_sem); - } return ret; } -/* - * objrmap doesn't work for nonlinear VMAs because the assumption that - * offset-into-file correlates with offset-into-virtual-addresses does not hold. - * Consequently, given a particular page and its ->index, we cannot locate the - * ptes which are mapping that page without an exhaustive linear search. - * - * So what this code does is a mini "virtual scan" of each nonlinear VMA which - * maps the file to which the target page belongs. The ->vm_private_data field - * holds the current cursor into that scan. Successive searches will circulate - * around the vma's virtual address space. - * - * So as more replacement pressure is applied to the pages in a nonlinear VMA, - * more scanning pressure is placed against them as well. Eventually pages - * will become fully unmapped and are eligible for eviction. - * - * For very sparsely populated VMAs this is a little inefficient - chances are - * there there won't be many ptes located within the scan cluster. In this case - * maybe we could scan further - to the end of the pte page, perhaps. - * - * Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can - * acquire it without blocking. If vma locked, mlock the pages in the cluster, - * rather than unmapping them. If we encounter the "check_page" that vmscan is - * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN. +static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) +{ + return vma_is_temporary_stack(vma); +} + +static int folio_not_mapped(struct folio *folio) +{ + return !folio_mapped(folio); +} + +/** + * try_to_unmap - Try to remove all page table mappings to a folio. + * @folio: The folio to unmap. + * @flags: action and flags + * + * Tries to remove all the page table entries which are mapping this + * folio. It is the caller's responsibility to check if the folio is + * still mapped if needed (use TTU_SYNC to prevent accounting races). + * + * Context: Caller must hold the folio lock. */ -#define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE) -#define CLUSTER_MASK (~(CLUSTER_SIZE - 1)) +void try_to_unmap(struct folio *folio, enum ttu_flags flags) +{ + struct rmap_walk_control rwc = { + .rmap_one = try_to_unmap_one, + .arg = (void *)flags, + .done = folio_not_mapped, + .anon_lock = folio_lock_anon_vma_read, + }; + + if (flags & TTU_RMAP_LOCKED) + rmap_walk_locked(folio, &rwc); + else + rmap_walk(folio, &rwc); +} -static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, - struct vm_area_struct *vma, struct page *check_page) +/* + * @arg: enum ttu_flags will be passed to this argument. + * + * If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs + * containing migration entries. 
+ */ +static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; - pmd_t *pmd; - pte_t *pte; + DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0); + bool anon_exclusive, writable, ret = true; pte_t pteval; - spinlock_t *ptl; - struct page *page; - unsigned long address; - unsigned long mmun_start; /* For mmu_notifiers */ - unsigned long mmun_end; /* For mmu_notifiers */ - unsigned long end; - int ret = SWAP_AGAIN; - int locked_vma = 0; - - address = (vma->vm_start + cursor) & CLUSTER_MASK; - end = address + CLUSTER_SIZE; - if (address < vma->vm_start) - address = vma->vm_start; - if (end > vma->vm_end) - end = vma->vm_end; - - pmd = mm_find_pmd(mm, address); - if (!pmd) - return ret; - - mmun_start = address; - mmun_end = end; - mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); + struct page *subpage; + struct mmu_notifier_range range; + enum ttu_flags flags = (enum ttu_flags)(long)arg; + unsigned long pfn; + unsigned long hsz = 0; + + /* + * When racing against e.g. zap_pte_range() on another cpu, + * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(), + * try_to_migrate() may return before page_mapped() has become false, + * if page table locking is skipped: use TTU_SYNC to wait for that. + */ + if (flags & TTU_SYNC) + pvmw.flags = PVMW_SYNC; /* - * If we can acquire the mmap_sem for read, and vma is VM_LOCKED, - * keep the sem while scanning the cluster for mlocking pages. + * For THP, we have to assume the worse case ie pmd for invalidation. + * For hugetlb, it could be much worse if we need to do pud + * invalidation in the case of pmd sharing. + * + * Note that the page can not be free in this function as call of + * try_to_unmap() must hold a reference on the page. */ - if (down_read_trylock(&vma->vm_mm->mmap_sem)) { - locked_vma = (vma->vm_flags & VM_LOCKED); - if (!locked_vma) - up_read(&vma->vm_mm->mmap_sem); /* don't need it */ + range.end = vma_address_end(&pvmw); + mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, + address, range.end); + if (folio_test_hugetlb(folio)) { + /* + * If sharing is possible, start and end will be adjusted + * accordingly. 
+ */ + adjust_range_if_pmd_sharing_possible(vma, &range.start, + &range.end); + + /* We need the huge page size for set_huge_pte_at() */ + hsz = huge_page_size(hstate_vma(vma)); } + mmu_notifier_invalidate_range_start(&range); + + while (page_vma_mapped_walk(&pvmw)) { + /* PMD-mapped THP migration entry */ + if (!pvmw.pte) { + __maybe_unused unsigned long pfn; + __maybe_unused pmd_t pmdval; + + if (flags & TTU_SPLIT_HUGE_PMD) { + split_huge_pmd_locked(vma, pvmw.address, + pvmw.pmd, true); + ret = false; + page_vma_mapped_walk_done(&pvmw); + break; + } +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION + pmdval = pmdp_get(pvmw.pmd); + if (likely(pmd_present(pmdval))) + pfn = pmd_pfn(pmdval); + else + pfn = softleaf_to_pfn(softleaf_from_pmd(pmdval)); - pte = pte_offset_map_lock(mm, pmd, address, &ptl); + subpage = folio_page(folio, pfn - folio_pfn(folio)); - /* Update high watermark before we lower rss */ - update_hiwater_rss(mm); + VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) || + !folio_test_pmd_mappable(folio), folio); - for (; address < end; pte++, address += PAGE_SIZE) { - if (!pte_present(*pte)) + if (set_pmd_migration_entry(&pvmw, subpage)) { + ret = false; + page_vma_mapped_walk_done(&pvmw); + break; + } continue; - page = vm_normal_page(vma, address, *pte); - BUG_ON(!page || PageAnon(page)); - - if (locked_vma) { - mlock_vma_page(page); /* no-op if already mlocked */ - if (page == check_page) - ret = SWAP_MLOCK; - continue; /* don't unmap */ +#endif } - if (ptep_clear_flush_young_notify(vma, address, pte)) - continue; + /* Unexpected PMD-mapped THP? */ + VM_BUG_ON_FOLIO(!pvmw.pte, folio); - /* Nuke the page table entry. */ - flush_cache_page(vma, address, pte_pfn(*pte)); - pteval = ptep_clear_flush(vma, address, pte); + /* + * Handle PFN swap PTEs, such as device-exclusive ones, that + * actually map pages. + */ + pteval = ptep_get(pvmw.pte); + if (likely(pte_present(pteval))) { + pfn = pte_pfn(pteval); + } else { + const softleaf_t entry = softleaf_from_pte(pteval); + + pfn = softleaf_to_pfn(entry); + VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); + } - /* If nonlinear, store the file page offset in the pte. */ - if (page->index != linear_page_index(vma, address)) - set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); + subpage = folio_page(folio, pfn - folio_pfn(folio)); + address = pvmw.address; + anon_exclusive = folio_test_anon(folio) && + PageAnonExclusive(subpage); - /* Move the dirty bit to the physical page now the pte is gone. */ - if (pte_dirty(pteval)) - set_page_dirty(page); + if (folio_test_hugetlb(folio)) { + bool anon = folio_test_anon(folio); - page_remove_rmap(page); - page_cache_release(page); - dec_mm_counter(mm, MM_FILEPAGES); - (*mapcount)--; - } - pte_unmap_unlock(pte - 1, ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - if (locked_vma) - up_read(&vma->vm_mm->mmap_sem); - return ret; -} + /* + * huge_pmd_unshare may unmap an entire PMD page. + * There is no way of knowing exactly which PMDs may + * be cached for this mm, so we must flush them all. + * start/end were already adjusted above to cover this + * range. + */ + flush_cache_range(vma, range.start, range.end); -bool is_vma_temporary_stack(struct vm_area_struct *vma) -{ - int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); + /* + * To call huge_pmd_unshare, i_mmap_rwsem must be + * held in write mode. Caller needs to explicitly + * do this outside rmap routines. + * + * We also must hold hugetlb vma_lock in write mode. 
+ * Lock order dictates acquiring vma_lock BEFORE + * i_mmap_rwsem. We can only try lock here and + * fail if unsuccessful. + */ + if (!anon) { + VM_BUG_ON(!(flags & TTU_RMAP_LOCKED)); + if (!hugetlb_vma_trylock_write(vma)) { + page_vma_mapped_walk_done(&pvmw); + ret = false; + break; + } + if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { + hugetlb_vma_unlock_write(vma); + flush_tlb_range(vma, + range.start, range.end); + + /* + * The ref count of the PMD page was + * dropped which is part of the way map + * counting is done for shared PMDs. + * Return 'true' here. When there is + * no other sharing, huge_pmd_unshare + * returns false and we will unmap the + * actual page and drop map count + * to zero. + */ + page_vma_mapped_walk_done(&pvmw); + break; + } + hugetlb_vma_unlock_write(vma); + } + /* Nuke the hugetlb page table entry */ + pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + writable = pte_write(pteval); + } else if (likely(pte_present(pteval))) { + flush_cache_page(vma, address, pfn); + /* Nuke the page table entry. */ + if (should_defer_flush(mm, flags)) { + /* + * We clear the PTE but do not flush so potentially + * a remote CPU could still be writing to the folio. + * If the entry was previously clean then the + * architecture must guarantee that a clear->dirty + * transition on a cached TLB entry is written through + * and traps if the PTE is unmapped. + */ + pteval = ptep_get_and_clear(mm, address, pvmw.pte); + + set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); + } else { + pteval = ptep_clear_flush(vma, address, pvmw.pte); + } + if (pte_dirty(pteval)) + folio_mark_dirty(folio); + writable = pte_write(pteval); + } else { + const softleaf_t entry = softleaf_from_pte(pteval); - if (!maybe_stack) - return false; + pte_clear(mm, address, pvmw.pte); - if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == - VM_STACK_INCOMPLETE_SETUP) - return true; + writable = softleaf_is_device_private_write(entry); + } - return false; -} + VM_WARN_ON_FOLIO(writable && folio_test_anon(folio) && + !anon_exclusive, folio); -/** - * try_to_unmap_anon - unmap or unlock anonymous page using the object-based - * rmap method - * @page: the page to unmap/unlock - * @flags: action and flags - * - * Find all the mappings of a page using the mapping pointer and the vma chains - * contained in the anon_vma struct it points to. - * - * This function is only called from try_to_unmap/try_to_munlock for - * anonymous pages. - * When called from try_to_munlock(), the mmap_sem of the mm containing the vma - * where the page was found will be held for write. So, we won't recheck - * vm_flags for that VMA. That should be OK, because that vma shouldn't be - * 'LOCKED. 
- */ -static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) -{ - struct anon_vma *anon_vma; - pgoff_t pgoff; - struct anon_vma_chain *avc; - int ret = SWAP_AGAIN; + /* Update high watermark before we lower rss */ + update_hiwater_rss(mm); - anon_vma = page_lock_anon_vma_read(page); - if (!anon_vma) - return ret; + if (PageHWPoison(subpage)) { + VM_WARN_ON_FOLIO(folio_is_device_private(folio), folio); - pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); - anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { - struct vm_area_struct *vma = avc->vma; - unsigned long address; + pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); + if (folio_test_hugetlb(folio)) { + hugetlb_count_sub(folio_nr_pages(folio), mm); + set_huge_pte_at(mm, address, pvmw.pte, pteval, + hsz); + } else { + dec_mm_counter(mm, mm_counter(folio)); + set_pte_at(mm, address, pvmw.pte, pteval); + } + } else if (likely(pte_present(pteval)) && pte_unused(pteval) && + !userfaultfd_armed(vma)) { + /* + * The guest indicated that the page content is of no + * interest anymore. Simply discard the pte, vmscan + * will take care of the rest. + * A future reference will then fault in a new zero + * page. When userfaultfd is active, we must not drop + * this page though, as its main user (postcopy + * migration) will not expect userfaults on already + * copied pages. + */ + dec_mm_counter(mm, mm_counter(folio)); + } else { + swp_entry_t entry; + pte_t swp_pte; - /* - * During exec, a temporary VMA is setup and later moved. - * The VMA is moved under the anon_vma lock but not the - * page tables leading to a race where migration cannot - * find the migration ptes. Rather than increasing the - * locking requirements of exec(), migration skips - * temporary VMAs until after exec() completes. - */ - if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) && - is_vma_temporary_stack(vma)) - continue; + /* + * arch_unmap_one() is expected to be a NOP on + * architectures where we could have PFN swap PTEs, + * so we'll not check/care. + */ + if (arch_unmap_one(mm, vma, address, pteval) < 0) { + if (folio_test_hugetlb(folio)) + set_huge_pte_at(mm, address, pvmw.pte, + pteval, hsz); + else + set_pte_at(mm, address, pvmw.pte, pteval); + ret = false; + page_vma_mapped_walk_done(&pvmw); + break; + } - address = vma_address(page, vma); - ret = try_to_unmap_one(page, vma, address, flags); - if (ret != SWAP_AGAIN || !page_mapped(page)) - break; + /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ + if (folio_test_hugetlb(folio)) { + if (anon_exclusive && + hugetlb_try_share_anon_rmap(folio)) { + set_huge_pte_at(mm, address, pvmw.pte, + pteval, hsz); + ret = false; + page_vma_mapped_walk_done(&pvmw); + break; + } + } else if (anon_exclusive && + folio_try_share_anon_rmap_pte(folio, subpage)) { + set_pte_at(mm, address, pvmw.pte, pteval); + ret = false; + page_vma_mapped_walk_done(&pvmw); + break; + } + + /* + * Store the pfn of the page in a special migration + * pte. do_swap_page() will wait until the migration + * pte is removed and then restart fault handling. 
+			 */
+			if (writable)
+				entry = make_writable_migration_entry(
+							page_to_pfn(subpage));
+			else if (anon_exclusive)
+				entry = make_readable_exclusive_migration_entry(
+							page_to_pfn(subpage));
+			else
+				entry = make_readable_migration_entry(
+							page_to_pfn(subpage));
+			if (likely(pte_present(pteval))) {
+				if (pte_young(pteval))
+					entry = make_migration_entry_young(entry);
+				if (pte_dirty(pteval))
+					entry = make_migration_entry_dirty(entry);
+				swp_pte = swp_entry_to_pte(entry);
+				if (pte_soft_dirty(pteval))
+					swp_pte = pte_swp_mksoft_dirty(swp_pte);
+				if (pte_uffd_wp(pteval))
+					swp_pte = pte_swp_mkuffd_wp(swp_pte);
+			} else {
+				swp_pte = swp_entry_to_pte(entry);
+				if (pte_swp_soft_dirty(pteval))
+					swp_pte = pte_swp_mksoft_dirty(swp_pte);
+				if (pte_swp_uffd_wp(pteval))
+					swp_pte = pte_swp_mkuffd_wp(swp_pte);
+			}
+			if (folio_test_hugetlb(folio))
+				set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
+						hsz);
+			else
+				set_pte_at(mm, address, pvmw.pte, swp_pte);
+			trace_set_migration_pte(address, pte_val(swp_pte),
+						folio_order(folio));
+			/*
+			 * No need to invalidate here; it will synchronize
+			 * against the special swap migration pte.
+			 */
+		}
+
+		if (unlikely(folio_test_hugetlb(folio)))
+			hugetlb_remove_rmap(folio);
+		else
+			folio_remove_rmap_pte(folio, subpage, vma);
+		if (vma->vm_flags & VM_LOCKED)
+			mlock_drain_local();
+		folio_put(folio);
 	}
 
-	page_unlock_anon_vma_read(anon_vma);
+	mmu_notifier_invalidate_range_end(&range);
+
 	return ret;
 }
 
 /**
- * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
- * @page: the page to unmap/unlock
+ * try_to_migrate - try to replace all page table mappings with swap entries
+ * @folio: the folio to replace page table entries for
  * @flags: action and flags
  *
- * Find all the mappings of a page using the mapping pointer and the vma chains
- * contained in the address_space struct it points to.
- *
- * This function is only called from try_to_unmap/try_to_munlock for
- * object-based pages.
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
- * where the page was found will be held for write.  So, we won't recheck
- * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
- * 'LOCKED.
+ * Tries to remove all the page table entries which are mapping this folio and
+ * replace them with special swap entries. Caller must hold the folio lock.
  */
-static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
+void try_to_migrate(struct folio *folio, enum ttu_flags flags)
 {
-	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	struct vm_area_struct *vma;
-	int ret = SWAP_AGAIN;
-	unsigned long cursor;
-	unsigned long max_nl_cursor = 0;
-	unsigned long max_nl_size = 0;
-	unsigned int mapcount;
-
-	if (PageHuge(page))
-		pgoff = page->index << compound_order(page);
-
-	mutex_lock(&mapping->i_mmap_mutex);
-	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-		unsigned long address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, flags);
-		if (ret != SWAP_AGAIN || !page_mapped(page))
-			goto out;
-	}
-
-	if (list_empty(&mapping->i_mmap_nonlinear))
-		goto out;
+	struct rmap_walk_control rwc = {
+		.rmap_one = try_to_migrate_one,
+		.arg = (void *)flags,
+		.done = folio_not_mapped,
+		.anon_lock = folio_lock_anon_vma_read,
+	};
 
 	/*
-	 * We don't bother to try to find the munlocked page in nonlinears.
-	 * It's costly.
Instead, later, page reclaim logic may call - * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily. + * Migration always ignores mlock and only supports TTU_RMAP_LOCKED and + * TTU_SPLIT_HUGE_PMD, TTU_SYNC, and TTU_BATCH_FLUSH flags. */ - if (TTU_ACTION(flags) == TTU_MUNLOCK) - goto out; - - list_for_each_entry(vma, &mapping->i_mmap_nonlinear, - shared.nonlinear) { - cursor = (unsigned long) vma->vm_private_data; - if (cursor > max_nl_cursor) - max_nl_cursor = cursor; - cursor = vma->vm_end - vma->vm_start; - if (cursor > max_nl_size) - max_nl_size = cursor; - } + if (WARN_ON_ONCE(flags & ~(TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | + TTU_SYNC | TTU_BATCH_FLUSH))) + return; - if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */ - ret = SWAP_FAIL; - goto out; - } + if (folio_is_zone_device(folio) && + (!folio_is_device_private(folio) && !folio_is_device_coherent(folio))) + return; /* - * We don't try to search for this page in the nonlinear vmas, - * and page_referenced wouldn't have found it anyway. Instead - * just walk the nonlinear vmas trying to age and unmap some. - * The mapcount of the page we came in with is irrelevant, - * but even so use it as a guide to how hard we should try? + * During exec, a temporary VMA is setup and later moved. + * The VMA is moved under the anon_vma lock but not the + * page tables leading to a race where migration cannot + * find the migration ptes. Rather than increasing the + * locking requirements of exec(), migration skips + * temporary VMAs until after exec() completes. */ - mapcount = page_mapcount(page); - if (!mapcount) - goto out; - cond_resched(); - - max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK; - if (max_nl_cursor == 0) - max_nl_cursor = CLUSTER_SIZE; - - do { - list_for_each_entry(vma, &mapping->i_mmap_nonlinear, - shared.nonlinear) { - cursor = (unsigned long) vma->vm_private_data; - while ( cursor < max_nl_cursor && - cursor < vma->vm_end - vma->vm_start) { - if (try_to_unmap_cluster(cursor, &mapcount, - vma, page) == SWAP_MLOCK) - ret = SWAP_MLOCK; - cursor += CLUSTER_SIZE; - vma->vm_private_data = (void *) cursor; - if ((int)mapcount <= 0) - goto out; - } - vma->vm_private_data = (void *) max_nl_cursor; - } - cond_resched(); - max_nl_cursor += CLUSTER_SIZE; - } while (max_nl_cursor <= max_nl_size); + if (!folio_test_ksm(folio) && folio_test_anon(folio)) + rwc.invalid_vma = invalid_migration_vma; - /* - * Don't loop forever (perhaps all the remaining pages are - * in locked vmas). Reset cursor on all unreserved nonlinear - * vmas, now forgetting on which ones it had fallen behind. - */ - list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear) - vma->vm_private_data = NULL; -out: - mutex_unlock(&mapping->i_mmap_mutex); - return ret; + if (flags & TTU_RMAP_LOCKED) + rmap_walk_locked(folio, &rwc); + else + rmap_walk(folio, &rwc); } +#ifdef CONFIG_DEVICE_PRIVATE /** - * try_to_unmap - try to remove all page table mappings to a page - * @page: the page to get unmapped - * @flags: action and flags + * make_device_exclusive() - Mark a page for exclusive use by a device + * @mm: mm_struct of associated target process + * @addr: the virtual address to mark for exclusive device access + * @owner: passed to MMU_NOTIFY_EXCLUSIVE range notifier to allow filtering + * @foliop: folio pointer will be stored here on success. * - * Tries to remove all the page table entries which are mapping this - * page, used in the pageout path. Caller must hold the page lock. 
- * Return values are: + * This function looks up the page mapped at the given address, grabs a + * folio reference, locks the folio and replaces the PTE with special + * device-exclusive PFN swap entry, preventing access through the process + * page tables. The function will return with the folio locked and referenced. + * + * On fault, the device-exclusive entries are replaced with the original PTE + * under folio lock, after calling MMU notifiers. * - * SWAP_SUCCESS - we succeeded in removing all mappings - * SWAP_AGAIN - we missed a mapping, try again later - * SWAP_FAIL - the page is unswappable - * SWAP_MLOCK - page is mlocked. + * Only anonymous non-hugetlb folios are supported and the VMA must have + * write permissions such that we can fault in the anonymous page writable + * in order to mark it exclusive. The caller must hold the mmap_lock in read + * mode. + * + * A driver using this to program access from a device must use a mmu notifier + * critical section to hold a device specific lock during programming. Once + * programming is complete it should drop the folio lock and reference after + * which point CPU access to the page will revoke the exclusive access. + * + * Notes: + * #. This function always operates on individual PTEs mapping individual + * pages. PMD-sized THPs are first remapped to be mapped by PTEs before + * the conversion happens on a single PTE corresponding to @addr. + * #. While concurrent access through the process page tables is prevented, + * concurrent access through other page references (e.g., earlier GUP + * invocation) is not handled and not supported. + * #. device-exclusive entries are considered "clean" and "old" by core-mm. + * Device drivers must update the folio state when informed by MMU + * notifiers. + * + * Returns: pointer to mapped page on success, otherwise a negative error. */ -int try_to_unmap(struct page *page, enum ttu_flags flags) +struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, + void *owner, struct folio **foliop) { + struct mmu_notifier_range range; + struct folio *folio, *fw_folio; + struct vm_area_struct *vma; + struct folio_walk fw; + struct page *page; + swp_entry_t entry; + pte_t swp_pte; int ret; - BUG_ON(!PageLocked(page)); - VM_BUG_ON(!PageHuge(page) && PageTransHuge(page)); + mmap_assert_locked(mm); + addr = PAGE_ALIGN_DOWN(addr); - if (unlikely(PageKsm(page))) - ret = try_to_unmap_ksm(page, flags); - else if (PageAnon(page)) - ret = try_to_unmap_anon(page, flags); - else - ret = try_to_unmap_file(page, flags); - if (ret != SWAP_MLOCK && !page_mapped(page)) - ret = SWAP_SUCCESS; - return ret; -} + /* + * Fault in the page writable and try to lock it; note that if the + * address would already be marked for exclusive use by a device, + * the GUP call would undo that first by triggering a fault. + * + * If any other device would already map this page exclusively, the + * fault will trigger a conversion to an ordinary + * (non-device-exclusive) PTE and issue a MMU_NOTIFY_EXCLUSIVE. + */ +retry: + page = get_user_page_vma_remote(mm, addr, + FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, + &vma); + if (IS_ERR(page)) + return page; + folio = page_folio(page); + + if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) { + folio_put(folio); + return ERR_PTR(-EOPNOTSUPP); + } -/** - * try_to_munlock - try to munlock a page - * @page: the page to be munlocked - * - * Called from munlock code. Checks all of the VMAs mapping the page - * to make sure nobody else has this page mlocked. 
The page will be - * returned with PG_mlocked cleared if no other vmas have it mlocked. - * - * Return values are: - * - * SWAP_AGAIN - no vma is holding page mlocked, or, - * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem - * SWAP_FAIL - page cannot be located at present - * SWAP_MLOCK - page is now mlocked. - */ -int try_to_munlock(struct page *page) -{ - VM_BUG_ON(!PageLocked(page) || PageLRU(page)); + ret = folio_lock_killable(folio); + if (ret) { + folio_put(folio); + return ERR_PTR(ret); + } - if (unlikely(PageKsm(page))) - return try_to_unmap_ksm(page, TTU_MUNLOCK); - else if (PageAnon(page)) - return try_to_unmap_anon(page, TTU_MUNLOCK); - else - return try_to_unmap_file(page, TTU_MUNLOCK); + /* + * Inform secondary MMUs that we are going to convert this PTE to + * device-exclusive, such that they unmap it now. Note that the + * caller must filter this event out to prevent livelocks. + */ + mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, + mm, addr, addr + PAGE_SIZE, owner); + mmu_notifier_invalidate_range_start(&range); + + /* + * Let's do a second walk and make sure we still find the same page + * mapped writable. Note that any page of an anonymous folio can + * only be mapped writable using exactly one PTE ("exclusive"), so + * there cannot be other mappings. + */ + fw_folio = folio_walk_start(&fw, vma, addr, 0); + if (fw_folio != folio || fw.page != page || + fw.level != FW_LEVEL_PTE || !pte_write(fw.pte)) { + if (fw_folio) + folio_walk_end(&fw, vma); + mmu_notifier_invalidate_range_end(&range); + folio_unlock(folio); + folio_put(folio); + goto retry; + } + + /* Nuke the page table entry so we get the uptodate dirty bit. */ + flush_cache_page(vma, addr, page_to_pfn(page)); + fw.pte = ptep_clear_flush(vma, addr, fw.ptep); + + /* Set the dirty flag on the folio now the PTE is gone. */ + if (pte_dirty(fw.pte)) + folio_mark_dirty(folio); + + /* + * Store the pfn of the page in a special device-exclusive PFN swap PTE. + * do_swap_page() will trigger the conversion back while holding the + * folio lock. + */ + entry = make_device_exclusive_entry(page_to_pfn(page)); + swp_pte = swp_entry_to_pte(entry); + if (pte_soft_dirty(fw.pte)) + swp_pte = pte_swp_mksoft_dirty(swp_pte); + /* The pte is writable, uffd-wp does not apply. */ + set_pte_at(mm, addr, fw.ptep, swp_pte); + + folio_walk_end(&fw, vma); + mmu_notifier_invalidate_range_end(&range); + *foliop = folio; + return page; } +EXPORT_SYMBOL_GPL(make_device_exclusive); +#endif void __put_anon_vma(struct anon_vma *anon_vma) { struct anon_vma *root = anon_vma->root; + anon_vma_free(anon_vma); if (root != anon_vma && atomic_dec_and_test(&root->refcount)) anon_vma_free(root); +} - anon_vma_free(anon_vma); +static struct anon_vma *rmap_walk_anon_lock(const struct folio *folio, + struct rmap_walk_control *rwc) +{ + struct anon_vma *anon_vma; + + if (rwc->anon_lock) + return rwc->anon_lock(folio, rwc); + + /* + * Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read() + * because that depends on page_mapped(); but not all its usages + * are holding mmap_lock. 
Users without mmap_lock are required to + * take a reference count to prevent the anon_vma disappearing + */ + anon_vma = folio_anon_vma(folio); + if (!anon_vma) + return NULL; + + if (anon_vma_trylock_read(anon_vma)) + goto out; + + if (rwc->try_lock) { + anon_vma = NULL; + rwc->contended = true; + goto out; + } + + anon_vma_lock_read(anon_vma); +out: + return anon_vma; } -#ifdef CONFIG_MIGRATION /* - * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): - * Called by migrate.c to remove migration ptes, but might be used more later. + * rmap_walk_anon - do something to anonymous page using the object-based + * rmap method + * @folio: the folio to be handled + * @rwc: control variable according to each walk type + * @locked: caller holds relevant rmap lock + * + * Find all the mappings of a folio using the mapping pointer and the vma + * chains contained in the anon_vma struct it points to. */ -static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg) +static void rmap_walk_anon(struct folio *folio, + struct rmap_walk_control *rwc, bool locked) { struct anon_vma *anon_vma; - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff_t pgoff_start, pgoff_end; struct anon_vma_chain *avc; - int ret = SWAP_AGAIN; /* - * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() - * because that depends on page_mapped(); but not all its usages - * are holding mmap_sem. Users without mmap_sem are required to - * take a reference count to prevent the anon_vma disappearing + * The folio lock ensures that folio->mapping can't be changed under us + * to an anon_vma with different root. */ - anon_vma = page_anon_vma(page); + VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); + + if (locked) { + anon_vma = folio_anon_vma(folio); + /* anon_vma disappear under us? */ + VM_BUG_ON_FOLIO(!anon_vma, folio); + } else { + anon_vma = rmap_walk_anon_lock(folio, rwc); + } if (!anon_vma) - return ret; - anon_vma_lock_read(anon_vma); - anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { + return; + + pgoff_start = folio_pgoff(folio); + pgoff_end = pgoff_start + folio_nr_pages(folio) - 1; + anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, + pgoff_start, pgoff_end) { struct vm_area_struct *vma = avc->vma; - unsigned long address = vma_address(page, vma); - ret = rmap_one(page, vma, address, arg); - if (ret != SWAP_AGAIN) + unsigned long address = vma_address(vma, pgoff_start, + folio_nr_pages(folio)); + + VM_BUG_ON_VMA(address == -EFAULT, vma); + cond_resched(); + + if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) + continue; + + if (!rwc->rmap_one(folio, vma, address, rwc->arg)) + break; + if (rwc->done && rwc->done(folio)) break; } - anon_vma_unlock_read(anon_vma); - return ret; + + if (!locked) + anon_vma_unlock_read(anon_vma); } -static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg) +/** + * __rmap_walk_file() - Traverse the reverse mapping for a file-backed mapping + * of a page mapped within a specified page cache object at a specified offset. + * + * @folio: Either the folio whose mappings to traverse, or if NULL, + * the callbacks specified in @rwc will be configured such + * as to be able to look up mappings correctly. + * @mapping: The page cache object whose mapping VMAs we intend to + * traverse. If @folio is non-NULL, this should be equal to + * folio_mapping(folio). 
+ * @pgoff_start: The offset within @mapping of the page which we are + * looking up. If @folio is non-NULL, this should be equal + * to folio_pgoff(folio). + * @nr_pages: The number of pages mapped by the mapping. If @folio is + * non-NULL, this should be equal to folio_nr_pages(folio). + * @rwc: The reverse mapping walk control object describing how + * the traversal should proceed. + * @locked: Is the @mapping already locked? If not, we acquire the + * lock. + */ +static void __rmap_walk_file(struct folio *folio, struct address_space *mapping, + pgoff_t pgoff_start, unsigned long nr_pages, + struct rmap_walk_control *rwc, bool locked) { - struct address_space *mapping = page->mapping; - pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff_t pgoff_end = pgoff_start + nr_pages - 1; struct vm_area_struct *vma; - int ret = SWAP_AGAIN; - if (!mapping) - return ret; - mutex_lock(&mapping->i_mmap_mutex); - vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { - unsigned long address = vma_address(page, vma); - ret = rmap_one(page, vma, address, arg); - if (ret != SWAP_AGAIN) - break; + VM_WARN_ON_FOLIO(folio && mapping != folio_mapping(folio), folio); + VM_WARN_ON_FOLIO(folio && pgoff_start != folio_pgoff(folio), folio); + VM_WARN_ON_FOLIO(folio && nr_pages != folio_nr_pages(folio), folio); + + if (!locked) { + if (i_mmap_trylock_read(mapping)) + goto lookup; + + if (rwc->try_lock) { + rwc->contended = true; + return; + } + + i_mmap_lock_read(mapping); } +lookup: + vma_interval_tree_foreach(vma, &mapping->i_mmap, + pgoff_start, pgoff_end) { + unsigned long address = vma_address(vma, pgoff_start, nr_pages); + + VM_BUG_ON_VMA(address == -EFAULT, vma); + cond_resched(); + + if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) + continue; + + if (!rwc->rmap_one(folio, vma, address, rwc->arg)) + goto done; + if (rwc->done && rwc->done(folio)) + goto done; + } +done: + if (!locked) + i_mmap_unlock_read(mapping); +} + +/* + * rmap_walk_file - do something to file page using the object-based rmap method + * @folio: the folio to be handled + * @rwc: control variable according to each walk type + * @locked: caller holds relevant rmap lock + * + * Find all the mappings of a folio using the mapping pointer and the vma chains + * contained in the address_space struct it points to. + */ +static void rmap_walk_file(struct folio *folio, + struct rmap_walk_control *rwc, bool locked) +{ /* - * No nonlinear handling: being always shared, nonlinear vmas - * never contain migration ptes. Decide what to do about this - * limitation to linear when we need rmap_walk() on nonlinear. + * The folio lock not only makes sure that folio->mapping cannot + * suddenly be NULLified by truncation, it makes sure that the structure + * at mapping cannot be freed and reused yet, so we can safely take + * mapping->i_mmap_rwsem. 
*/ - mutex_unlock(&mapping->i_mmap_mutex); - return ret; + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + + if (!folio->mapping) + return; + + __rmap_walk_file(folio, folio->mapping, folio->index, + folio_nr_pages(folio), rwc, locked); } -int rmap_walk(struct page *page, int (*rmap_one)(struct page *, - struct vm_area_struct *, unsigned long, void *), void *arg) +void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc) { - VM_BUG_ON(!PageLocked(page)); + if (unlikely(folio_test_ksm(folio))) + rmap_walk_ksm(folio, rwc); + else if (folio_test_anon(folio)) + rmap_walk_anon(folio, rwc, false); + else + rmap_walk_file(folio, rwc, false); +} - if (unlikely(PageKsm(page))) - return rmap_walk_ksm(page, rmap_one, arg); - else if (PageAnon(page)) - return rmap_walk_anon(page, rmap_one, arg); +/* Like rmap_walk, but caller holds relevant rmap lock */ +void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc) +{ + /* no ksm support for now */ + VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio); + if (folio_test_anon(folio)) + rmap_walk_anon(folio, rwc, true); else - return rmap_walk_file(page, rmap_one, arg); + rmap_walk_file(folio, rwc, true); } -#endif /* CONFIG_MIGRATION */ #ifdef CONFIG_HUGETLB_PAGE /* - * The following three functions are for anonymous (private mapped) hugepages. + * The following two functions are for anonymous (private mapped) hugepages. * Unlike common anonymous pages, anonymous hugepages have no accounting code * and no lru code, because we handle hugepages differently from common pages. */ -static void __hugepage_set_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address, int exclusive) +void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma, + unsigned long address, rmap_t flags) { - struct anon_vma *anon_vma = vma->anon_vma; - - BUG_ON(!anon_vma); - - if (PageAnon(page)) - return; - if (!exclusive) - anon_vma = anon_vma->root; - - anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; - page->mapping = (struct address_space *) anon_vma; - page->index = linear_page_index(vma, address); + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); + VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio); + + atomic_inc(&folio->_entire_mapcount); + atomic_inc(&folio->_large_mapcount); + if (flags & RMAP_EXCLUSIVE) + SetPageAnonExclusive(&folio->page); + VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 && + PageAnonExclusive(&folio->page), folio); } -void hugepage_add_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address) +void hugetlb_add_new_anon_rmap(struct folio *folio, + struct vm_area_struct *vma, unsigned long address) { - struct anon_vma *anon_vma = vma->anon_vma; - int first; + VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio); - BUG_ON(!PageLocked(page)); - BUG_ON(!anon_vma); - /* address might be in next vma when migration races vma_adjust */ - first = atomic_inc_and_test(&page->_mapcount); - if (first) - __hugepage_set_anon_rmap(page, vma, address, 0); -} - -void hugepage_add_new_anon_rmap(struct page *page, - struct vm_area_struct *vma, unsigned long address) -{ BUG_ON(address < vma->vm_start || address >= vma->vm_end); - atomic_set(&page->_mapcount, 0); - __hugepage_set_anon_rmap(page, vma, address, 1); + /* increment count (starts at -1) */ + atomic_set(&folio->_entire_mapcount, 0); + atomic_set(&folio->_large_mapcount, 0); + folio_clear_hugetlb_restore_reserve(folio); + __folio_set_anon(folio, vma, address, true); + SetPageAnonExclusive(&folio->page); } #endif /* 
CONFIG_HUGETLB_PAGE */
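
The retry loop and notifier dance above impose two obligations on callers of make_device_exclusive(): hold mmap_lock across the call, and filter out the MMU_NOTIFY_EXCLUSIVE event the function itself raises, or the second page-table walk never settles (the livelock the in-code comment warns about). A minimal sketch of that calling convention, loosely modeled on the heterogeneous-memory test driver; dev_owner, dev_claim_exclusive() and dev_invalidate_range_start() are made-up names for illustration:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>

static void *dev_owner;		/* opaque tag identifying this driver */

/*
 * Claim one CPU page for exclusive device access. On success the PTE
 * holds a device-exclusive swap entry and the folio comes back locked
 * with a reference held; do_swap_page() restores the CPU mapping on
 * the next CPU fault, under the folio lock.
 */
static int dev_claim_exclusive(struct mm_struct *mm, unsigned long addr)
{
	struct folio *folio;
	struct page *page;

	mmap_read_lock(mm);
	page = make_device_exclusive(mm, addr, dev_owner, &folio);
	mmap_read_unlock(mm);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* ... program the device page tables for @page here ... */

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}

static int dev_invalidate_range_start(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range)
{
	/*
	 * Skip the MMU_NOTIFY_EXCLUSIVE event raised on our own behalf;
	 * every other event must tear down the device mapping as usual.
	 */
	if (range->event == MMU_NOTIFY_EXCLUSIVE && range->owner == dev_owner)
		return 0;

	/* ... invalidate device mappings for [range->start, range->end) ... */
	return 0;
}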
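
rmap_walk() is driven entirely by the rmap_walk_control: rmap_one runs once per mapping VMA and returns false to stop early, done can end the walk once the caller has what it needs, and invalid_vma prunes VMAs before the callback runs. A hedged sketch of a trivial walker that counts the VMAs currently mapping a folio; count_one() and count_mapping_vmas() are invented names, and the folio lock is taken because the walkers above assert folio_test_locked():

#include <linux/rmap.h>
#include <linux/pagemap.h>

struct count_arg {
	unsigned int nr_vmas;
};

static bool count_one(struct folio *folio, struct vm_area_struct *vma,
		      unsigned long address, void *arg)
{
	struct count_arg *ca = arg;

	ca->nr_vmas++;
	return true;	/* keep walking; false would stop the walk */
}

static unsigned int count_mapping_vmas(struct folio *folio)
{
	struct count_arg ca = {};
	struct rmap_walk_control rwc = {
		.rmap_one = count_one,
		.arg = &ca,
	};

	folio_lock(folio);
	rmap_walk(folio, &rwc);
	folio_unlock(folio);

	return ca.nr_vmas;
}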
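
The try_lock/contended pair honored by rmap_walk_anon_lock() and __rmap_walk_file() lets latency-sensitive callers opt out of sleeping on a contended anon_vma rwsem or i_mmap_rwsem: the walk returns without visiting any VMA and leaves rwc->contended set. A sketch of that pattern, loosely following how folio_referenced() consumes the flag; the helper name and fallback policy are assumptions:

static bool rmap_walk_nonblocking(struct folio *folio,
				  struct rmap_walk_control *rwc)
{
	rwc->try_lock = true;
	rwc->contended = false;

	rmap_walk(folio, rwc);

	/*
	 * On contention nothing was walked; the caller picks the
	 * fallback: retry later, or conservatively treat the folio
	 * as still referenced/mapped.
	 */
	return !rwc->contended;
}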
