Diffstat (limited to 'mm/pagewalk.c')
-rw-r--r--  mm/pagewalk.c  680
1 file changed, 539 insertions(+), 141 deletions(-)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 7f1c9b274906..90cc346a6ecf 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -3,6 +3,13 @@
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
+#include <linux/mmu_context.h>
+#include <linux/swap.h>
+#include <linux/leafops.h>
+
+#include <asm/tlbflush.h>
+
+#include "internal.h"
/*
 * We want to know the real level where an entry is located, ignoring any
@@ -27,9 +34,23 @@ static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
int err = 0;
for (;;) {
- err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
- if (err)
- break;
+ if (ops->install_pte && pte_none(ptep_get(pte))) {
+ pte_t new_pte;
+
+ err = ops->install_pte(addr, addr + PAGE_SIZE, &new_pte,
+ walk);
+ if (err)
+ break;
+
+ set_pte_at(walk->mm, addr, pte, new_pte);
+			/* Was non-present, so update the MMU cache on arches that need it. */
+ if (!WARN_ON_ONCE(walk->no_vma))
+ update_mmu_cache(walk->vma, addr, pte);
+ } else {
+ err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
+ if (err)
+ break;
+ }
if (addr >= end - PAGE_SIZE)
break;
addr += PAGE_SIZE;
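
The new ops->install_pte hook above lets a walker populate empty PTEs as it
proceeds: the callback fills in a pte_t value, and the walker itself performs
the set_pte_at() and MMU-cache update. A minimal sketch of an installing
walker, assuming the guard-marker helpers from the same series
(make_pte_marker()/PTE_MARKER_GUARD) are available:

	/* Sketch only: install a guard marker into every empty PTE in a range. */
	static int guard_install_pte(unsigned long addr, unsigned long next,
				     pte_t *ptep, struct mm_walk *walk)
	{
		/* The walker applies this value via set_pte_at(). */
		*ptep = make_pte_marker(PTE_MARKER_GUARD);
		return 0;
	}

	static const struct mm_walk_ops guard_walk_ops = {
		.install_pte	= guard_install_pte,
		.walk_lock	= PGWALK_RDLOCK,
	};

Because install_pte is rejected by the exported entry points (see
check_ops_safe() below), such ops can only be passed to the *_unsafe()
variants from mm-internal code.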
@@ -46,56 +67,32 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
spinlock_t *ptl;
if (walk->no_vma) {
- pte = pte_offset_map(pmd, addr);
- err = walk_pte_range_inner(pte, addr, end, walk);
- pte_unmap(pte);
+ /*
+ * pte_offset_map() might apply user-specific validation.
+ * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
+ * fit its pmd_bad() check (_PAGE_NX set and _PAGE_RW clear),
+ * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
+ */
+ if (walk->mm == &init_mm || addr >= TASK_SIZE)
+ pte = pte_offset_kernel(pmd, addr);
+ else
+ pte = pte_offset_map(pmd, addr);
+ if (pte) {
+ err = walk_pte_range_inner(pte, addr, end, walk);
+ if (walk->mm != &init_mm && addr < TASK_SIZE)
+ pte_unmap(pte);
+ }
} else {
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- err = walk_pte_range_inner(pte, addr, end, walk);
- pte_unmap_unlock(pte, ptl);
- }
-
- return err;
-}
-
-#ifdef CONFIG_ARCH_HAS_HUGEPD
-static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
- unsigned long end, struct mm_walk *walk, int pdshift)
-{
- int err = 0;
- const struct mm_walk_ops *ops = walk->ops;
- int shift = hugepd_shift(*phpd);
- int page_size = 1 << shift;
-
- if (!ops->pte_entry)
- return 0;
-
- if (addr & (page_size - 1))
- return 0;
-
- for (;;) {
- pte_t *pte;
-
- spin_lock(&walk->mm->page_table_lock);
- pte = hugepte_offset(*phpd, addr, pdshift);
- err = ops->pte_entry(pte, addr, addr + page_size, walk);
- spin_unlock(&walk->mm->page_table_lock);
-
- if (err)
- break;
- if (addr >= end - page_size)
- break;
- addr += page_size;
+ if (pte) {
+ err = walk_pte_range_inner(pte, addr, end, walk);
+ pte_unmap_unlock(pte, ptl);
+ }
}
+ if (!pte)
+ walk->action = ACTION_AGAIN;
return err;
}
-#else
-static int walk_hugepd_range(hugepd_t *phpd, unsigned long addr,
- unsigned long end, struct mm_walk *walk, int pdshift)
-{
- return 0;
-}
-#endif
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
@@ -103,6 +100,8 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
pmd_t *pmd;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
+ bool has_handler = ops->pte_entry;
+ bool has_install = ops->install_pte;
int err = 0;
int depth = real_depth(3);
@@ -111,11 +110,14 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
again:
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd)) {
- if (ops->pte_hole)
+ if (has_install)
+ err = __pte_alloc(walk->mm, pmd);
+ else if (ops->pte_hole)
err = ops->pte_hole(addr, next, depth, walk);
if (err)
break;
- continue;
+ if (!has_install)
+ continue;
}
walk->action = ACTION_SUBTREE;
@@ -131,28 +133,32 @@ again:
if (walk->action == ACTION_AGAIN)
goto again;
-
- /*
- * Check this here so we only break down trans_huge
- * pages when we _need_ to
- */
- if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
- walk->action == ACTION_CONTINUE ||
- !(ops->pte_entry))
+ if (walk->action == ACTION_CONTINUE)
continue;
- if (walk->vma) {
- split_huge_pmd(walk->vma, pmd, addr);
- if (pmd_trans_unstable(pmd))
- goto again;
+ if (!has_handler) { /* No handlers for lower page tables. */
+ if (!has_install)
+ continue; /* Nothing to do. */
+ /*
+ * We are ONLY installing, so avoid unnecessarily
+ * splitting a present huge page.
+ */
+ if (pmd_present(*pmd) && pmd_trans_huge(*pmd))
+ continue;
}
- if (is_hugepd(__hugepd(pmd_val(*pmd))))
- err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT);
- else
- err = walk_pte_range(pmd, addr, next, walk);
+ if (walk->vma)
+ split_huge_pmd(walk->vma, pmd, addr);
+ else if (pmd_leaf(*pmd) || !pmd_present(*pmd))
+ continue; /* Nothing to do. */
+
+ err = walk_pte_range(pmd, addr, next, walk);
if (err)
break;
+
+ if (walk->action == ACTION_AGAIN)
+ goto again;
+
} while (pmd++, addr = next, addr != end);
return err;
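
For reference, a pmd_entry handler steers this loop through walk->action:
leaving the default ACTION_SUBTREE descends into walk_pte_range(),
ACTION_CONTINUE skips the lower levels, and ACTION_AGAIN (now also honoured
after walk_pte_range() returns) reruns the same entry. A hedged sketch of a
handler using this:

	/* Sketch: handle THP PMDs wholesale and skip the PTE table beneath. */
	static int sketch_pmd_entry(pmd_t *pmd, unsigned long addr,
				    unsigned long next, struct mm_walk *walk)
	{
		if (pmd_trans_huge(pmdp_get(pmd))) {
			/* ... process the huge mapping as a single unit ... */
			walk->action = ACTION_CONTINUE;	/* do not descend */
		}
		return 0;	/* default ACTION_SUBTREE: walk PTEs next */
	}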
@@ -164,6 +170,8 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
pud_t *pud;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
+ bool has_handler = ops->pmd_entry || ops->pte_entry;
+ bool has_install = ops->install_pte;
int err = 0;
int depth = real_depth(2);
@@ -172,11 +180,14 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
again:
next = pud_addr_end(addr, end);
if (pud_none(*pud)) {
- if (ops->pte_hole)
+ if (has_install)
+ err = __pmd_alloc(walk->mm, pud, addr);
+ else if (ops->pte_hole)
err = ops->pte_hole(addr, next, depth, walk);
if (err)
break;
- continue;
+ if (!has_install)
+ continue;
}
walk->action = ACTION_SUBTREE;
@@ -188,21 +199,29 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
if (walk->action == ACTION_AGAIN)
goto again;
-
- if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
- walk->action == ACTION_CONTINUE ||
- !(ops->pmd_entry || ops->pte_entry))
+ if (walk->action == ACTION_CONTINUE)
continue;
+ if (!has_handler) { /* No handlers for lower page tables. */
+ if (!has_install)
+ continue; /* Nothing to do. */
+ /*
+ * We are ONLY installing, so avoid unnecessarily
+ * splitting a present huge page.
+ */
+ if (pud_present(*pud) && pud_trans_huge(*pud))
+ continue;
+ }
+
if (walk->vma)
split_huge_pud(walk->vma, pud, addr);
+ else if (pud_leaf(*pud) || !pud_present(*pud))
+ continue; /* Nothing to do. */
+
if (pud_none(*pud))
goto again;
- if (is_hugepd(__hugepd(pud_val(*pud))))
- err = walk_hugepd_range((hugepd_t *)pud, addr, next, walk, PUD_SHIFT);
- else
- err = walk_pmd_range(pud, addr, next, walk);
+ err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
} while (pud++, addr = next, addr != end);
@@ -216,6 +235,8 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
p4d_t *p4d;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
+ bool has_handler = ops->pud_entry || ops->pmd_entry || ops->pte_entry;
+ bool has_install = ops->install_pte;
int err = 0;
int depth = real_depth(1);
@@ -223,20 +244,21 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
do {
next = p4d_addr_end(addr, end);
if (p4d_none_or_clear_bad(p4d)) {
- if (ops->pte_hole)
+ if (has_install)
+ err = __pud_alloc(walk->mm, p4d, addr);
+ else if (ops->pte_hole)
err = ops->pte_hole(addr, next, depth, walk);
if (err)
break;
- continue;
+ if (!has_install)
+ continue;
}
if (ops->p4d_entry) {
err = ops->p4d_entry(p4d, addr, next, walk);
if (err)
break;
}
- if (is_hugepd(__hugepd(p4d_val(*p4d))))
- err = walk_hugepd_range((hugepd_t *)p4d, addr, next, walk, P4D_SHIFT);
- else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
+ if (has_handler || has_install)
err = walk_pud_range(p4d, addr, next, walk);
if (err)
break;
@@ -251,6 +273,9 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
pgd_t *pgd;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
+ bool has_handler = ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
+ ops->pte_entry;
+ bool has_install = ops->install_pte;
int err = 0;
if (walk->pgd)
@@ -260,20 +285,21 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd)) {
- if (ops->pte_hole)
+ if (has_install)
+ err = __p4d_alloc(walk->mm, pgd, addr);
+ else if (ops->pte_hole)
err = ops->pte_hole(addr, next, 0, walk);
if (err)
break;
- continue;
+ if (!has_install)
+ continue;
}
if (ops->pgd_entry) {
err = ops->pgd_entry(pgd, addr, next, walk);
if (err)
break;
}
- if (is_hugepd(__hugepd(pgd_val(*pgd))))
- err = walk_hugepd_range((hugepd_t *)pgd, addr, next, walk, PGDIR_SHIFT);
- else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)
+ if (has_handler || has_install)
err = walk_p4d_range(pgd, addr, next, walk);
if (err)
break;
@@ -302,18 +328,18 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
const struct mm_walk_ops *ops = walk->ops;
int err = 0;
+ hugetlb_vma_lock_read(vma);
do {
next = hugetlb_entry_end(h, addr, end);
- pte = huge_pte_offset(walk->mm, addr & hmask, sz);
-
+ pte = hugetlb_walk(vma, addr & hmask, sz);
if (pte)
err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
else if (ops->pte_hole)
err = ops->pte_hole(addr, next, -1, walk);
-
if (err)
break;
} while (addr = next, addr != end);
+ hugetlb_vma_unlock_read(vma);
return err;
}
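
With the loop above now running under hugetlb_vma_lock_read(), a
hugetlb_entry callback executes with the hugetlb VMA lock held for reading.
A sketch of such a callback, assuming the recent three-argument
huge_ptep_get() signature:

	/* Sketch: count present hugetlb mappings via walk->private. */
	static int sketch_hugetlb_entry(pte_t *pte, unsigned long hmask,
					unsigned long addr, unsigned long next,
					struct mm_walk *walk)
	{
		unsigned long *count = walk->private;
		pte_t entry = huge_ptep_get(walk->mm, addr, pte);

		if (pte_present(entry))
			(*count)++;
		return 0;
	}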
@@ -365,6 +391,11 @@ static int __walk_page_range(unsigned long start, unsigned long end,
int err = 0;
struct vm_area_struct *vma = walk->vma;
const struct mm_walk_ops *ops = walk->ops;
+ bool is_hugetlb = is_vm_hugetlb_page(vma);
+
+ /* We do not support hugetlb PTE installation. */
+ if (ops->install_pte && is_hugetlb)
+ return -EINVAL;
if (ops->pre_vma) {
err = ops->pre_vma(start, end, walk);
@@ -372,7 +403,7 @@ static int __walk_page_range(unsigned long start, unsigned long end,
return err;
}
- if (is_vm_hugetlb_page(vma)) {
+ if (is_hugetlb) {
if (ops->hugetlb_entry)
err = walk_hugetlb_range(start, end, walk);
} else
@@ -384,47 +415,44 @@ static int __walk_page_range(unsigned long start, unsigned long end,
return err;
}
-/**
- * walk_page_range - walk page table with caller specific callbacks
- * @mm: mm_struct representing the target process of page table walk
- * @start: start address of the virtual address range
- * @end: end address of the virtual address range
- * @ops: operation to call during the walk
- * @private: private data for callbacks' usage
- *
- * Recursively walk the page table tree of the process represented by @mm
- * within the virtual address range [@start, @end). During walking, we can do
- * some caller-specific works for each entry, by setting up pmd_entry(),
- * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
- * callbacks, the associated entries/pages are just ignored.
- * The return values of these callbacks are commonly defined like below:
- *
- * - 0 : succeeded to handle the current entry, and if you don't reach the
- * end address yet, continue to walk.
- * - >0 : succeeded to handle the current entry, and return to the caller
- * with caller specific value.
- * - <0 : failed to handle the current entry, and return to the caller
- * with error code.
- *
- * Before starting to walk page table, some callers want to check whether
- * they really want to walk over the current vma, typically by checking
- * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
- * purpose.
- *
- * If operations need to be staged before and committed after a vma is walked,
- * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
- * since it is intended to handle commit-type operations, can't return any
- * errors.
- *
- * struct mm_walk keeps current values of some common data like vma and pmd,
- * which are useful for the access from callbacks. If you want to pass some
- * caller-specific data to callbacks, @private should be helpful.
+static inline void process_mm_walk_lock(struct mm_struct *mm,
+ enum page_walk_lock walk_lock)
+{
+ if (walk_lock == PGWALK_RDLOCK)
+ mmap_assert_locked(mm);
+ else if (walk_lock != PGWALK_VMA_RDLOCK_VERIFY)
+ mmap_assert_write_locked(mm);
+}
+
+static inline void process_vma_walk_lock(struct vm_area_struct *vma,
+ enum page_walk_lock walk_lock)
+{
+#ifdef CONFIG_PER_VMA_LOCK
+ switch (walk_lock) {
+ case PGWALK_WRLOCK:
+ vma_start_write(vma);
+ break;
+ case PGWALK_WRLOCK_VERIFY:
+ vma_assert_write_locked(vma);
+ break;
+ case PGWALK_VMA_RDLOCK_VERIFY:
+ vma_assert_locked(vma);
+ break;
+ case PGWALK_RDLOCK:
+ /* PGWALK_RDLOCK is handled by process_mm_walk_lock */
+ break;
+ }
+#endif
+}
+
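
The walk_lock field of mm_walk_ops drives both helpers above: the caller
declares which lock it holds (or wants taken per VMA) and the walker asserts
or acquires it accordingly. For example, with hypothetical callbacks:

	/* Read-side walk: process_mm_walk_lock() will mmap_assert_locked(). */
	static const struct mm_walk_ops sketch_read_ops = {
		.pte_entry	= sketch_pte_entry,	/* assumed callback */
		.walk_lock	= PGWALK_RDLOCK,
	};

	/* Write-side walk: vma_start_write() is taken on each VMA walked. */
	static const struct mm_walk_ops sketch_write_ops = {
		.pmd_entry	= sketch_pmd_entry,	/* assumed callback */
		.walk_lock	= PGWALK_WRLOCK,
	};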
+/*
+ * See the comment for walk_page_range(); this performs the heavy lifting of
+ * the operation, but sets no restrictions on how the walk proceeds.
*
- * Locking:
- * Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
- * because these function traverse vma list and/or access to vma's data.
+ * We usually restrict the ability to install PTEs, but this functionality is
+ * available to internal memory management code and provided in mm/internal.h.
*/
-int walk_page_range(struct mm_struct *mm, unsigned long start,
+int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private)
{
@@ -443,7 +471,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
if (!walk.mm)
return -EINVAL;
- mmap_assert_locked(walk.mm);
+ process_mm_walk_lock(walk.mm, ops->walk_lock);
vma = find_vma(walk.mm, start);
do {
@@ -458,6 +486,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
if (ops->pte_hole)
err = ops->pte_hole(start, next, -1, &walk);
} else { /* inside vma */
+ process_vma_walk_lock(vma, ops->walk_lock);
walk.vma = vma;
next = min(end, vma->vm_end);
vma = find_vma(mm, vma->vm_end);
@@ -482,24 +511,158 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
return err;
}
+/*
+ * Determine if the walk operations specified are permitted to be used for a
+ * page table walk.
+ *
+ * This check is performed on all functions which are parameterised by walk
+ * operations and exposed in include/linux/pagewalk.h.
+ *
+ * Internal memory management code can use the *_unsafe() functions, which
+ * permit use of all page walking operations.
+ */
+static bool check_ops_safe(const struct mm_walk_ops *ops)
+{
+ /*
+ * The installation of PTEs is solely under the control of memory
+ * management logic and subject to many subtle locking, security and
+ * cache considerations so we cannot permit other users to do so, and
+ * certainly not for exported symbols.
+ */
+ if (ops->install_pte)
+ return false;
+
+ return true;
+}
+
/**
- * walk_page_range_novma - walk a range of pagetables not backed by a vma
+ * walk_page_range - walk page table with caller specific callbacks
* @mm: mm_struct representing the target process of page table walk
* @start: start address of the virtual address range
* @end: end address of the virtual address range
* @ops: operation to call during the walk
+ * @private: private data for callbacks' usage
+ *
+ * Recursively walk the page table tree of the process represented by @mm
+ * within the virtual address range [@start, @end). During the walk, we can
+ * perform caller-specific work for each entry by setting up pmd_entry(),
+ * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
+ * callbacks, the associated entries/pages are simply ignored.
+ * The return values of these callbacks are commonly defined like below:
+ *
+ * - 0  : succeeded in handling the current entry; if the end address has
+ *        not been reached yet, continue the walk.
+ * - >0 : succeeded in handling the current entry, and return to the caller
+ *        with a caller-specific value.
+ * - <0 : failed to handle the current entry, and return to the caller
+ *        with an error code.
+ *
+ * Before starting to walk the page tables, some callers want to check whether
+ * they really want to walk over the current vma, typically by checking
+ * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
+ * purpose.
+ *
+ * If operations need to be staged before and committed after a vma is walked,
+ * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
+ * since it is intended to handle commit-type operations, can't return any
+ * errors.
+ *
+ * struct mm_walk keeps current values of some common data like vma and pmd,
+ * which are useful for the access from callbacks. If you want to pass some
+ * caller-specific data to callbacks, @private should be helpful.
+ *
+ * Locking:
+ * Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
+ * because these functions traverse the vma list and/or access the vma's data.
+ */
+int walk_page_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private)
+{
+ if (!check_ops_safe(ops))
+ return -EINVAL;
+
+ return walk_page_range_mm_unsafe(mm, start, end, ops, private);
+}
+
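
As a usage sketch (the names here are illustrative, not part of this patch),
a caller counting present PTEs over a range might look like:

	#include <linux/pagewalk.h>

	static int count_pte_entry(pte_t *pte, unsigned long addr,
				   unsigned long next, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (pte_present(ptep_get(pte)))
			(*count)++;
		return 0;
	}

	static const struct mm_walk_ops count_ops = {
		.pte_entry	= count_pte_entry,
		.walk_lock	= PGWALK_RDLOCK,
	};

	static unsigned long count_present(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
	{
		unsigned long count = 0;

		mmap_read_lock(mm);
		walk_page_range(mm, start, end, &count_ops, &count);
		mmap_read_unlock(mm);
		return count;
	}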
+/**
+ * walk_kernel_page_table_range - walk a range of kernel pagetables.
+ * @start: start address of the virtual address range
+ * @end: end address of the virtual address range
+ * @ops: operation to call during the walk
* @pgd: pgd to walk if different from mm->pgd
* @private: private data for callbacks' usage
*
* Similar to walk_page_range() but can walk any page tables even if they are
* not backed by VMAs. Because 'unusual' entries may be walked this function
* will also not lock the PTEs for the pte_entry() callback. This is useful for
- * walking the kernel pages tables or page tables for firmware.
+ * walking kernel page tables or page tables for firmware.
+ *
+ * Note: Take care when walking kernel page tables; the caller may need to
+ * take other effective measures (the mmap lock may be insufficient) to
+ * prevent the intermediate kernel page tables belonging to the specified
+ * address range from being freed (e.g. by memory hot-remove).
+ */
+int walk_kernel_page_table_range(unsigned long start, unsigned long end,
+ const struct mm_walk_ops *ops, pgd_t *pgd, void *private)
+{
+ /*
+ * Kernel intermediate page tables are usually not freed, so the mmap
+	 * read lock is sufficient. But there are exceptions, e.g. memory
+	 * hot-remove, in which case the mmap lock is insufficient to prevent
+	 * the intermediate kernel page tables belonging to the specified
+	 * address range from being freed. The caller should take other
+	 * actions to prevent this race.
+ */
+ mmap_assert_locked(&init_mm);
+
+ return walk_kernel_page_table_range_lockless(start, end, ops, pgd,
+ private);
+}
+
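
A hedged caller sketch: hold init_mm's mmap lock around the walk and pass a
NULL pgd to use init_mm.pgd (per walk_pgd_range() above):

	/* Sketch: walk a kernel VA range under init_mm's mmap read lock. */
	static int walk_kernel_range(unsigned long start, unsigned long end,
				     const struct mm_walk_ops *ops, void *priv)
	{
		int err;

		mmap_read_lock(&init_mm);
		err = walk_kernel_page_table_range(start, end, ops, NULL, priv);
		mmap_read_unlock(&init_mm);
		return err;
	}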
+/*
+ * Use this function to walk the kernel page tables locklessly. The caller
+ * must guarantee exclusive access to the range being operated on - that is,
+ * there must be no concurrent access, for example while changing permissions
+ * of vmalloc objects.
+ */
+int walk_kernel_page_table_range_lockless(unsigned long start, unsigned long end,
+ const struct mm_walk_ops *ops, pgd_t *pgd, void *private)
+{
+ struct mm_walk walk = {
+ .ops = ops,
+ .mm = &init_mm,
+ .pgd = pgd,
+ .private = private,
+ .no_vma = true
+ };
+
+ if (start >= end)
+ return -EINVAL;
+ if (!check_ops_safe(ops))
+ return -EINVAL;
+
+ return walk_pgd_range(start, end, &walk);
+}
+
+/**
+ * walk_page_range_debug - walk a range of pagetables not backed by a vma
+ * @mm: mm_struct representing the target process of page table walk
+ * @start: start address of the virtual address range
+ * @end: end address of the virtual address range
+ * @ops: operation to call during the walk
+ * @pgd: pgd to walk if different from mm->pgd
+ * @private: private data for callbacks' usage
+ *
+ * Similar to walk_page_range() but can walk any page tables even if they are
+ * not backed by VMAs. Because 'unusual' entries may be walked this function
+ * will also not lock the PTEs for the pte_entry() callback.
+ *
+ * This is for debugging purposes ONLY.
*/
-int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+int walk_page_range_debug(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
- pgd_t *pgd,
- void *private)
+ pgd_t *pgd, void *private)
{
struct mm_walk walk = {
.ops = ops,
@@ -509,17 +672,30 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
.no_vma = true
};
+ /* For convenience, we allow traversal of kernel mappings. */
+ if (mm == &init_mm)
+ return walk_kernel_page_table_range(start, end, ops,
+ pgd, private);
if (start >= end || !walk.mm)
return -EINVAL;
+ if (!check_ops_safe(ops))
+ return -EINVAL;
- mmap_assert_write_locked(walk.mm);
+ /*
+ * The mmap lock protects the page walker from changes to the page
+ * tables during the walk. However a read lock is insufficient to
+ * protect those areas which don't have a VMA as munmap() detaches
+ * the VMAs before downgrading to a read lock and actually tearing
+	 * down PTEs/page tables, in which case the mmap write lock must
+	 * be held.
+ */
+ mmap_assert_write_locked(mm);
return walk_pgd_range(start, end, &walk);
}
-int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, const struct mm_walk_ops *ops,
- void *private)
+int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops, void *private)
{
struct mm_walk walk = {
.ops = ops,
@@ -533,10 +709,21 @@ int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
if (start < vma->vm_start || end > vma->vm_end)
return -EINVAL;
- mmap_assert_locked(walk.mm);
+ process_mm_walk_lock(walk.mm, ops->walk_lock);
+ process_vma_walk_lock(vma, ops->walk_lock);
return __walk_page_range(start, end, &walk);
}
+int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ void *private)
+{
+ if (!check_ops_safe(ops))
+ return -EINVAL;
+
+ return walk_page_range_vma_unsafe(vma, start, end, ops, private);
+}
+
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private)
{
@@ -549,8 +736,11 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
if (!walk.mm)
return -EINVAL;
+ if (!check_ops_safe(ops))
+ return -EINVAL;
- mmap_assert_locked(walk.mm);
+ process_mm_walk_lock(walk.mm, ops->walk_lock);
+ process_vma_walk_lock(vma, ops->walk_lock);
return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
@@ -597,6 +787,9 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
unsigned long start_addr, end_addr;
int err = 0;
+ if (!check_ops_safe(ops))
+ return -EINVAL;
+
lockdep_assert_held(&mapping->i_mmap_rwsem);
vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
first_index + nr - 1) {
@@ -630,3 +823,208 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
return err;
}
+
+/**
+ * folio_walk_start - walk the page tables to a folio
+ * @fw: filled with information on success.
+ * @vma: the VMA.
+ * @addr: the virtual address to use for the page table walk.
+ * @flags: flags modifying which folios to walk to.
+ *
+ * Walk the page tables using @addr in a given @vma to a mapped folio and
+ * return the folio, making sure that the page table entry referenced by
+ * @addr cannot change until folio_walk_end() was called.
+ *
+ * By default, this function returns only folios that are not special (e.g., not
+ * the zeropage) and never returns folios that are supposed to be ignored by the
+ * VM as documented by vm_normal_page(). If requested, zeropages will be
+ * returned as well.
+ *
+ * By default, this function only considers present page table entries.
+ * If requested, it will also consider migration entries.
+ *
+ * If this function returns NULL it might either indicate "there is nothing" or
+ * "there is nothing suitable".
+ *
+ * On success, @fw is filled and the function returns the folio while the PTL
+ * is still held and folio_walk_end() must be called to clean up,
+ * releasing any held locks. The returned folio must *not* be used after the
+ * call to folio_walk_end(), unless a short-term folio reference is taken before
+ * that call.
+ *
+ * @fw->page will correspond to the page that is effectively referenced by
+ * @addr. However, for migration entries and shared zeropages @fw->page is
+ * set to NULL. Note that large folios might be mapped by multiple page table
+ * entries, and this function will always only lookup a single entry as
+ * specified by @addr, which might or might not cover more than a single page of
+ * the returned folio.
+ *
+ * This function must *not* be used as a naive replacement for
+ * get_user_pages() / pin_user_pages(), especially not to perform DMA or
+ * to carelessly modify page content. This function may *only* be used to grab
+ * short-term folio references, never to grab long-term folio references.
+ *
+ * Using the page table entry pointers in @fw for reading or modifying the
+ * entry should be avoided where possible: however, there might be valid
+ * use cases.
+ *
+ * WARNING: Modifying page table entries in hugetlb VMAs requires a lot of care.
+ * For example, PMD page table sharing might require prior unsharing. Also,
+ * logical hugetlb entries might span multiple physical page table entries,
+ * which *must* be modified in a single operation (set_huge_pte_at(),
+ * huge_ptep_set_*, ...). Note that the page table entry stored in @fw might
+ * not correspond to the first physical entry of a logical hugetlb entry.
+ *
+ * The mmap lock must be held in read mode.
+ *
+ * Return: folio pointer on success, otherwise NULL.
+ */
+struct folio *folio_walk_start(struct folio_walk *fw,
+ struct vm_area_struct *vma, unsigned long addr,
+ folio_walk_flags_t flags)
+{
+ unsigned long entry_size;
+ bool expose_page = true;
+ struct page *page;
+ pud_t *pudp, pud;
+ pmd_t *pmdp, pmd;
+ pte_t *ptep, pte;
+ spinlock_t *ptl;
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+
+ mmap_assert_locked(vma->vm_mm);
+ vma_pgtable_walk_begin(vma);
+
+ if (WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end))
+ goto not_found;
+
+ pgdp = pgd_offset(vma->vm_mm, addr);
+ if (pgd_none_or_clear_bad(pgdp))
+ goto not_found;
+
+ p4dp = p4d_offset(pgdp, addr);
+ if (p4d_none_or_clear_bad(p4dp))
+ goto not_found;
+
+ pudp = pud_offset(p4dp, addr);
+ pud = pudp_get(pudp);
+ if (pud_none(pud))
+ goto not_found;
+ if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) &&
+ (!pud_present(pud) || pud_leaf(pud))) {
+ ptl = pud_lock(vma->vm_mm, pudp);
+ pud = pudp_get(pudp);
+
+ entry_size = PUD_SIZE;
+ fw->level = FW_LEVEL_PUD;
+ fw->pudp = pudp;
+ fw->pud = pud;
+
+ if (pud_none(pud)) {
+ spin_unlock(ptl);
+ goto not_found;
+ } else if (pud_present(pud) && !pud_leaf(pud)) {
+ spin_unlock(ptl);
+ goto pmd_table;
+ } else if (pud_present(pud)) {
+ page = vm_normal_page_pud(vma, addr, pud);
+ if (page)
+ goto found;
+ }
+ /*
+ * TODO: FW_MIGRATION support for PUD migration entries
+ * once there are relevant users.
+ */
+ spin_unlock(ptl);
+ goto not_found;
+ }
+
+pmd_table:
+ VM_WARN_ON_ONCE(!pud_present(pud) || pud_leaf(pud));
+ pmdp = pmd_offset(pudp, addr);
+ pmd = pmdp_get_lockless(pmdp);
+ if (pmd_none(pmd))
+ goto not_found;
+ if (IS_ENABLED(CONFIG_PGTABLE_HAS_HUGE_LEAVES) &&
+ (!pmd_present(pmd) || pmd_leaf(pmd))) {
+ ptl = pmd_lock(vma->vm_mm, pmdp);
+ pmd = pmdp_get(pmdp);
+
+ entry_size = PMD_SIZE;
+ fw->level = FW_LEVEL_PMD;
+ fw->pmdp = pmdp;
+ fw->pmd = pmd;
+
+ if (pmd_none(pmd)) {
+ spin_unlock(ptl);
+ goto not_found;
+ } else if (pmd_present(pmd) && !pmd_leaf(pmd)) {
+ spin_unlock(ptl);
+ goto pte_table;
+ } else if (pmd_present(pmd)) {
+ page = vm_normal_page_pmd(vma, addr, pmd);
+ if (page) {
+ goto found;
+ } else if ((flags & FW_ZEROPAGE) &&
+ is_huge_zero_pmd(pmd)) {
+ page = pfn_to_page(pmd_pfn(pmd));
+ expose_page = false;
+ goto found;
+ }
+ } else if ((flags & FW_MIGRATION) &&
+ pmd_is_migration_entry(pmd)) {
+ const softleaf_t entry = softleaf_from_pmd(pmd);
+
+ page = softleaf_to_page(entry);
+ expose_page = false;
+ goto found;
+ }
+ spin_unlock(ptl);
+ goto not_found;
+ }
+
+pte_table:
+ VM_WARN_ON_ONCE(!pmd_present(pmd) || pmd_leaf(pmd));
+ ptep = pte_offset_map_lock(vma->vm_mm, pmdp, addr, &ptl);
+ if (!ptep)
+ goto not_found;
+ pte = ptep_get(ptep);
+
+ entry_size = PAGE_SIZE;
+ fw->level = FW_LEVEL_PTE;
+ fw->ptep = ptep;
+ fw->pte = pte;
+
+ if (pte_present(pte)) {
+ page = vm_normal_page(vma, addr, pte);
+ if (page)
+ goto found;
+ if ((flags & FW_ZEROPAGE) &&
+ is_zero_pfn(pte_pfn(pte))) {
+ page = pfn_to_page(pte_pfn(pte));
+ expose_page = false;
+ goto found;
+ }
+ } else if (!pte_none(pte)) {
+ const softleaf_t entry = softleaf_from_pte(pte);
+
+ if ((flags & FW_MIGRATION) && softleaf_is_migration(entry)) {
+ page = softleaf_to_page(entry);
+ expose_page = false;
+ goto found;
+ }
+ }
+ pte_unmap_unlock(ptep, ptl);
+not_found:
+ vma_pgtable_walk_end(vma);
+ return NULL;
+found:
+ if (expose_page)
+ /* Note: Offset from the mapped page, not the folio start. */
+ fw->page = page + ((addr & (entry_size - 1)) >> PAGE_SHIFT);
+ else
+ fw->page = NULL;
+ fw->ptl = ptl;
+ return page_folio(page);
+}
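
A usage sketch for the folio_walk API, taking a short-term folio reference
under the PTL before dropping it via folio_walk_end() (defined elsewhere,
taking the same @vma):

	/* Sketch: grab a short-term reference to the folio mapped at @addr. */
	static struct folio *grab_mapped_folio(struct vm_area_struct *vma,
					       unsigned long addr)
	{
		struct folio_walk fw;
		struct folio *folio;

		folio = folio_walk_start(&fw, vma, addr, 0);
		if (!folio)
			return NULL;

		/* PTL held here; fw.page/fw.level describe the mapping. */
		folio_get(folio);
		folio_walk_end(&fw, vma);
		return folio;
	}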