Diffstat (limited to 'mm/mincore.c')
-rw-r--r--  mm/mincore.c | 128
1 file changed, 94 insertions(+), 34 deletions(-)
diff --git a/mm/mincore.c b/mm/mincore.c
index a085a2aeabd8..e5d13eea9234 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -14,13 +14,14 @@
 #include <linux/mman.h>
 #include <linux/syscalls.h>
 #include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
 #include <linux/shmem_fs.h>
 #include <linux/hugetlb.h>
 #include <linux/pgtable.h>
 #include <linux/uaccess.h>
 #include "swap.h"
+#include "internal.h"
 
 static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
 			unsigned long end, struct mm_walk *walk)
@@ -28,21 +29,76 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
 #ifdef CONFIG_HUGETLB_PAGE
 	unsigned char present;
 	unsigned char *vec = walk->private;
+	spinlock_t *ptl;
+
+	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
 
 	/*
 	 * Hugepages under user process are always in RAM and never
 	 * swapped out, but theoretically it needs to be checked.
 	 */
-	present = pte && !huge_pte_none(huge_ptep_get(pte));
+	if (!pte) {
+		present = 0;
+	} else {
+		const pte_t ptep = huge_ptep_get(walk->mm, addr, pte);
+
+		if (huge_pte_none(ptep) || pte_is_marker(ptep))
+			present = 0;
+		else
+			present = 1;
+	}
+
 	for (; addr != end; vec++, addr += PAGE_SIZE)
 		*vec = present;
 	walk->private = vec;
+	spin_unlock(ptl);
 #else
 	BUG();
 #endif
 	return 0;
 }
 
+static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
+{
+	struct swap_info_struct *si;
+	struct folio *folio = NULL;
+	unsigned char present = 0;
+
+	if (!IS_ENABLED(CONFIG_SWAP)) {
+		WARN_ON(1);
+		return 0;
+	}
+
+	/*
+	 * Shmem mapping may contain swapin error entries, which are
+	 * absent. Page table may contain migration or hwpoison
+	 * entries which are always uptodate.
+	 */
+	if (!softleaf_is_swap(entry))
+		return !shmem;
+
+	/*
+	 * Shmem mapping lookup is lockless, so we need to grab the swap
+	 * device. mincore page table walk locks the PTL, and the swap
+	 * device is stable, avoid touching the si for better performance.
+	 */
+	if (shmem) {
+		si = get_swap_device(entry);
+		if (!si)
+			return 0;
+	}
+	folio = swap_cache_get_folio(entry);
+	if (shmem)
+		put_swap_device(si);
+	/* The swap cache space contains either folio, shadow or NULL */
+	if (folio && !xa_is_value(folio)) {
+		present = folio_test_uptodate(folio);
+		folio_put(folio);
+	}
+
+	return present;
+}
+
 /*
  * Later we can get more picky about what "in core" means precisely.
  * For now, simply check to see if the page is in the page cache,
@@ -60,8 +116,15 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
 	 * any other file mapping (ie. marked !present and faulted in with
 	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
 	 */
-	folio = filemap_get_incore_folio(mapping, index);
+	folio = filemap_get_entry(mapping, index);
 	if (folio) {
+		if (xa_is_value(folio)) {
+			if (shmem_mapping(mapping))
+				return mincore_swap(radix_to_swp_entry(folio),
+						    true);
+			else
+				return 0;
+		}
 		present = folio_test_uptodate(folio);
 		folio_put(folio);
 	}
@@ -105,6 +168,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *ptep;
 	unsigned char *vec = walk->private;
 	int nr = (end - addr) >> PAGE_SHIFT;
+	int step, i;
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
@@ -113,41 +177,36 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 		goto out;
 	}
 
-	if (pmd_trans_unstable(pmd)) {
-		__mincore_unmapped_range(addr, end, vma, vec);
-		goto out;
-	}
-
 	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
-	for (; addr != end; ptep++, addr += PAGE_SIZE) {
-		pte_t pte = *ptep;
+	if (!ptep) {
+		walk->action = ACTION_AGAIN;
+		return 0;
+	}
+	for (; addr != end; ptep += step, addr += step * PAGE_SIZE) {
+		pte_t pte = ptep_get(ptep);
 
-		/* We need to do cache lookup too for pte markers */
-		if (pte_none_mostly(pte))
+		step = 1;
+		/* We need to do cache lookup too for markers */
+		if (pte_none(pte) || pte_is_marker(pte))
 			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
 						 vma, vec);
-		else if (pte_present(pte))
-			*vec = 1;
-		else { /* pte is a swap entry */
-			swp_entry_t entry = pte_to_swp_entry(pte);
-
-			if (non_swap_entry(entry)) {
-				/*
-				 * migration or hwpoison entries are always
-				 * uptodate
-				 */
-				*vec = 1;
-			} else {
-#ifdef CONFIG_SWAP
-				*vec = mincore_page(swap_address_space(entry),
-						    swp_offset(entry));
-#else
-				WARN_ON(1);
-				*vec = 1;
-#endif
+		else if (pte_present(pte)) {
+			unsigned int batch = pte_batch_hint(ptep, pte);
+
+			if (batch > 1) {
+				unsigned int max_nr = (end - addr) >> PAGE_SHIFT;
+
+				step = min_t(unsigned int, batch, max_nr);
 			}
+
+			for (i = 0; i < step; i++)
+				vec[i] = 1;
+		} else { /* pte is a swap entry */
+			const softleaf_t entry = softleaf_from_pte(pte);
+
+			*vec = mincore_swap(entry, false);
 		}
-		vec++;
+		vec += step;
 	}
 	pte_unmap_unlock(ptep - 1, ptl);
 out:
@@ -168,7 +227,7 @@ static inline bool can_do_mincore(struct vm_area_struct *vma)
 	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
 	 */
-	return inode_owner_or_capable(&init_user_ns,
+	return inode_owner_or_capable(&nop_mnt_idmap,
 				      file_inode(vma->vm_file)) ||
 		file_permission(vma->vm_file, MAY_WRITE) == 0;
 }
@@ -177,6 +236,7 @@ static const struct mm_walk_ops mincore_walk_ops = {
 	.pmd_entry = mincore_pte_range,
 	.pte_hole = mincore_unmapped_range,
 	.hugetlb_entry = mincore_hugetlb,
+	.walk_lock = PGWALK_RDLOCK,
 };
 
 /*
@@ -239,7 +299,7 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
 	start = untagged_addr(start);
 
 	/* Check the start address: needs to be page-aligned.. */
-	if (start & ~PAGE_MASK)
+	if (unlikely(start & ~PAGE_MASK))
 		return -EINVAL;
 
 	/* ..and we need to be passed a valid user-space range */
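For orientation, here is a minimal userspace sketch (not part of this commit, assuming Linux with glibc) that exercises the kernel paths changed above: mincore() fills one status byte per page, a non-page-aligned start address trips the -EINVAL check in the final hunk, and untouched anonymous pages take the pte_none() path and typically report as not resident.

/* mincore_demo.c — illustrative only; build with: cc -O2 mincore_demo.c */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 4 * (size_t)page;
	unsigned char vec[4];

	/* Anonymous private mapping: no pages are resident yet. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Fault in the first two pages only. */
	memset(buf, 0, 2 * (size_t)page);

	/* start must be page-aligned, or the kernel returns -EINVAL. */
	if (mincore(buf, len, vec) != 0) {
		perror("mincore");
		return 1;
	}

	/* Only bit 0 of each byte is defined: 1 = resident in RAM. */
	for (size_t i = 0; i < 4; i++)
		printf("page %zu: %s\n", i,
		       (vec[i] & 1) ? "resident" : "not resident");

	munmap(buf, len);
	return 0;
}

On a typical run the first two pages report resident and the last two do not; pages of a shmem/tmpfs file that have been swapped out are the case the new mincore_swap() helper resolves on the kernel side.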
