Diffstat (limited to 'mm/page_vma_mapped.c')
-rw-r--r--  mm/page_vma_mapped.c  147
1 file changed, 88 insertions, 59 deletions
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 49e0d28f0379..b38a1d00c971 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -3,7 +3,7 @@
 #include <linux/rmap.h>
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
 
 #include "internal.h"
 
@@ -13,8 +13,10 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 	return false;
 }
 
-static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
+static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
+		    spinlock_t **ptlp)
 {
+	bool is_migration;
 	pte_t ptent;
 
 	if (pvmw->flags & PVMW_SYNC) {
@@ -25,6 +27,8 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
 		return !!pvmw->pte;
 	}
 
+	is_migration = pvmw->flags & PVMW_MIGRATION;
+again:
 	/*
 	 * It is important to return the ptl corresponding to pte,
 	 * in case *pvmw->pmd changes underneath us; so we need to
@@ -32,18 +36,21 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
 	 * proceeds to loop over next ptes, and finds a match later.
 	 * Though, in most cases, page lock already protects this.
 	 */
-	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
-					  pvmw->address, ptlp);
+	pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,
+					     pvmw->address, pmdvalp, ptlp);
 	if (!pvmw->pte)
 		return false;
 
 	ptent = ptep_get(pvmw->pte);
 
-	if (pvmw->flags & PVMW_MIGRATION) {
-		if (!is_swap_pte(ptent))
+	if (pte_none(ptent)) {
+		return false;
+	} else if (pte_present(ptent)) {
+		if (is_migration)
 			return false;
-	} else if (is_swap_pte(ptent)) {
-		swp_entry_t entry;
+	} else if (!is_migration) {
+		softleaf_t entry;
+
 		/*
 		 * Handle un-addressable ZONE_DEVICE memory.
 		 *
@@ -60,71 +67,73 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
 		 * For more details on device private memory see HMM
 		 * (include/linux/hmm.h or mm/hmm.c).
 		 */
-		entry = pte_to_swp_entry(ptent);
-		if (!is_device_private_entry(entry) &&
-		    !is_device_exclusive_entry(entry))
+		entry = softleaf_from_pte(ptent);
+		if (!softleaf_is_device_private(entry) &&
+		    !softleaf_is_device_exclusive(entry))
 			return false;
-	} else if (!pte_present(ptent)) {
-		return false;
+	}
+	spin_lock(*ptlp);
+	if (unlikely(!pmd_same(*pmdvalp, pmdp_get_lockless(pvmw->pmd)))) {
+		pte_unmap_unlock(pvmw->pte, *ptlp);
+		goto again;
 	}
 	pvmw->ptl = *ptlp;
-	spin_lock(pvmw->ptl);
+
 	return true;
 }
 
 /**
- * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
- * @pvmw: page_vma_mapped_walk struct, includes a pair pte and page for checking
+ * check_pte - check if [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages) is
+ * mapped at the @pvmw->pte
+ * @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
+ * for checking
+ * @pte_nr: the number of small pages described by @pvmw->pte.
  *
- * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
+ * page_vma_mapped_walk() found a place where pfn range is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * pvmw->pte may point to empty PTE, swap PTE or PTE pointing to
 * arbitrary page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
- * entry that points to @pvmw->page or any subpage in case of THP.
+ * entry that points to [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 *
 * If PVMW_MIGRATION flag is not set, returns true if pvmw->pte points to
- * pvmw->page or any subpage in case of THP.
+ * [pvmw->pfn, @pvmw->pfn + @pvmw->nr_pages)
 *
 * Otherwise, return false.
 *
 */
-static bool check_pte(struct page_vma_mapped_walk *pvmw)
+static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
 {
 	unsigned long pfn;
 	pte_t ptent = ptep_get(pvmw->pte);
 
 	if (pvmw->flags & PVMW_MIGRATION) {
-		swp_entry_t entry;
-		if (!is_swap_pte(ptent))
-			return false;
-		entry = pte_to_swp_entry(ptent);
+		const softleaf_t entry = softleaf_from_pte(ptent);
 
-		if (!is_migration_entry(entry) &&
-		    !is_device_exclusive_entry(entry))
+		if (!softleaf_is_migration(entry))
 			return false;
 
-		pfn = swp_offset_pfn(entry);
-	} else if (is_swap_pte(ptent)) {
-		swp_entry_t entry;
+		pfn = softleaf_to_pfn(entry);
+	} else if (pte_present(ptent)) {
+		pfn = pte_pfn(ptent);
+	} else {
+		const softleaf_t entry = softleaf_from_pte(ptent);
 
 		/* Handle un-addressable ZONE_DEVICE memory */
-		entry = pte_to_swp_entry(ptent);
-		if (!is_device_private_entry(entry) &&
-		    !is_device_exclusive_entry(entry))
-			return false;
-
-		pfn = swp_offset_pfn(entry);
-	} else {
-		if (!pte_present(ptent))
+		if (!softleaf_is_device_private(entry) &&
+		    !softleaf_is_device_exclusive(entry))
 			return false;
 
-		pfn = pte_pfn(ptent);
+		pfn = softleaf_to_pfn(entry);
 	}
 
-	return (pfn - pvmw->pfn) < pvmw->nr_pages;
+	if ((pfn + pte_nr - 1) < pvmw->pfn)
+		return false;
+	if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
+		return false;
+	return true;
 }
 
 /* Returns true if the two ranges overlap. Careful to not overflow. */
@@ -199,7 +208,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			return false;
 
 		pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
-		if (!check_pte(pvmw))
+		if (!check_pte(pvmw, pages_per_huge_page(hstate)))
 			return not_found(pvmw);
 		return true;
 	}
@@ -233,23 +242,23 @@ restart:
 		 */
 		pmde = pmdp_get_lockless(pvmw->pmd);
 
-		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) ||
-		    (pmd_present(pmde) && pmd_devmap(pmde))) {
+		if (pmd_trans_huge(pmde) || pmd_is_migration_entry(pmde)) {
 			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
 			pmde = *pvmw->pmd;
 			if (!pmd_present(pmde)) {
-				swp_entry_t entry;
+				softleaf_t entry;
 
 				if (!thp_migration_supported() ||
 				    !(pvmw->flags & PVMW_MIGRATION))
 					return not_found(pvmw);
-				entry = pmd_to_swp_entry(pmde);
-				if (!is_migration_entry(entry) ||
-				    !check_pmd(swp_offset_pfn(entry), pvmw))
+				entry = softleaf_from_pmd(pmde);
+
+				if (!softleaf_is_migration(entry) ||
+				    !check_pmd(softleaf_to_pfn(entry), pvmw))
 					return not_found(pvmw);
 				return true;
 			}
-			if (likely(pmd_trans_huge(pmde) || pmd_devmap(pmde))) {
+			if (likely(pmd_trans_huge(pmde))) {
 				if (pvmw->flags & PVMW_MIGRATION)
 					return not_found(pvmw);
 				if (!check_pmd(pmd_pfn(pmde), pvmw))
@@ -265,8 +274,16 @@ restart:
 		 * cannot return prematurely, while zap_huge_pmd() has
 		 * cleared *pmd but not decremented compound_mapcount().
 		 */
+		const softleaf_t entry = softleaf_from_pmd(pmde);
+
+		if (softleaf_is_device_private(entry)) {
+			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
+			return true;
+		}
+
 		if ((pvmw->flags & PVMW_SYNC) &&
-		    transhuge_vma_suitable(vma, pvmw->address) &&
+		    thp_vma_suitable_order(vma, pvmw->address,
+					   PMD_ORDER) &&
 		    (pvmw->nr_pages >= HPAGE_PMD_NR)) {
 			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
 
@@ -275,13 +292,13 @@ restart:
 			step_forward(pvmw, PMD_SIZE);
 			continue;
 		}
-		if (!map_pte(pvmw, &ptl)) {
+		if (!map_pte(pvmw, &pmde, &ptl)) {
 			if (!pvmw->pte)
 				goto restart;
 			goto next_pte;
 		}
this_pte:
-		if (check_pte(pvmw))
+		if (check_pte(pvmw, 1))
 			return true;
next_pte:
		do {
@@ -296,14 +313,20 @@ next_pte:
 				}
 				pte_unmap(pvmw->pte);
 				pvmw->pte = NULL;
+				pvmw->flags |= PVMW_PGTABLE_CROSSED;
 				goto restart;
 			}
 			pvmw->pte++;
 		} while (pte_none(ptep_get(pvmw->pte)));
 
 		if (!pvmw->ptl) {
+			spin_lock(ptl);
+			if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) {
+				pte_unmap_unlock(pvmw->pte, ptl);
+				pvmw->pte = NULL;
+				goto restart;
+			}
 			pvmw->ptl = ptl;
-			spin_lock(pvmw->ptl);
 		}
 		goto this_pte;
 	} while (pvmw->address < end);
@@ -311,17 +334,21 @@ next_pte:
 	return false;
 }
 
+#ifdef CONFIG_MEMORY_FAILURE
 /**
  * page_mapped_in_vma - check whether a page is really mapped in a VMA
  * @page: the page to test
  * @vma: the VMA to test
  *
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA. Only
- * valid for normal file or anonymous VMAs.
+ * Return: The address the page is mapped at if the page is in the range
+ * covered by the VMA and present in the page table. If the page is
+ * outside the VMA or not present, returns -EFAULT.
+ * Only valid for normal file or anonymous VMAs.
  */
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(const struct page *page,
+				 struct vm_area_struct *vma)
 {
+	const struct folio *folio = page_folio(page);
 	struct page_vma_mapped_walk pvmw = {
 		.pfn = page_to_pfn(page),
 		.nr_pages = 1,
@@ -329,11 +356,13 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 		.flags = PVMW_SYNC,
 	};
 
-	pvmw.address = vma_address(page, vma);
+	pvmw.address = vma_address(vma, page_pgoff(folio, page), 1);
 	if (pvmw.address == -EFAULT)
-		return 0;
+		goto out;
 	if (!page_vma_mapped_walk(&pvmw))
-		return 0;
+		return -EFAULT;
 	page_vma_mapped_walk_done(&pvmw);
-	return 1;
+out:
+	return pvmw.address;
 }
+#endif
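Editorial note, not part of the patch: the swapops.h to leafops.h conversion above is mechanical. Pairing the removed and added lines in this diff gives a one-to-one mapping of helpers, summarized here for reference:

/*
 * swapops.h helper (removed)          leafops.h helper (added)
 * pte_to_swp_entry(ptent)          -> softleaf_from_pte(ptent)
 * pmd_to_swp_entry(pmde)           -> softleaf_from_pmd(pmde)
 * is_migration_entry(entry)        -> softleaf_is_migration(entry)
 * is_device_private_entry(entry)   -> softleaf_is_device_private(entry)
 * is_device_exclusive_entry(entry) -> softleaf_is_device_exclusive(entry)
 * swp_offset_pfn(entry)            -> softleaf_to_pfn(entry)
 * is_pmd_migration_entry(pmde)     -> pmd_is_migration_entry(pmde)
 */

One behavioral difference is visible in the diff itself: in the PVMW_MIGRATION branch of check_pte(), the old code also accepted device-exclusive entries, while the new code matches migration entries only.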
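The reason map_pte() now takes a pmd_t *pmdvalp is the switch from pte_offset_map_nolock() to pte_offset_map_rw_nolock(): the latter also reports the PMD value it saw while mapping the PTE table, and the caller must revalidate that value with pmd_same() after taking the PTL, retrying if the PMD changed underneath it (for example because the PTE table was freed or collapsed into a THP in the meantime). Below is a distilled sketch of that pattern, kernel-context code using only interfaces that appear in the diff; the helper name map_pte_sketch is made up for illustration and this is not the literal kernel code:

/* Sketch only: the lock-then-revalidate pattern the patch adds to map_pte(). */
static bool map_pte_sketch(struct mm_struct *mm, pmd_t *pmd,
			   unsigned long addr, pte_t **ptep,
			   spinlock_t **ptlp)
{
	pmd_t pmdval;

again:
	/* Map the PTE table without its lock; record the PMD value seen. */
	*ptep = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, ptlp);
	if (!*ptep)
		return false;	/* no PTE table here (any more) */

	spin_lock(*ptlp);
	/* The table may have been freed or replaced before we got the lock. */
	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		pte_unmap_unlock(*ptep, *ptlp);
		goto again;
	}
	return true;	/* PTL held and PMD revalidated: *ptep is safe to use */
}

The same pmd_same() recheck appears a second time in page_vma_mapped_walk() on the next_pte path, because map_pte() can return with the table mapped but the PTL not yet taken, and the walk may only acquire the lock after scanning further PTEs.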
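check_pte() now compares two pfn ranges instead of asking whether a single pfn falls inside the walk's target: the PTE side covers pte_nr pages (1 for the PTE-level walk, pages_per_huge_page(hstate) for the hugetlb path above) and the target side covers pvmw->nr_pages. A standalone model of the new test, runnable in userspace; the function and variable names here are illustrative, not from the kernel:

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the new check_pte() range test: a mapping of pte_nr pages
 * starting at pfn matches the target range [target_pfn, target_pfn +
 * nr_pages) iff the two ranges intersect. Same two comparisons as the
 * patch, phrased on the last pfn of each range.
 */
static bool pfn_ranges_overlap(unsigned long pfn, unsigned long pte_nr,
			       unsigned long target_pfn,
			       unsigned long nr_pages)
{
	if ((pfn + pte_nr - 1) < target_pfn)	/* mapping ends before target */
		return false;
	if (pfn > (target_pfn + nr_pages - 1))	/* mapping starts after target */
		return false;
	return true;
}

int main(void)
{
	/* A 512-page hugetlb mapping at pfn 0x1000 covers target pfn 0x11ff... */
	printf("%d\n", pfn_ranges_overlap(0x1000, 512, 0x11ff, 1)); /* 1 */
	/* ...but not pfn 0x1200, one page past its end. */
	printf("%d\n", pfn_ranges_overlap(0x1000, 512, 0x1200, 1)); /* 0 */
	return 0;
}

The old expression, return (pfn - pvmw->pfn) < pvmw->nr_pages, only tested whether the one mapped pfn fell inside the target range; with pte_nr greater than one it would miss mappings that start below pvmw->pfn yet still cover it.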
