Diffstat (limited to 'mm/page_vma_mapped.c')
-rw-r--r--  mm/page_vma_mapped.c | 62
1 file changed, 42 insertions(+), 20 deletions(-)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 74d2de15fb5e..e463c3be934a 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -13,7 +13,8 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
return false;
}
-static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
+static bool map_pte(struct page_vma_mapped_walk *pvmw, pmd_t *pmdvalp,
+ spinlock_t **ptlp)
{
pte_t ptent;
@@ -25,6 +26,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
return !!pvmw->pte;
}
+again:
/*
* It is important to return the ptl corresponding to pte,
* in case *pvmw->pmd changes underneath us; so we need to
@@ -32,8 +34,8 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
* proceeds to loop over next ptes, and finds a match later.
* Though, in most cases, page lock already protects this.
*/
- pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
- pvmw->address, ptlp);
+ pvmw->pte = pte_offset_map_rw_nolock(pvmw->vma->vm_mm, pvmw->pmd,
+ pvmw->address, pmdvalp, ptlp);
if (!pvmw->pte)
return false;
@@ -67,8 +69,13 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
} else if (!pte_present(ptent)) {
return false;
}
+ spin_lock(*ptlp);
+ if (unlikely(!pmd_same(*pmdvalp, pmdp_get_lockless(pvmw->pmd)))) {
+ pte_unmap_unlock(pvmw->pte, *ptlp);
+ goto again;
+ }
pvmw->ptl = *ptlp;
- spin_lock(pvmw->ptl);
+
return true;
}
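For readers unfamiliar with the map-then-revalidate idiom this hunk adopts, here is a condensed sketch of what the new map_pte() flow amounts to. It is an illustration only, not the patched function: the helper name map_and_lock_pte() and the simplified arguments are hypothetical, while pte_offset_map_rw_nolock(), pmdp_get_lockless(), pmd_same() and pte_unmap_unlock() are the calls visible in the hunk above.

/*
 * Illustrative sketch (not the patched function): map the PTE page
 * without taking its lock, record the pmd value that was observed,
 * then take the PTE lock and re-check that the pmd has not changed
 * underneath us (e.g. the page table was freed or replaced).  On a
 * mismatch, drop everything and retry from the top.
 */
static pte_t *map_and_lock_pte(struct mm_struct *mm, pmd_t *pmd,
			       unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

again:
	pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, ptlp);
	if (!pte)
		return NULL;		/* no PTE page to map any more */

	spin_lock(*ptlp);
	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		pte_unmap_unlock(pte, *ptlp);
		goto again;		/* pmd changed under us, retry */
	}
	return pte;			/* mapped and locked */
}

The same lock-then-recheck sequence reappears below in the next_pte path of page_vma_mapped_walk(), where a pmd_same() failure sends the walk back to restart instead of looping locally.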
@@ -77,6 +84,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
* mapped at the @pvmw->pte
* @pvmw: page_vma_mapped_walk struct, includes a pair pte and pfn range
* for checking
+ * @pte_nr: the number of small pages described by @pvmw->pte.
*
* page_vma_mapped_walk() found a place where pfn range is *potentially*
* mapped. check_pte() has to validate this.
@@ -93,7 +101,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
* Otherwise, return false.
*
*/
-static bool check_pte(struct page_vma_mapped_walk *pvmw)
+static bool check_pte(struct page_vma_mapped_walk *pvmw, unsigned long pte_nr)
{
unsigned long pfn;
pte_t ptent = ptep_get(pvmw->pte);
@@ -104,8 +112,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
return false;
entry = pte_to_swp_entry(ptent);
- if (!is_migration_entry(entry) &&
- !is_device_exclusive_entry(entry))
+ if (!is_migration_entry(entry))
return false;
pfn = swp_offset_pfn(entry);
@@ -126,7 +133,11 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
pfn = pte_pfn(ptent);
}
- return (pfn - pvmw->pfn) < pvmw->nr_pages;
+ if ((pfn + pte_nr - 1) < pvmw->pfn)
+ return false;
+ if (pfn > (pvmw->pfn + pvmw->nr_pages - 1))
+ return false;
+ return true;
}
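The replaced single-pfn test becomes a range-overlap test: the PTE now describes pte_nr pages starting at pfn, and it matches when the interval [pfn, pfn + pte_nr) intersects [pvmw->pfn, pvmw->pfn + pvmw->nr_pages). A small standalone sketch of the same arithmetic, with hypothetical names, for illustration only:

/*
 * Illustrative equivalent of the new return logic in check_pte():
 * a mapping of pte_nr pages starting at map_pfn matches a target
 * range of nr_pages pages starting at target_pfn iff the two pfn
 * intervals intersect.
 */
static bool pfn_ranges_overlap(unsigned long map_pfn, unsigned long pte_nr,
			       unsigned long target_pfn, unsigned long nr_pages)
{
	if (map_pfn + pte_nr - 1 < target_pfn)		/* mapping ends before target */
		return false;
	if (map_pfn > target_pfn + nr_pages - 1)	/* mapping starts after target */
		return false;
	return true;
}

With pte_nr == 1 this degenerates to the old `(pfn - pvmw->pfn) < pvmw->nr_pages` check; the hugetlb caller below now passes pages_per_huge_page(), so a single huge PTE can match any small pfn that falls inside the huge page.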
/* Returns true if the two ranges overlap. Careful to not overflow. */
@@ -201,7 +212,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return false;
pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
- if (!check_pte(pvmw))
+ if (!check_pte(pvmw, pages_per_huge_page(hstate)))
return not_found(pvmw);
return true;
}
@@ -278,13 +289,13 @@ restart:
step_forward(pvmw, PMD_SIZE);
continue;
}
- if (!map_pte(pvmw, &ptl)) {
+ if (!map_pte(pvmw, &pmde, &ptl)) {
if (!pvmw->pte)
goto restart;
goto next_pte;
}
this_pte:
- if (check_pte(pvmw))
+ if (check_pte(pvmw, 1))
return true;
next_pte:
do {
@@ -305,8 +316,13 @@ next_pte:
} while (pte_none(ptep_get(pvmw->pte)));
if (!pvmw->ptl) {
+ spin_lock(ptl);
+ if (unlikely(!pmd_same(pmde, pmdp_get_lockless(pvmw->pmd)))) {
+ pte_unmap_unlock(pvmw->pte, ptl);
+ pvmw->pte = NULL;
+ goto restart;
+ }
pvmw->ptl = ptl;
- spin_lock(pvmw->ptl);
}
goto this_pte;
} while (pvmw->address < end);
@@ -314,17 +330,21 @@ next_pte:
return false;
}
+#ifdef CONFIG_MEMORY_FAILURE
/**
* page_mapped_in_vma - check whether a page is really mapped in a VMA
* @page: the page to test
* @vma: the VMA to test
*
- * Returns 1 if the page is mapped into the page tables of the VMA, 0
- * if the page is not mapped into the page tables of this VMA. Only
- * valid for normal file or anonymous VMAs.
+ * Return: The address the page is mapped at if the page is in the range
+ * covered by the VMA and present in the page table. If the page is
+ * outside the VMA or not present, returns -EFAULT.
+ * Only valid for normal file or anonymous VMAs.
*/
-int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
+unsigned long page_mapped_in_vma(const struct page *page,
+ struct vm_area_struct *vma)
{
+ const struct folio *folio = page_folio(page);
struct page_vma_mapped_walk pvmw = {
.pfn = page_to_pfn(page),
.nr_pages = 1,
@@ -332,11 +352,13 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
.flags = PVMW_SYNC,
};
- pvmw.address = vma_address(page, vma);
+ pvmw.address = vma_address(vma, page_pgoff(folio, page), 1);
if (pvmw.address == -EFAULT)
- return 0;
+ goto out;
if (!page_vma_mapped_walk(&pvmw))
- return 0;
+ return -EFAULT;
page_vma_mapped_walk_done(&pvmw);
- return 1;
+out:
+ return pvmw.address;
}
+#endif
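page_mapped_in_vma() now returns the mapped address rather than a boolean, and is only built when CONFIG_MEMORY_FAILURE is set. A hedged sketch of how a caller might consume the new convention; the caller name and surrounding code are hypothetical, only the return contract comes from the hunk above:

/*
 * Hypothetical caller: page_mapped_in_vma() now returns either the
 * user address at which @page is mapped in @vma, or -EFAULT (stored
 * in an unsigned long) when the page is outside the VMA or not
 * present in the page table.
 */
static void note_mapping(struct page *page, struct vm_area_struct *vma)
{
	unsigned long addr = page_mapped_in_vma(page, vma);

	if (addr == -EFAULT)
		return;		/* not mapped in this VMA */

	pr_info("page mapped at %#lx\n", addr);
}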