author	Matthew Wilcox (Oracle) <willy@infradead.org>	2023-12-11 16:22:13 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-12-29 11:58:26 -0800
commit	5432726848bb27a01badcbc93b596f39ee6c5ffb (patch)
tree	5a8e8765f4e0f87992c036ef9143eda68e4217ef /mm/khugepaged.c
parent	d3b082736518562f4eed185e1a67f28d20635fef (diff)
mm: convert collapse_huge_page() to use a folio
Replace three calls to compound_head() with one.

Link: https://lkml.kernel.org/r/20231211162214.2146080-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
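For context, the compound_head() reduction comes from the page-based helpers this patch removes: __SetPageUptodate(), page_add_new_anon_rmap() and lru_cache_add_inactive_or_unevictable() each re-derive the head page from hpage internally, whereas the folio variants take a folio that has already been resolved once via page_folio(). A condensed before/after sketch of just that pattern, using the identifiers from the diff below (not a literal excerpt of the function):

	/* Before: each page-based helper looks up the head page again. */
	__SetPageUptodate(hpage);
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);

	/* After: resolve the folio once, then pass it around directly. */
	struct folio *folio = page_folio(hpage);
	__folio_mark_uptodate(folio);
	folio_add_new_anon_rmap(folio, vma, address);
	folio_add_lru_vma(folio, vma);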
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--	mm/khugepaged.c	15
1 file changed, 8 insertions, 7 deletions
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index de174d049e71..1002e9156388 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1090,6 +1090,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	pmd_t *pmd, _pmd;
 	pte_t *pte;
 	pgtable_t pgtable;
+	struct folio *folio;
 	struct page *hpage;
 	spinlock_t *pmd_ptl, *pte_ptl;
 	int result = SCAN_FAIL;
@@ -1212,13 +1213,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	if (unlikely(result != SCAN_SUCCEED))
 		goto out_up_write;
 
+	folio = page_folio(hpage);
 	/*
-	 * spin_lock() below is not the equivalent of smp_wmb(), but
-	 * the smp_wmb() inside __SetPageUptodate() can be reused to
-	 * avoid the copy_huge_page writes to become visible after
-	 * the set_pmd_at() write.
+	 * The smp_wmb() inside __folio_mark_uptodate() ensures the
+	 * copy_huge_page writes become visible before the set_pmd_at()
+	 * write.
 	 */
-	__SetPageUptodate(hpage);
+	__folio_mark_uptodate(folio);
 	pgtable = pmd_pgtable(_pmd);
 
 	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
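The rewritten comment in the hunk above relies on the write barrier inside __folio_mark_uptodate(). That helper is the folio analogue of __SetPageUptodate() and reduces to roughly the following (simplified sketch, instrumentation and debug checks omitted; not a verbatim copy of the kernel header):

	static inline void __folio_mark_uptodate(struct folio *folio)
	{
		/* Order the preceding stores (the huge-page copy) before
		 * every later store from this CPU, including the caller's
		 * set_pmd_at() that publishes the mapping.		   */
		smp_wmb();
		__set_bit(PG_uptodate, folio_flags(folio, 0));
	}

Because set_pmd_at() is issued after this barrier, the copy_huge_page writes cannot become visible after the set_pmd_at() write, which is exactly what the updated comment states.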
@@ -1226,8 +1227,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
 	spin_lock(pmd_ptl);
 	BUG_ON(!pmd_none(*pmd));
-	page_add_new_anon_rmap(hpage, vma, address);
-	lru_cache_add_inactive_or_unevictable(hpage, vma);
+	folio_add_new_anon_rmap(folio, vma, address);
+	folio_add_lru_vma(folio, vma);
 	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 	set_pmd_at(mm, address, pmd, _pmd);
 	update_mmu_cache_pmd(vma, address, pmd);
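Putting the three hunks together, the publish path of collapse_huge_page() now reads roughly as below (context between the hunks elided and marked /* ... */). folio_add_new_anon_rmap() and folio_add_lru_vma() are the folio counterparts of the removed page_add_new_anon_rmap() and lru_cache_add_inactive_or_unevictable() calls: the first establishes the rmap for the newly collapsed anonymous folio, the second adds it to the LRU (or the unevictable list if the VMA is mlocked), and both take the folio resolved earlier instead of re-deriving it from hpage:

	folio = page_folio(hpage);
	__folio_mark_uptodate(folio);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	/* ... */

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	folio_add_new_anon_rmap(folio, vma, address);
	folio_add_lru_vma(folio, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	/* ... */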