author     Sidhartha Kumar <sidhartha.kumar@oracle.com>   2023-01-25 09:05:32 -0800
committer  Andrew Morton <akpm@linux-foundation.org>      2023-02-13 15:54:28 -0800
commit     ea8e72f4116a995c2aba3fb738ac372c4115375a (patch)
tree       7301c5efc5d8dceb6dcb49cfe96cd6ef691cbf37 /mm
parent     91a2fb956ad993f3cbcfc632611e17e3699fb652 (diff)
mm/hugetlb: convert putback_active_hugepage to take in a folio
Convert putback_active_hugepage() to folio_putback_active_hugetlb(). This
removes one user of the Huge Page macros which take in a page. The callers
in migrate.c are also cleaned up by being able to directly use the src and
dst folio variables.

Link: https://lkml.kernel.org/r/20230125170537.96973-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
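For a caller that still holds a struct page rather than a folio, the conversion
amounts to resolving the folio first and passing that to the new helper. A
minimal caller-side sketch (illustrative only; the wrapper example_putback() is
hypothetical and not part of this patch):

static void example_putback(struct page *page)
{
        struct folio *folio = page_folio(page); /* resolve the containing folio */

        /* was: putback_active_hugepage(page); */
        folio_putback_active_hugetlb(folio);
}

This mirrors the putback_movable_pages() hunk below, where page_folio() is
applied inline at the call site; the other callers already have src/dst folio
variables and pass them directly.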
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c  8
-rw-r--r--  mm/migrate.c  8
2 files changed, 8 insertions, 8 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a0d486ed5411..fd1ce61b8f3f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7300,13 +7300,13 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
         return ret;
 }
 
-void putback_active_hugepage(struct page *page)
+void folio_putback_active_hugetlb(struct folio *folio)
 {
         spin_lock_irq(&hugetlb_lock);
-        SetHPageMigratable(page);
-        list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
+        folio_set_hugetlb_migratable(folio);
+        list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
         spin_unlock_irq(&hugetlb_lock);
-        put_page(page);
+        folio_put(folio);
 }
 
 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
diff --git a/mm/migrate.c b/mm/migrate.c
index 811e76c6fac1..c09872cf41b7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -151,7 +151,7 @@ void putback_movable_pages(struct list_head *l)
 
         list_for_each_entry_safe(page, page2, l, lru) {
                 if (unlikely(PageHuge(page))) {
-                        putback_active_hugepage(page);
+                        folio_putback_active_hugetlb(page_folio(page));
                         continue;
                 }
                 list_del(&page->lru);
@@ -1298,7 +1298,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
         if (folio_ref_count(src) == 1) {
                 /* page was freed from under us. So we are done. */
-                putback_active_hugepage(hpage);
+                folio_putback_active_hugetlb(src);
                 return MIGRATEPAGE_SUCCESS;
         }
 
@@ -1383,7 +1383,7 @@ out_unlock:
         folio_unlock(src);
 out:
         if (rc == MIGRATEPAGE_SUCCESS)
-                putback_active_hugepage(hpage);
+                folio_putback_active_hugetlb(src);
         else if (rc != -EAGAIN)
                 list_move_tail(&src->lru, ret);
 
@@ -1395,7 +1395,7 @@ out:
         if (put_new_page)
                 put_new_page(new_hpage, private);
         else
-                putback_active_hugepage(new_hpage);
+                folio_putback_active_hugetlb(dst);
 
         return rc;
 }