author     Sidhartha Kumar <sidhartha.kumar@oracle.com>  2023-01-12 14:46:04 -0600
committer  Andrew Morton <akpm@linux-foundation.org>  2023-02-02 22:33:02 -0800
commit     9637d7dfb19ce934f81cd56cde23573759c73afb (patch)
tree       82231167a6d4c857d097fb5375b84cc40ca0b4d0 /mm/memory-failure.c
parent     2ff6cecee669bf0fc63eadebac8cfc81f74b9a4c (diff)
mm/memory-failure: convert free_raw_hwp_pages() to folios
Change free_raw_hwp_pages() to folio_free_raw_hwp() and convert two users of hugetlb-specific page macros to their folio equivalents.

Link: https://lkml.kernel.org/r/20230112204608.80136-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
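As a minimal sketch (not part of the commit): the conversion changes the call-site pattern from passing the hugetlb head page to resolving a folio once with page_folio() and passing that, mirroring the unpoison_memory() hunks below. The example_* wrappers are hypothetical, and folio_free_raw_hwp() is static to mm/memory-failure.c, so this is illustrative only:

/* Hypothetical before/after sketch of the call-site pattern. */

/* Old pattern: look up the compound head page and pass it. */
static unsigned long example_unpoison_old(unsigned long pfn)
{
        struct page *p = pfn_to_page(pfn);
        struct page *hpage = compound_head(p);

        return free_raw_hwp_pages(hpage, false);        /* old name */
}

/* New pattern: resolve the folio once and pass it directly. */
static unsigned long example_unpoison_new(unsigned long pfn)
{
        struct page *p = pfn_to_page(pfn);
        struct folio *folio = page_folio(p);

        return folio_free_raw_hwp(folio, false);        /* new name */
}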
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--  mm/memory-failure.c | 22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d4aaed2756af..a2835907caf8 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1766,23 +1766,23 @@ static int hugetlb_set_page_hwpoison(struct page *hpage, struct page *page)
         return ret;
 }
 
-static unsigned long free_raw_hwp_pages(struct page *hpage, bool move_flag)
+static unsigned long folio_free_raw_hwp(struct folio *folio, bool move_flag)
 {
         /*
-         * HPageVmemmapOptimized hugepages can't be freed because struct
+         * hugetlb_vmemmap_optimized hugepages can't be freed because struct
          * pages for tail pages are required but they don't exist.
          */
-        if (move_flag && HPageVmemmapOptimized(hpage))
+        if (move_flag && folio_test_hugetlb_vmemmap_optimized(folio))
                 return 0;
 
         /*
-         * HPageRawHwpUnreliable hugepages shouldn't be unpoisoned by
+         * hugetlb_raw_hwp_unreliable hugepages shouldn't be unpoisoned by
          * definition.
          */
-        if (HPageRawHwpUnreliable(hpage))
+        if (folio_test_hugetlb_raw_hwp_unreliable(folio))
                 return 0;
 
-        return __free_raw_hwp_pages(hpage, move_flag);
+        return __free_raw_hwp_pages(&folio->page, move_flag);
 }
 
 void folio_clear_hugetlb_hwpoison(struct folio *folio)
@@ -1790,7 +1790,7 @@ void folio_clear_hugetlb_hwpoison(struct folio *folio)
         if (folio_test_hugetlb_raw_hwp_unreliable(folio))
                 return;
         folio_clear_hwpoison(folio);
-        free_raw_hwp_pages(&folio->page, true);
+        folio_free_raw_hwp(folio, true);
 }
 
 /*
@@ -1929,7 +1929,7 @@ static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *
         return 0;
 }
 
-static inline unsigned long free_raw_hwp_pages(struct page *hpage, bool flag)
+static inline unsigned long folio_free_raw_hwp(struct folio *folio, bool flag)
 {
         return 0;
 }
@@ -2336,6 +2336,7 @@ core_initcall(memory_failure_init);
 int unpoison_memory(unsigned long pfn)
 {
         struct page *page;
+        struct folio *folio;
         struct page *p;
         int ret = -EBUSY;
         unsigned long count = 1;
@@ -2348,6 +2349,7 @@ int unpoison_memory(unsigned long pfn)
 
         p = pfn_to_page(pfn);
         page = compound_head(p);
+        folio = page_folio(p);
 
         mutex_lock(&mf_mutex);
 
@@ -2389,7 +2391,7 @@ int unpoison_memory(unsigned long pfn)
         if (!ret) {
                 if (PageHuge(p)) {
                         huge = true;
-                        count = free_raw_hwp_pages(page, false);
+                        count = folio_free_raw_hwp(folio, false);
                         if (count == 0) {
                                 ret = -EBUSY;
                                 goto unlock_mutex;
@@ -2405,7 +2407,7 @@ int unpoison_memory(unsigned long pfn)
         } else {
                 if (PageHuge(p)) {
                         huge = true;
-                        count = free_raw_hwp_pages(page, false);
+                        count = folio_free_raw_hwp(folio, false);
                         if (count == 0) {
                                 ret = -EBUSY;
                                 put_page(page);