author     Mike Kravetz <mike.kravetz@oracle.com>    2023-08-22 15:30:43 -0700
committer  Andrew Morton <akpm@linux-foundation.org>    2023-08-24 16:20:15 -0700
commit     6c1419730822fe991fc15bfd7059f6872a71a7af (patch)
tree       d3c11ca19bc7c273925db51d8a7608902b6b7f0e /mm/hugetlb.c
parent     fcbc329fa39ef261ba9072c56c63563423bff798 (diff)
hugetlb: clear flags in tail pages that will be freed individually
hugetlb manually creates and destroys compound pages. As such it makes
assumptions about struct page layout. Commit ebc1baf5c9b4 ("mm: free up a
word in the first tail page") breaks hugetlb. The following will fix the
breakage.

Link: https://lkml.kernel.org/r/20230822231741.GC4509@monkey
Fixes: ebc1baf5c9b4 ("mm: free up a word in the first tail page")
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
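In short, the patch drops the hand-picked per-subpage flag clearing from
__update_and_free_hugetlb_folio() and instead clears the generic
check-at-free mask on each tail page while the compound page is being
dismantled. A minimal sketch of the resulting teardown loop, simplified
from the first hunk below (PAGE_FLAGS_CHECK_AT_FREE is the kernel's
"these flags must be clear when a page is freed" mask from
include/linux/page-flags.h):

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		/* drop every flag the page allocator checks at free time */
		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}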
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  11
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a82c3104337e..cbc25826c9b0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1484,6 +1484,7 @@ static void __destroy_compound_gigantic_folio(struct folio *folio,
 
 	for (i = 1; i < nr_pages; i++) {
 		p = folio_page(folio, i);
+		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
 		p->mapping = NULL;
 		clear_compound_head(p);
 		if (!demote)
@@ -1702,8 +1703,6 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 static void __update_and_free_hugetlb_folio(struct hstate *h,
 					struct folio *folio)
 {
-	int i;
-	struct page *subpage;
 	bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1745,14 +1744,6 @@ static void __update_and_free_hugetlb_folio(struct hstate *h,
 		spin_unlock_irq(&hugetlb_lock);
 	}
 
-	for (i = 0; i < pages_per_huge_page(h); i++) {
-		subpage = folio_page(folio, i);
-		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
-				1 << PG_referenced | 1 << PG_dirty |
-				1 << PG_active | 1 << PG_private |
-				1 << PG_writeback);
-	}
-
 	/*
 	 * Non-gigantic pages demoted from CMA allocated gigantic pages
 	 * need to be given back to CMA in free_gigantic_folio.