From 69bf4b6b54fb7f52b7ea9ce28d4a360cd5ec956d Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 5 Jul 2019 19:55:18 -0700
Subject: Revert "mm: page cache: store only head pages in i_pages"

This reverts commit 5fd4ca2d84b249f0858ce28cf637cf25b61a398f.

Mikhail Gavrilov reports that it causes the VM_BUG_ON_PAGE() in
__delete_from_swap_cache() to trigger:

  page:ffffd6d34dff0000 refcount:1 mapcount:1 mapping:ffff97812323a689 index:0xfecec363
  anon
  flags: 0x17fffe00080034(uptodate|lru|active|swapbacked)
  raw: 0017fffe00080034 ffffd6d34c67c508 ffffd6d3504b8d48 ffff97812323a689
  raw: 00000000fecec363 0000000000000000 0000000100000000 ffff978433ace000
  page dumped because: VM_BUG_ON_PAGE(entry != page)
  page->mem_cgroup:ffff978433ace000
  ------------[ cut here ]------------
  kernel BUG at mm/swap_state.c:170!
  invalid opcode: 0000 [#1] SMP NOPTI
  CPU: 1 PID: 221 Comm: kswapd0 Not tainted 5.2.0-0.rc2.git0.1.fc31.x86_64 #1
  Hardware name: System manufacturer System Product Name/ROG STRIX X470-I GAMING, BIOS 2202 04/11/2019
  RIP: 0010:__delete_from_swap_cache+0x20d/0x240
  Code: 30 65 48 33 04 25 28 00 00 00 75 4a 48 83 c4 38 5b 5d 41 5c 41 5d 41 5e 41 5f c3 48 c7 c6 2f dc 0f 8a 48 89 c7 e8 93 1b fd ff <0f> 0b 48 c7 c6 a8 74 0f 8a e8 85 1b fd ff 0f 0b 48 c7 c6 a8 7d 0f
  RSP: 0018:ffffa982036e7980 EFLAGS: 00010046
  RAX: 0000000000000021 RBX: 0000000000000040 RCX: 0000000000000006
  RDX: 0000000000000000 RSI: 0000000000000086 RDI: ffff97843d657900
  RBP: 0000000000000001 R08: ffffa982036e7835 R09: 0000000000000535
  R10: ffff97845e21a46c R11: ffffa982036e7835 R12: ffff978426387120
  R13: 0000000000000000 R14: ffffd6d34dff0040 R15: ffffd6d34dff0000
  FS:  0000000000000000(0000) GS:ffff97843d640000(0000) knlGS:0000000000000000
  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
  CR2: 00002cba88ef5000 CR3: 000000078a97c000 CR4: 00000000003406e0
  Call Trace:
   delete_from_swap_cache+0x46/0xa0
   try_to_free_swap+0xbc/0x110
   swap_writepage+0x13/0x70
   pageout.isra.0+0x13c/0x350
   shrink_page_list+0xc14/0xdf0
   shrink_inactive_list+0x1e5/0x3c0
   shrink_node_memcg+0x202/0x760
   shrink_node+0xe0/0x470
   balance_pgdat+0x2d1/0x510
   kswapd+0x220/0x420
   kthread+0xfb/0x130
   ret_from_fork+0x22/0x40

and it's not immediately obvious why it happens.  It's too late in the
rc cycle to do anything but revert for now.
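
For context, the difference between the two storage schemes can be
sketched as a self-contained userspace model (illustrative only:
"toy_page" and the "slots" array below are stand-ins for struct page
and the i_pages XArray, not kernel types):

  #include <assert.h>

  #define NR 8    /* stands in for HPAGE_PMD_NR subpages */

  struct toy_page { char pad; };

  int main(void)
  {
      struct toy_page compound[NR];     /* &compound[0] is the head */
      struct toy_page *slots[NR];
      int i;

      /* Scheme of the reverted commit: every slot spanned by the
       * compound page holds the head, so deletion can assert
       * entry == page (the check that fired above). */
      for (i = 0; i < NR; i++)
          slots[i] = &compound[0];
      for (i = 0; i < NR; i++)
          assert(slots[i] == &compound[0]);

      /* Scheme this revert restores: slot i holds subpage head + i,
       * matching xas_store(&xas, page + i) and entry != page + i. */
      for (i = 0; i < NR; i++)
          slots[i] = &compound[0] + i;
      for (i = 0; i < NR; i++)
          assert(slots[i] == &compound[0] + i);
      return 0;
  }
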
Link: https://lore.kernel.org/lkml/CABXGCsN9mYmBD-4GaaeW_NrDu+FDXLzr_6x+XNxfmFV6QkYCDg@mail.gmail.com/
Reported-and-bisected-by: Mikhail Gavrilov
Suggested-by: Jan Kara
Cc: Michal Hocko
Cc: Vlastimil Babka
Cc: Matthew Wilcox
Cc: Kirill Shutemov
Cc: William Kucharski
Cc: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/pagemap.h |  13 -----
 mm/filemap.c            | 146 +++++++++++++++++++++++++++++-------------------
 mm/huge_memory.c        |   3 ---
 mm/khugepaged.c         |   4 ++--
 mm/memfd.c              |   2 --
 mm/migrate.c            |   2 +-
 mm/shmem.c              |   2 +-
 mm/swap_state.c         |   4 ++--
 8 files changed, 94 insertions(+), 82 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 9ec3544baee2..fe0b29bf2df7 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -333,19 +333,6 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping
 					mapping_gfp_mask(mapping));
 }
 
-static inline struct page *find_subpage(struct page *page, pgoff_t offset)
-{
-	unsigned long mask;
-
-	if (PageHuge(page))
-		return page;
-
-	VM_BUG_ON_PAGE(PageTail(page), page);
-
-	mask = (1UL << compound_order(page)) - 1;
-	return page + (offset & mask);
-}
-
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
diff --git a/mm/filemap.c b/mm/filemap.c
index df2006ba0cfa..6dd9a2274c80 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -281,11 +281,11 @@ EXPORT_SYMBOL(delete_from_page_cache);
  * @pvec: pagevec with pages to delete
  *
  * The function walks over mapping->i_pages and removes pages passed in @pvec
- * from the mapping. The function expects @pvec to be sorted by page index
- * and is optimised for it to be dense.
+ * from the mapping. The function expects @pvec to be sorted by page index.
  * It tolerates holes in @pvec (mapping entries at those indices are not
  * modified). The function expects only THP head pages to be present in the
- * @pvec.
+ * @pvec and takes care to delete all corresponding tail pages from the
+ * mapping as well.
  *
  * The function expects the i_pages lock to be held.
  */
@@ -294,44 +294,40 @@ static void page_cache_delete_batch(struct address_space *mapping,
 {
 	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
 	int total_pages = 0;
-	int i = 0;
+	int i = 0, tail_pages = 0;
 	struct page *page;
 
 	mapping_set_update(&xas, mapping);
 	xas_for_each(&xas, page, ULONG_MAX) {
-		if (i >= pagevec_count(pvec))
+		if (i >= pagevec_count(pvec) && !tail_pages)
 			break;
-
-		/* A swap/dax/shadow entry got inserted? Skip it. */
 		if (xa_is_value(page))
 			continue;
-		/*
-		 * A page got inserted in our range? Skip it. We have our
-		 * pages locked so they are protected from being removed.
-		 * If we see a page whose index is higher than ours, it
-		 * means our page has been removed, which shouldn't be
-		 * possible because we're holding the PageLock.
-		 */
-		if (page != pvec->pages[i]) {
-			VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
-					page);
-			continue;
-		}
-
-		WARN_ON_ONCE(!PageLocked(page));
-
-		if (page->index == xas.xa_index)
+		if (!tail_pages) {
+			/*
+			 * Some page got inserted in our range? Skip it. We
+			 * have our pages locked so they are protected from
+			 * being removed.
+			 */
+			if (page != pvec->pages[i]) {
+				VM_BUG_ON_PAGE(page->index >
+						pvec->pages[i]->index, page);
+				continue;
+			}
+			WARN_ON_ONCE(!PageLocked(page));
+			if (PageTransHuge(page) && !PageHuge(page))
+				tail_pages = HPAGE_PMD_NR - 1;
 			page->mapping = NULL;
-		/* Leave page->index set: truncation lookup relies on it */
-
-		/*
-		 * Move to the next page in the vector if this is a regular
-		 * page or the index is of the last sub-page of this compound
-		 * page.
-		 */
-		if (page->index + (1UL << compound_order(page)) - 1 ==
-				xas.xa_index)
+			/*
+			 * Leave page->index set: truncation lookup relies
+			 * upon it
+			 */
 			i++;
+		} else {
+			VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
+					!= pvec->pages[i]->index, page);
+			tail_pages--;
+		}
 		xas_store(&xas, NULL);
 		total_pages++;
 	}
@@ -1498,7 +1494,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 {
 	XA_STATE(xas, &mapping->i_pages, offset);
-	struct page *page;
+	struct page *head, *page;
 
 	rcu_read_lock();
 repeat:
@@ -1513,19 +1509,25 @@ repeat:
 	if (!page || xa_is_value(page))
 		goto out;
 
-	if (!page_cache_get_speculative(page))
+	head = compound_head(page);
+	if (!page_cache_get_speculative(head))
+		goto repeat;
+
+	/* The page was split under us? */
+	if (compound_head(page) != head) {
+		put_page(head);
 		goto repeat;
+	}
 
 	/*
-	 * Has the page moved or been split?
+	 * Has the page moved?
 	 * This is part of the lockless pagecache protocol. See
 	 * include/linux/pagemap.h for details.
 	 */
 	if (unlikely(page != xas_reload(&xas))) {
-		put_page(page);
+		put_page(head);
 		goto repeat;
 	}
-	page = find_subpage(page, offset);
 
 out:
 	rcu_read_unlock();
@@ -1707,6 +1709,7 @@ unsigned find_get_entries(struct address_space *mapping,
 
 	rcu_read_lock();
 	xas_for_each(&xas, page, ULONG_MAX) {
+		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1717,13 +1720,17 @@ unsigned find_get_entries(struct address_space *mapping,
 		if (xa_is_value(page))
 			goto export;
 
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			goto retry;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto put_page;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
-		page = find_subpage(page, xas.xa_index);
 
 export:
 		indices[ret] = xas.xa_index;
@@ -1732,7 +1739,7 @@ export:
 			break;
 		continue;
 put_page:
-		put_page(page);
+		put_page(head);
 retry:
 		xas_reset(&xas);
 	}
@@ -1774,27 +1781,33 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 
 	rcu_read_lock();
 	xas_for_each(&xas, page, end) {
+		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/* Skip over shadow, swap and DAX entries */
 		if (xa_is_value(page))
 			continue;
 
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			goto retry;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto put_page;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = find_subpage(page, xas.xa_index);
+		pages[ret] = page;
 		if (++ret == nr_pages) {
 			*start = xas.xa_index + 1;
 			goto out;
 		}
 		continue;
 put_page:
-		put_page(page);
+		put_page(head);
 retry:
 		xas_reset(&xas);
 	}
@@ -1839,6 +1852,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 
 	rcu_read_lock();
 	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1848,19 +1862,24 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 		if (xa_is_value(page))
 			break;
 
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			goto retry;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto put_page;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = find_subpage(page, xas.xa_index);
+		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
 		continue;
 put_page:
-		put_page(page);
+		put_page(head);
 retry:
 		xas_reset(&xas);
 	}
@@ -1896,6 +1915,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 
 	rcu_read_lock();
 	xas_for_each_marked(&xas, page, end, tag) {
+		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1906,21 +1926,26 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 		if (xa_is_value(page))
 			continue;
 
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			goto retry;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto put_page;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = find_subpage(page, xas.xa_index);
+		pages[ret] = page;
 		if (++ret == nr_pages) {
 			*index = xas.xa_index + 1;
 			goto out;
 		}
 		continue;
 put_page:
-		put_page(page);
+		put_page(head);
 retry:
 		xas_reset(&xas);
 	}
@@ -2603,7 +2628,7 @@ void filemap_map_pages(struct vm_fault *vmf,
 	pgoff_t last_pgoff = start_pgoff;
 	unsigned long max_idx;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
-	struct page *page;
+	struct page *head, *page;
 
 	rcu_read_lock();
 	xas_for_each(&xas, page, end_pgoff) {
@@ -2612,19 +2637,24 @@ void filemap_map_pages(struct vm_fault *vmf,
 		if (xa_is_value(page))
 			goto next;
 
+		head = compound_head(page);
+
 		/*
 		 * Check for a locked page first, as a speculative
 		 * reference may adversely influence page migration.
 		 */
-		if (PageLocked(page))
+		if (PageLocked(head))
 			goto next;
-		if (!page_cache_get_speculative(page))
+		if (!page_cache_get_speculative(head))
 			goto next;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto skip;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto skip;
-		page = find_subpage(page, xas.xa_index);
 
 		if (!PageUptodate(page) ||
 				PageReadahead(page) ||
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bb8b617e34ed..885642c82aaa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2496,9 +2496,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
 				shmem_uncharge(head->mapping->host, 1);
 			put_page(head + i);
-		} else if (!PageAnon(page)) {
-			__xa_store(&head->mapping->i_pages, head[i].index,
-					head + i, 0);
 		}
 	}
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0f7419938008..eaaa21b23215 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1378,7 +1378,7 @@ static void collapse_shmem(struct mm_struct *mm,
 				result = SCAN_FAIL;
 				goto xa_locked;
 			}
-			xas_store(&xas, new_page);
+			xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 			nr_none++;
 			continue;
 		}
@@ -1454,7 +1454,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		list_add_tail(&page->lru, &pagelist);
 
 		/* Finally, replace with the new page. */
-		xas_store(&xas, new_page);
+		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 		continue;
 out_unlock:
 		unlock_page(page);
diff --git a/mm/memfd.c b/mm/memfd.c
index 2647c898990c..650e65a46b9c 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -39,7 +39,6 @@ static void memfd_tag_pins(struct xa_state *xas)
 	xas_for_each(xas, page, ULONG_MAX) {
 		if (xa_is_value(page))
 			continue;
-		page = find_subpage(page, xas->xa_index);
 		if (page_count(page) - page_mapcount(page) > 1)
 			xas_set_mark(xas, MEMFD_TAG_PINNED);
 
@@ -89,7 +88,6 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 			bool clear = true;
 			if (xa_is_value(page))
 				continue;
-			page = find_subpage(page, xas.xa_index);
 			if (page_count(page) - page_mapcount(page) != 1) {
 				/*
 				 * On the last scan, we clean up all those tags
diff --git a/mm/migrate.c b/mm/migrate.c
index f2ecc2855a12..e9594bc0d406 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -463,7 +463,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
 		for (i = 1; i < HPAGE_PMD_NR; i++) {
 			xas_next(&xas);
-			xas_store(&xas, newpage);
+			xas_store(&xas, newpage + i);
 		}
 	}
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 1bb3b8dc8bb2..f4dce9c8670d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -614,7 +614,7 @@ static int shmem_add_to_page_cache(struct page *page,
 		if (xas_error(&xas))
 			goto unlock;
 next:
-		xas_store(&xas, page);
+		xas_store(&xas, page + i);
 		if (++i < nr) {
 			xas_next(&xas);
 			goto next;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index eb714165afd2..85245fdec8d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -132,7 +132,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 		for (i = 0; i < nr; i++) {
 			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
 			set_page_private(page + i, entry.val + i);
-			xas_store(&xas, page);
+			xas_store(&xas, page + i);
 			xas_next(&xas);
 		}
 		address_space->nrpages += nr;
@@ -167,7 +167,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
 	for (i = 0; i < nr; i++) {
 		void *entry = xas_store(&xas, NULL);
-		VM_BUG_ON_PAGE(entry != page, entry);
+		VM_BUG_ON_PAGE(entry != page + i, entry);
 		set_page_private(page + i, 0);
 		xas_next(&xas);
 	}
-- cgit
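
A note on the lookup side of the change above: with the reverted commit
in place, lookups stored only head pages and converted to the subpage
via find_subpage() (the helper this revert deletes from
include/linux/pagemap.h); after the revert, slot i already holds
head + i, so pages[ret] = page needs no conversion. The index
arithmetic of the removed helper, sketched as a self-contained
userspace model ("toy_page" and "order" are stand-ins for struct page
and compound_order(), not kernel types):

  #include <assert.h>

  typedef unsigned long pgoff_t;

  struct toy_page { unsigned int order; };  /* stand-in for struct page */

  /* Mirrors the deleted find_subpage(), minus the PageHuge() and
   * VM_BUG_ON_PAGE() checks: mask the low bits of the file offset to
   * pick the subpage within the compound page. */
  static struct toy_page *find_subpage(struct toy_page *head, pgoff_t offset)
  {
      unsigned long mask = (1UL << head->order) - 1;
      return head + (offset & mask);
  }

  int main(void)
  {
      struct toy_page compound[8] = { { .order = 3 } };  /* 2^3 subpages */

      /* 0xfecec363 is the index from the reported oops; 0x63 & 0x7 == 3 */
      assert(find_subpage(&compound[0], 0xfecec363) == &compound[3]);
      return 0;
  }

Either way every slot spanned by a huge page is written at insertion
(see the xas_store() loops above); the schemes differ in whether a slot
holds the head, which every lookup must then convert, or the subpage
itself. Why the head-only invariant was violated in the report above
remains undiagnosed, which is why this is a plain revert.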