author	Matthew Wilcox (Oracle) <willy@infradead.org>	2024-03-11 19:18:34 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2024-03-12 13:07:16 -0700
commit	47932e7048df9156e96133ee90fb3e9df68dbd15 (patch)
tree	ebba369496d523e16dc182d9167a1d8bd623687b
parent	58f327f2ce80f9c7b4a70e9cf017ae8810d44a20 (diff)
mm: remove folio from deferred split list before uncharging it
When freeing a large folio, we must remove it from the deferred split
list before we uncharge it, as each memcg has its own deferred split
list (with its associated lock), and removing a folio from the deferred
split list while holding the wrong lock will corrupt that list and
cause various related problems.

Link: https://lore.kernel.org/linux-mm/367a14f7-340e-4b29-90ae-bc3fcefdd5f4@arm.com/
Link: https://lkml.kernel.org/r/20240311191835.312162-1-willy@infradead.org
Fixes: f77171d241e3 ("mm: allow non-hugetlb large folios to be batch processed")
Fixes: 29f3843026cf ("mm: free folios directly in move_folios_to_lru()")
Fixes: bc2ff4cbc329 ("mm: free folios in a batch in shrink_folio_list()")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Debugged-by: Ryan Roberts <ryan.roberts@arm.com>
Tested-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
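Why the ordering matters: the deferred split queue (and its lock) is
looked up through the memcg the folio is still charged to, so the
removal must happen while that charge is intact. A condensed,
illustrative sketch of the ordering each hunk below enforces (not the
exact kernel code; the surrounding batch-free loop is omitted):

	/* Before uncharging: the folio's memcg is still valid, so
	 * folio_undo_large_rmappable() locks the correct per-memcg
	 * deferred split queue when it unlinks the folio. */
	if (folio_test_large(folio) &&
	    folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);

	/* Only now is it safe to uncharge: after this, the folio no
	 * longer resolves to the memcg whose split queue it was on,
	 * and a later removal would take the wrong lock. */
	mem_cgroup_uncharge_folios(&free_folios);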
-rw-r--r--	mm/swap.c	3
-rw-r--r--	mm/vmscan.c	6
2 files changed, 9 insertions, 0 deletions
diff --git a/mm/swap.c b/mm/swap.c
index 6b697d33fa5b..e43a5911b170 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -1012,6 +1012,9 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			free_huge_folio(folio);
 			continue;
 		}
+		if (folio_test_large(folio) &&
+		    folio_test_large_rmappable(folio))
+			folio_undo_large_rmappable(folio);
 		__page_cache_release(folio, &lruvec, &flags);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e3349b75f15b..61606fa83504 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1413,6 +1413,9 @@ free_it:
 		 */
 		nr_reclaimed += nr_pages;
+		if (folio_test_large(folio) &&
+		    folio_test_large_rmappable(folio))
+			folio_undo_large_rmappable(folio);
 		if (folio_batch_add(&free_folios, folio) == 0) {
 			mem_cgroup_uncharge_folios(&free_folios);
 			try_to_unmap_flush();
@@ -1819,6 +1822,9 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
 		if (unlikely(folio_put_testzero(folio))) {
 			__folio_clear_lru_flags(folio);
+			if (folio_test_large(folio) &&
+			    folio_test_large_rmappable(folio))
+				folio_undo_large_rmappable(folio);
 			if (folio_batch_add(&free_folios, folio) == 0) {
 				spin_unlock_irq(&lruvec->lru_lock);
 				mem_cgroup_uncharge_folios(&free_folios);