Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  83
1 file changed, 24 insertions, 59 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bb4a96c7b682..900c74b6aa62 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -477,17 +477,6 @@ static int reclaimer_offset(struct scan_control *sc)
return PGSTEAL_DIRECT - PGSTEAL_KSWAPD;
}
-static inline int is_page_cache_freeable(struct folio *folio)
-{
- /*
- * A freeable page cache folio is referenced only by the caller
- * that isolated the folio, the page cache and optional filesystem
- * private data at folio->private.
- */
- return folio_ref_count(folio) - folio_test_private(folio) ==
- 1 + folio_nr_pages(folio);
-}
-
/*
* We detected a synchronous write error writing a folio out. Probably
* -ENOSPC. We need to propagate that into the address_space for a subsequent
@@ -696,24 +685,12 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
* block, for some throttling. This happens by accident, because
* swap_backing_dev_info is bust: it doesn't reflect the
* congestion state of the swapdevs. Easy to fix, if needed.
+ *
+ * A freeable shmem or swapcache folio is referenced only by the
+ * caller that isolated the folio and the page cache.
*/
- if (!is_page_cache_freeable(folio))
- return PAGE_KEEP;
- if (!mapping) {
- /*
- * Some data journaling orphaned folios can have
- * folio->mapping == NULL while being dirty with clean buffers.
- */
- if (folio_test_private(folio)) {
- if (try_to_free_buffers(folio)) {
- folio_clear_dirty(folio);
- pr_info("%s: orphaned folio\n", __func__);
- return PAGE_CLEAN;
- }
- }
+ if (folio_ref_count(folio) != 1 + folio_nr_pages(folio) || !mapping)
return PAGE_KEEP;
- }
-
if (!shmem_mapping(mapping) && !folio_test_anon(folio))
return PAGE_ACTIVATE;
if (!folio_clear_dirty_for_io(folio))
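The consolidated check above relies on a simple reference-accounting invariant stated in the new comment: a freeable shmem or swapcache folio holds exactly one reference from the caller that isolated it plus one per page from the page cache. The old helper also subtracted a possible reference for filesystem private data, which presumably no longer applies once pageout() only sees shmem and swapcache folios. A minimal user-space sketch of that arithmetic (stand-in struct and names, not the kernel's folio API):

#include <stdio.h>
#include <stdbool.h>

/* Toy model of the accounting only; "toy_folio" is a stand-in, not the
 * kernel's struct folio. */
struct toy_folio {
	int ref_count;   /* total references held on the folio */
	int nr_pages;    /* number of base pages in the folio */
};

/* A freeable shmem/swapcache folio is referenced only by the isolating
 * caller (1) and by the page cache (one per page). */
static bool toy_folio_is_freeable(const struct toy_folio *folio)
{
	return folio->ref_count == 1 + folio->nr_pages;
}

int main(void)
{
	struct toy_folio order2 = { .ref_count = 5, .nr_pages = 4 }; /* 1 + 4 */
	struct toy_folio pinned = { .ref_count = 6, .nr_pages = 4 }; /* extra ref */

	printf("no extra refs: %s\n",
	       toy_folio_is_freeable(&order2) ? "pageout" : "PAGE_KEEP");
	printf("one extra ref: %s\n",
	       toy_folio_is_freeable(&pinned) ? "pageout" : "PAGE_KEEP");
	return 0;
}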
@@ -1054,7 +1031,7 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
* When this happens, 'page' will likely just be discarded
* instead of migrated.
*/
- .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
+ .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
__GFP_NOMEMALLOC | GFP_NOWAIT,
.nid = target_nid,
.nmask = &allowed_mask,
@@ -1318,7 +1295,7 @@ retry:
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
- if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) {
+ if (folio_alloc_swap(folio)) {
int __maybe_unused order = folio_order(folio);
if (!folio_test_large(folio))
@@ -1334,7 +1311,7 @@ retry:
}
#endif
count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
- if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN))
+ if (folio_alloc_swap(folio))
goto activate_locked_split;
}
/*
@@ -1409,21 +1386,7 @@ retry:
mapping = folio_mapping(folio);
if (folio_test_dirty(folio)) {
- /*
- * Only kswapd can writeback filesystem folios
- * to avoid risk of stack overflow. But avoid
- * injecting inefficient single-folio I/O into
- * flusher writeback as much as possible: only
- * write folios when we've encountered many
- * dirty folios, and when we've already scanned
- * the rest of the LRU for clean folios and see
- * the same dirty folios again (with the reclaim
- * flag set).
- */
- if (folio_is_file_lru(folio) &&
- (!current_is_kswapd() ||
- !folio_test_reclaim(folio) ||
- !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
+ if (folio_is_file_lru(folio)) {
/*
* Immediately reclaim when written back.
* Similar in principle to folio_deactivate()
@@ -1432,7 +1395,8 @@ retry:
*/
node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
nr_pages);
- folio_set_reclaim(folio);
+ if (!folio_test_reclaim(folio))
+ folio_set_reclaim(folio);
goto activate_locked;
}
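After this hunk, reclaim no longer writes dirty file folios itself under any condition: every dirty file folio it meets is counted as NR_VMSCAN_IMMEDIATE, gets the reclaim flag set if it is not already, and is re-activated, presumably leaving the actual writeback to the flusher threads as the removed comment describes. A compact user-space sketch of the new decision (illustrative stand-in names, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

enum toy_result { TOY_ACTIVATE, TOY_CONTINUE };

struct toy_folio {
	bool dirty;
	bool file_lru;     /* file-backed, as opposed to anon/swap-backed */
	bool reclaim_flag; /* "reclaim as soon as writeback completes" hint */
};

/* Sketch of the dirty-folio branch as it reads after the patch: file
 * folios are never written from reclaim; they are flagged and activated
 * so flusher writeback plus a later scan can free them. */
static enum toy_result toy_handle_dirty(struct toy_folio *folio)
{
	if (!folio->dirty)
		return TOY_CONTINUE;        /* clean: keep processing for reclaim */
	if (folio->file_lru) {
		if (!folio->reclaim_flag)
			folio->reclaim_flag = true;
		return TOY_ACTIVATE;        /* defer writeback to the flushers */
	}
	return TOY_CONTINUE;                /* anon/shmem may still be paged out */
}

int main(void)
{
	struct toy_folio dirty_file = { .dirty = true, .file_lru = true };
	struct toy_folio dirty_anon = { .dirty = true, .file_lru = false };

	printf("dirty file folio: %s\n",
	       toy_handle_dirty(&dirty_file) == TOY_ACTIVATE ? "activate" : "continue");
	printf("dirty anon folio: %s\n",
	       toy_handle_dirty(&dirty_anon) == TOY_ACTIVATE ? "activate" : "continue");
	return 0;
}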
@@ -2054,7 +2018,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
spin_lock_irq(&lruvec->lru_lock);
move_folios_to_lru(lruvec, &folio_list);
- __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+ mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
stat.nr_demoted);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
@@ -3773,7 +3737,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
pud = pud_offset(p4d, start & P4D_MASK);
restart:
for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
- pud_t val = READ_ONCE(pud[i]);
+ pud_t val = pudp_get(pud + i);
next = pud_addr_end(addr, end);
@@ -4780,7 +4744,7 @@ retry:
reset_batch_size(walk);
}
- __mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
+ mod_lruvec_state(lruvec, PGDEMOTE_KSWAPD + reclaimer_offset(sc),
stat.nr_demoted);
item = PGSTEAL_KSWAPD + reclaimer_offset(sc);
@@ -6127,11 +6091,6 @@ again:
if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)
set_bit(PGDAT_WRITEBACK, &pgdat->flags);
- /* Allow kswapd to start writing pages during reclaim.*/
- if (sc->nr.unqueued_dirty &&
- sc->nr.unqueued_dirty == sc->nr.file_taken)
- set_bit(PGDAT_DIRTY, &pgdat->flags);
-
/*
* If kswapd scans pages marked for immediate
* reclaim and under writeback (nr_immediate), it
@@ -6872,7 +6831,6 @@ static void clear_pgdat_congested(pg_data_t *pgdat)
clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags);
clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags);
- clear_bit(PGDAT_DIRTY, &pgdat->flags);
clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
}
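With file folios no longer written back from reclaim, the PGDAT_DIRTY node flag loses its only consumer, so the two hunks above drop the places that set and clear it. For reference, the retired handshake worked roughly as modeled below (a simplified user-space sketch based on the removed lines, with stand-in names rather than the kernel's bitops):

#include <stdio.h>
#include <stdbool.h>

/* Toy model of the removed PGDAT_DIRTY handshake. */
struct toy_node {
	bool dirty_pressure;   /* stand-in for PGDAT_DIRTY */
};

/* Old shrink_node() logic: if every file folio taken off the LRU in this
 * pass was dirty but not yet queued for writeback, flag the node. */
static void toy_note_scan(struct toy_node *node,
			  unsigned long unqueued_dirty, unsigned long file_taken)
{
	if (unqueued_dirty && unqueued_dirty == file_taken)
		node->dirty_pressure = true;
}

/* Old pageout gate: only kswapd, only for folios seen dirty again with the
 * reclaim flag set, and only once the node was flagged as dirty. */
static bool toy_may_write_file_folio(const struct toy_node *node,
				     bool is_kswapd, bool folio_reclaim_flag)
{
	return is_kswapd && folio_reclaim_flag && node->dirty_pressure;
}

int main(void)
{
	struct toy_node node = { .dirty_pressure = false };

	toy_note_scan(&node, 32, 32);   /* whole file batch was unqueued dirty */
	printf("kswapd may write: %d\n",
	       toy_may_write_file_folio(&node, true, true));
	node.dirty_pressure = false;    /* old clear_pgdat_congested() reset it */
	printf("after clearing:   %d\n",
	       toy_may_write_file_folio(&node, true, true));
	return 0;
}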
@@ -7169,7 +7127,12 @@ restart:
goto restart;
}
- if (!sc.nr_reclaimed)
+ /*
+ * If the reclaim was boosted, we might still be far from the
+ * watermark_high at this point. We need to avoid increasing the
+ * failure count to prevent the kswapd thread from stopping.
+ */
+ if (!sc.nr_reclaimed && !boosted)
atomic_inc(&pgdat->kswapd_failures);
out:
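The added guard keeps a boost-driven pass from counting as a kswapd failure: when watermarks were temporarily boosted, reclaiming nothing is expected and, per the new comment, should not push the failure count toward the point where the kswapd thread stops. A minimal user-space model of the accounting after the patch (stand-in names, not the kernel structures):

#include <stdio.h>
#include <stdbool.h>

/* Toy model: a pass that reclaimed nothing only counts as a failure when
 * watermarks were not boosted for that pass. */
struct toy_pgdat {
	int kswapd_failures;
};

static void toy_balance_pass(struct toy_pgdat *pgdat,
			     unsigned long nr_reclaimed, bool boosted)
{
	if (!nr_reclaimed && !boosted)
		pgdat->kswapd_failures++;
}

int main(void)
{
	struct toy_pgdat pgdat = { 0 };

	toy_balance_pass(&pgdat, 0, true);   /* boosted pass: not a failure */
	toy_balance_pass(&pgdat, 0, false);  /* genuine no-progress pass */
	printf("kswapd_failures = %d\n", pgdat.kswapd_failures);
	return 0;
}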
@@ -7623,9 +7586,11 @@ static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
else
nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
- /* If we can't clean pages, remove dirty pages from consideration */
- if (!(node_reclaim_mode & RECLAIM_WRITE))
- delta += node_page_state(pgdat, NR_FILE_DIRTY);
+ /*
+ * Since we can't clean folios through reclaim, remove dirty file
+ * folios from consideration.
+ */
+ delta += node_page_state(pgdat, NR_FILE_DIRTY);
/* Watch for any possible underflows due to delta */
if (unlikely(delta > nr_pagecache_reclaimable))
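The last hunk makes the dirty-file adjustment unconditional: since reclaim can no longer clean file folios itself, dirty file pages are always subtracted from the reclaimable page-cache estimate, with the existing underflow guard covering the case where the delta overshoots the total. A small worked example of that arithmetic (user-space sketch with stand-in names):

#include <stdio.h>

/* Toy version of the reclaimable-pagecache estimate: subtract pages that
 * reclaim cannot act on (here only dirty file pages), guarding against the
 * counters racing past each other. */
static unsigned long toy_pagecache_reclaimable(unsigned long file_pages,
					       unsigned long dirty_file_pages)
{
	unsigned long reclaimable = file_pages;
	unsigned long delta = dirty_file_pages;

	/* Watch for any possible underflows due to delta. */
	if (delta > reclaimable)
		delta = reclaimable;

	return reclaimable - delta;
}

int main(void)
{
	printf("%lu\n", toy_pagecache_reclaimable(1000, 150)); /* 850 */
	printf("%lu\n", toy_pagecache_reclaimable(100, 300));  /* clamped to 0 */
	return 0;
}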