Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  468
1 file changed, 220 insertions(+), 248 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 500a09a48dfd..4fc322f7111a 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -45,33 +45,29 @@
/* How many pages do we try to swap or page in/out together? As a power of 2 */
int page_cluster;
-const int page_cluster_max = 31;
+static const int page_cluster_max = 31;
-/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
-struct lru_rotate {
- local_lock_t lock;
- struct folio_batch fbatch;
-};
-static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
- .lock = INIT_LOCAL_LOCK(lock),
-};
-
-/*
- * The following folio batches are grouped together because they are protected
- * by disabling preemption (and interrupts remain enabled).
- */
struct cpu_fbatches {
+ /*
+ * The following folio batches are grouped together because they are protected
+ * by disabling preemption (and interrupts remain enabled).
+ */
local_lock_t lock;
struct folio_batch lru_add;
struct folio_batch lru_deactivate_file;
struct folio_batch lru_deactivate;
struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
- struct folio_batch activate;
+ struct folio_batch lru_activate;
#endif
+ /* Protecting the following batches which require disabling interrupts */
+ local_lock_t lock_irq;
+ struct folio_batch lru_move_tail;
};
+
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
.lock = INIT_LOCAL_LOCK(lock),
+ .lock_irq = INIT_LOCAL_LOCK(lock_irq),
};
static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
@@ -82,20 +78,6 @@ static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
lruvec_del_folio(*lruvecp, folio);
__folio_clear_lru_flags(folio);
}
-
- /*
- * In rare cases, when truncation or holepunching raced with
- * munlock after VM_LOCKED was cleared, Mlocked may still be
- * found set here. This does not indicate a problem, unless
- * "unevictable_pgs_cleared" appears worryingly large.
- */
- if (unlikely(folio_test_mlocked(folio))) {
- long nr_pages = folio_nr_pages(folio);
-
- __folio_clear_mlocked(folio);
- zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
- count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
- }
}
/*
@@ -112,71 +94,28 @@ static void page_cache_release(struct folio *folio)
unlock_page_lruvec_irqrestore(lruvec, flags);
}
-static void __folio_put_small(struct folio *folio)
-{
- page_cache_release(folio);
- mem_cgroup_uncharge(folio);
- free_unref_page(&folio->page, 0);
-}
-
-static void __folio_put_large(struct folio *folio)
-{
- /*
- * __page_cache_release() is supposed to be called for thp, not for
- * hugetlb. This is because hugetlb page does never have PageLRU set
- * (it's never listed to any LRU lists) and no memcg routines should
- * be called for hugetlb (it has a separate hugetlb_cgroup.)
- */
- if (!folio_test_hugetlb(folio))
- page_cache_release(folio);
- destroy_large_folio(folio);
-}
-
void __folio_put(struct folio *folio)
{
- if (unlikely(folio_is_zone_device(folio)))
- free_zone_device_page(&folio->page);
- else if (unlikely(folio_test_large(folio)))
- __folio_put_large(folio);
- else
- __folio_put_small(folio);
-}
-EXPORT_SYMBOL(__folio_put);
-
-/**
- * put_pages_list() - release a list of pages
- * @pages: list of pages threaded on page->lru
- *
- * Release a list of pages which are strung together on page.lru.
- */
-void put_pages_list(struct list_head *pages)
-{
- struct folio_batch fbatch;
- struct folio *folio, *next;
+ if (unlikely(folio_is_zone_device(folio))) {
+ free_zone_device_folio(folio);
+ return;
+ }
- folio_batch_init(&fbatch);
- list_for_each_entry_safe(folio, next, pages, lru) {
- if (!folio_put_testzero(folio))
- continue;
- if (folio_test_large(folio)) {
- __folio_put_large(folio);
- continue;
- }
- /* LRU flag must be clear because it's passed using the lru */
- if (folio_batch_add(&fbatch, folio) > 0)
- continue;
- free_unref_folios(&fbatch);
+ if (folio_test_hugetlb(folio)) {
+ free_huge_folio(folio);
+ return;
}
- if (fbatch.nr)
- free_unref_folios(&fbatch);
- INIT_LIST_HEAD(pages);
+ page_cache_release(folio);
+ folio_unqueue_deferred_split(folio);
+ mem_cgroup_uncharge(folio);
+ free_frozen_pages(&folio->page, folio_order(folio));
}
-EXPORT_SYMBOL(put_pages_list);
+EXPORT_SYMBOL(__folio_put);
typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);
-static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_add(struct lruvec *lruvec, struct folio *folio)
{
int was_unevictable = folio_test_clear_unevictable(folio);
long nr_pages = folio_nr_pages(folio);
@@ -225,10 +164,6 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
for (i = 0; i < folio_batch_count(fbatch); i++) {
struct folio *folio = fbatch->folios[i];
- /* block memcg migration while the folio moves between lru */
- if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
- continue;
-
folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
move_fn(lruvec, folio);
@@ -240,23 +175,50 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
folios_put(fbatch);
}
-static void folio_batch_add_and_move(struct folio_batch *fbatch,
- struct folio *folio, move_fn_t move_fn)
+static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
+ struct folio *folio, move_fn_t move_fn,
+ bool on_lru, bool disable_irq)
{
- if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
- !lru_cache_disabled())
+ unsigned long flags;
+
+ if (on_lru && !folio_test_clear_lru(folio))
return;
- folio_batch_move_lru(fbatch, move_fn);
+
+ folio_get(folio);
+
+ if (disable_irq)
+ local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
+ else
+ local_lock(&cpu_fbatches.lock);
+
+ if (!folio_batch_add(this_cpu_ptr(fbatch), folio) || folio_test_large(folio) ||
+ lru_cache_disabled())
+ folio_batch_move_lru(this_cpu_ptr(fbatch), move_fn);
+
+ if (disable_irq)
+ local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
+ else
+ local_unlock(&cpu_fbatches.lock);
}
-static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
+#define folio_batch_add_and_move(folio, op, on_lru) \
+ __folio_batch_add_and_move( \
+ &cpu_fbatches.op, \
+ folio, \
+ op, \
+ on_lru, \
+ offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
+ )
+
+static void lru_move_tail(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_unevictable(folio)) {
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- lruvec_add_folio_tail(lruvec, folio);
- __count_vm_events(PGROTATED, folio_nr_pages(folio));
- }
+ if (folio_test_unevictable(folio))
+ return;
+
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ lruvec_add_folio_tail(lruvec, folio);
+ __count_vm_events(PGROTATED, folio_nr_pages(folio));
}
/*
@@ -268,17 +230,11 @@ static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
*/
void folio_rotate_reclaimable(struct folio *folio)
{
- if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
- !folio_test_unevictable(folio) && folio_test_lru(folio)) {
- struct folio_batch *fbatch;
- unsigned long flags;
+ if (folio_test_locked(folio) || folio_test_dirty(folio) ||
+ folio_test_unevictable(folio))
+ return;
- folio_get(folio);
- local_lock_irqsave(&lru_rotate.lock, flags);
- fbatch = this_cpu_ptr(&lru_rotate.fbatch);
- folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
- local_unlock_irqrestore(&lru_rotate.lock, flags);
- }
+ folio_batch_add_and_move(folio, lru_move_tail, true);
}
void lru_note_cost(struct lruvec *lruvec, bool file,
@@ -339,43 +295,38 @@ void lru_note_cost_refault(struct folio *folio)
folio_nr_pages(folio), 0);
}
-static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_activate(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_set_active(folio);
- lruvec_add_folio(lruvec, folio);
- trace_mm_lru_activate(folio);
+ if (folio_test_active(folio) || folio_test_unevictable(folio))
+ return;
- __count_vm_events(PGACTIVATE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
- nr_pages);
- }
+
+ lruvec_del_folio(lruvec, folio);
+ folio_set_active(folio);
+ lruvec_add_folio(lruvec, folio);
+ trace_mm_lru_activate(folio);
+
+ __count_vm_events(PGACTIVATE, nr_pages);
+ count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, nr_pages);
}
#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
- struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);
+ struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu);
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, folio_activate_fn);
+ folio_batch_move_lru(fbatch, lru_activate);
}
void folio_activate(struct folio *folio)
{
- if (folio_test_lru(folio) && !folio_test_active(folio) &&
- !folio_test_unevictable(folio)) {
- struct folio_batch *fbatch;
+ if (folio_test_active(folio) || folio_test_unevictable(folio))
+ return;
- folio_get(folio);
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.activate);
- folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ folio_batch_add_and_move(folio, lru_activate, true);
}
#else
@@ -387,12 +338,13 @@ void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
- if (folio_test_clear_lru(folio)) {
- lruvec = folio_lruvec_lock_irq(folio);
- folio_activate_fn(lruvec, folio);
- unlock_page_lruvec_irq(lruvec);
- folio_set_lru(folio);
- }
+ if (!folio_test_clear_lru(folio))
+ return;
+
+ lruvec = folio_lruvec_lock_irq(folio);
+ lru_activate(lruvec, folio);
+ unlock_page_lruvec_irq(lruvec);
+ folio_set_lru(folio);
}
#endif
@@ -427,53 +379,79 @@ static void __lru_cache_activate_folio(struct folio *folio)
}
#ifdef CONFIG_LRU_GEN
-static void folio_inc_refs(struct folio *folio)
+
+static void lru_gen_inc_refs(struct folio *folio)
{
unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
if (folio_test_unevictable(folio))
return;
+ /* see the comment on LRU_REFS_FLAGS */
if (!folio_test_referenced(folio)) {
- folio_set_referenced(folio);
+ set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced));
return;
}
- if (!folio_test_workingset(folio)) {
- folio_set_workingset(folio);
- return;
- }
-
- /* see the comment on MAX_NR_TIERS */
do {
- new_flags = old_flags & LRU_REFS_MASK;
- if (new_flags == LRU_REFS_MASK)
- break;
+ if ((old_flags & LRU_REFS_MASK) == LRU_REFS_MASK) {
+ if (!folio_test_workingset(folio))
+ folio_set_workingset(folio);
+ return;
+ }
- new_flags += BIT(LRU_REFS_PGOFF);
- new_flags |= old_flags & ~LRU_REFS_MASK;
+ new_flags = old_flags + BIT(LRU_REFS_PGOFF);
} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
}
-#else
-static void folio_inc_refs(struct folio *folio)
+
+static bool lru_gen_clear_refs(struct folio *folio)
{
+ struct lru_gen_folio *lrugen;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+
+ if (gen < 0)
+ return true;
+
+ set_mask_bits(&folio->flags, LRU_REFS_FLAGS | BIT(PG_workingset), 0);
+
+ lrugen = &folio_lruvec(folio)->lrugen;
+ /* whether this can be done without shuffling under the LRU lock */
+ return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type]));
}
+
+#else /* !CONFIG_LRU_GEN */
+
+static void lru_gen_inc_refs(struct folio *folio)
+{
+}
+
+static bool lru_gen_clear_refs(struct folio *folio)
+{
+ return false;
+}
+
#endif /* CONFIG_LRU_GEN */
-/*
- * Mark a page as having seen activity.
+/**
+ * folio_mark_accessed - Mark a folio as having seen activity.
+ * @folio: The folio to mark.
+ *
+ * This function will perform one of the following transitions:
*
- * inactive,unreferenced -> inactive,referenced
- * inactive,referenced -> active,unreferenced
- * active,unreferenced -> active,referenced
+ * * inactive,unreferenced -> inactive,referenced
+ * * inactive,referenced -> active,unreferenced
+ * * active,unreferenced -> active,referenced
*
- * When a newly allocated page is not yet visible, so safe for non-atomic ops,
- * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
+ * When a newly allocated folio is not yet visible, so safe for non-atomic ops,
+ * __folio_set_referenced() may be substituted for folio_mark_accessed().
*/
void folio_mark_accessed(struct folio *folio)
{
+ if (folio_test_dropbehind(folio))
+ return;
if (lru_gen_enabled()) {
- folio_inc_refs(folio);
+ lru_gen_inc_refs(folio);
return;
}
@@ -488,7 +466,7 @@ void folio_mark_accessed(struct folio *folio)
} else if (!folio_test_active(folio)) {
/*
* If the folio is on the LRU, queue it for activation via
- * cpu_fbatches.activate. Otherwise, assume the folio is in a
+ * cpu_fbatches.lru_activate. Otherwise, assume the folio is in a
* folio_batch, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
@@ -515,22 +493,16 @@ EXPORT_SYMBOL(folio_mark_accessed);
*/
void folio_add_lru(struct folio *folio)
{
- struct folio_batch *fbatch;
-
VM_BUG_ON_FOLIO(folio_test_active(folio) &&
folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
- /* see the comment in lru_gen_add_folio() */
+ /* see the comment in lru_gen_folio_seq() */
if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
folio_set_active(folio);
- folio_get(folio);
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
- folio_batch_add_and_move(fbatch, folio, lru_add_fn);
- local_unlock(&cpu_fbatches.lock);
+ folio_batch_add_and_move(folio, lru_add, false);
}
EXPORT_SYMBOL(folio_add_lru);
@@ -573,9 +545,9 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
* written out by flusher threads as this is much more efficient
* than the single-page writeout from reclaim.
*/
-static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio)
{
- bool active = folio_test_active(folio);
+ bool active = folio_test_active(folio) || lru_gen_enabled();
long nr_pages = folio_nr_pages(folio);
if (folio_test_unevictable(folio))
@@ -609,48 +581,51 @@ static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
if (active) {
__count_vm_events(PGDEACTIVATE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
+ count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
nr_pages);
}
}
-static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_deactivate(struct lruvec *lruvec, struct folio *folio)
{
- if (!folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
- folio_clear_referenced(folio);
- lruvec_add_folio(lruvec, folio);
+ if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled()))
+ return;
- __count_vm_events(PGDEACTIVATE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
- nr_pages);
- }
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ folio_clear_referenced(folio);
+ lruvec_add_folio(lruvec, folio);
+
+ __count_vm_events(PGDEACTIVATE, nr_pages);
+ count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, nr_pages);
}
-static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
+static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio)
{
- if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
- !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
- long nr_pages = folio_nr_pages(folio);
+ long nr_pages = folio_nr_pages(folio);
- lruvec_del_folio(lruvec, folio);
- folio_clear_active(folio);
+ if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+ folio_test_swapcache(folio) || folio_test_unevictable(folio))
+ return;
+
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ if (lru_gen_enabled())
+ lru_gen_clear_refs(folio);
+ else
folio_clear_referenced(folio);
- /*
- * Lazyfree folios are clean anonymous folios. They have
- * the swapbacked flag cleared, to distinguish them from normal
- * anonymous folios
- */
- folio_clear_swapbacked(folio);
- lruvec_add_folio(lruvec, folio);
+ /*
+ * Lazyfree folios are clean anonymous folios. They have
+ * the swapbacked flag cleared, to distinguish them from normal
+ * anonymous folios
+ */
+ folio_clear_swapbacked(folio);
+ lruvec_add_folio(lruvec, folio);
- __count_vm_events(PGLAZYFREE, nr_pages);
- __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
- nr_pages);
- }
+ __count_vm_events(PGLAZYFREE, nr_pages);
+ count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, nr_pages);
}
/*
@@ -664,30 +639,30 @@ void lru_add_drain_cpu(int cpu)
struct folio_batch *fbatch = &fbatches->lru_add;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_add_fn);
+ folio_batch_move_lru(fbatch, lru_add);
- fbatch = &per_cpu(lru_rotate.fbatch, cpu);
+ fbatch = &fbatches->lru_move_tail;
/* Disabling interrupts below acts as a compiler barrier. */
if (data_race(folio_batch_count(fbatch))) {
unsigned long flags;
/* No harm done if a racing interrupt already did this */
- local_lock_irqsave(&lru_rotate.lock, flags);
- folio_batch_move_lru(fbatch, lru_move_tail_fn);
- local_unlock_irqrestore(&lru_rotate.lock, flags);
+ local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
+ folio_batch_move_lru(fbatch, lru_move_tail);
+ local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
}
fbatch = &fbatches->lru_deactivate_file;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
+ folio_batch_move_lru(fbatch, lru_deactivate_file);
fbatch = &fbatches->lru_deactivate;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_deactivate_fn);
+ folio_batch_move_lru(fbatch, lru_deactivate);
fbatch = &fbatches->lru_lazyfree;
if (folio_batch_count(fbatch))
- folio_batch_move_lru(fbatch, lru_lazyfree_fn);
+ folio_batch_move_lru(fbatch, lru_lazyfree);
folio_activate_drain(cpu);
}
@@ -704,17 +679,14 @@ void lru_add_drain_cpu(int cpu)
*/
void deactivate_file_folio(struct folio *folio)
{
- struct folio_batch *fbatch;
-
/* Deactivating an unevictable folio will not accelerate reclaim */
if (folio_test_unevictable(folio))
return;
- folio_get(folio);
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
- folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
- local_unlock(&cpu_fbatches.lock);
+ if (lru_gen_enabled() && lru_gen_clear_refs(folio))
+ return;
+
+ folio_batch_add_and_move(folio, lru_deactivate_file, true);
}
/*
@@ -727,16 +699,13 @@ void deactivate_file_folio(struct folio *folio)
*/
void folio_deactivate(struct folio *folio)
{
- if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
- (folio_test_active(folio) || lru_gen_enabled())) {
- struct folio_batch *fbatch;
+ if (folio_test_unevictable(folio))
+ return;
- folio_get(folio);
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
- folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio))
+ return;
+
+ folio_batch_add_and_move(folio, lru_deactivate, true);
}
/**
@@ -748,17 +717,11 @@ void folio_deactivate(struct folio *folio)
*/
void folio_mark_lazyfree(struct folio *folio)
{
- if (folio_test_lru(folio) && folio_test_anon(folio) &&
- folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
- !folio_test_unevictable(folio)) {
- struct folio_batch *fbatch;
+ if (!folio_test_anon(folio) || !folio_test_swapbacked(folio) ||
+ folio_test_swapcache(folio) || folio_test_unevictable(folio))
+ return;
- folio_get(folio);
- local_lock(&cpu_fbatches.lock);
- fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
- folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
- local_unlock(&cpu_fbatches.lock);
- }
+ folio_batch_add_and_move(folio, lru_lazyfree, true);
}
void lru_add_drain(void)
@@ -808,11 +771,11 @@ static bool cpu_needs_drain(unsigned int cpu)
/* Check these in order of likelihood that they're not zero */
return folio_batch_count(&fbatches->lru_add) ||
- data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
+ folio_batch_count(&fbatches->lru_move_tail) ||
folio_batch_count(&fbatches->lru_deactivate_file) ||
folio_batch_count(&fbatches->lru_deactivate) ||
folio_batch_count(&fbatches->lru_lazyfree) ||
- folio_batch_count(&fbatches->activate) ||
+ folio_batch_count(&fbatches->lru_activate) ||
need_mlock_drain(cpu) ||
has_bh_in_lru(cpu, NULL);
}
@@ -930,8 +893,8 @@ atomic_t lru_disable_count = ATOMIC_INIT(0);
/*
* lru_cache_disable() needs to be called before we start compiling
- * a list of pages to be migrated using isolate_lru_page().
- * It drains pages on LRU cache and then disable on all cpus until
+ * a list of folios to be migrated using folio_isolate_lru().
+ * It drains folios on the LRU cache and then disables it on all cpus until
* lru_cache_enable is called.
*
* Must be paired with a call to lru_cache_enable().
@@ -985,7 +948,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
struct folio *folio = folios->folios[i];
unsigned int nr_refs = refs ? refs[i] : 1;
- if (is_huge_zero_page(&folio->page))
+ if (is_huge_zero_folio(folio))
continue;
if (folio_is_zone_device(folio)) {
@@ -993,10 +956,8 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
unlock_page_lruvec_irqrestore(lruvec, flags);
lruvec = NULL;
}
- if (put_devmap_managed_page_refs(&folio->page, nr_refs))
- continue;
if (folio_ref_sub_and_test(folio, nr_refs))
- free_zone_device_page(&folio->page);
+ free_zone_device_folio(folio);
continue;
}
@@ -1012,10 +973,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
free_huge_folio(folio);
continue;
}
- if (folio_test_large(folio) &&
- folio_test_large_rmappable(folio))
- folio_undo_large_rmappable(folio);
-
+ folio_unqueue_deferred_split(folio);
__page_cache_release(folio, &lruvec, &flags);
if (j != i)
@@ -1116,6 +1074,18 @@ void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
fbatch->nr = j;
}
+static const struct ctl_table swap_sysctl_table[] = {
+ {
+ .procname = "page-cluster",
+ .data = &page_cluster,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = (void *)&page_cluster_max,
+ }
+};
+
/*
* Perform any setup for the swap system
*/
@@ -1132,4 +1102,6 @@ void __init swap_setup(void)
* Right now other parts of the system mean that we
* _really_ don't want to cluster much more
*/
+
+ register_sysctl_init("vm", swap_sysctl_table);
}