author	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-01 08:47:59 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-01 08:47:59 -0700
commit	49f8275c7d9247cf1dd4440fc8162f784252c849 (patch)
tree	7caefaa8b68d3162f60ecef7bafacbed0e1056d8 /mm/swap.c
parent	8bb7eca972ad531c9b149c0a51ab43a417385813 (diff)
parent	121703c1c817b3c77f61002466d0bfca7e39f25d (diff)
Merge tag 'folio-5.16' of git://git.infradead.org/users/willy/pagecache
Pull memory folios from Matthew Wilcox:
 "Add memory folios, a new type to represent either order-0 pages or
  the head page of a compound page. This should be enough infrastructure
  to support filesystems converting from pages to folios.

  The point of all this churn is to allow filesystems and the page cache
  to manage memory in larger chunks than PAGE_SIZE. The original plan
  was to use compound pages like THP does, but I ran into problems with
  some functions expecting only a head page while others expect the
  precise page containing a particular byte.

  The folio type allows a function to declare that it's expecting only
  a head page. Almost incidentally, this allows us to remove various
  calls to VM_BUG_ON(PageTail(page)) and compound_head().

  This converts just parts of the core MM and the page cache. For 5.17,
  we intend to convert various filesystems (XFS and AFS are ready; other
  filesystems may make it) and also convert more of the MM and page
  cache to folios. For 5.18, multi-page folios should be ready.

  The multi-page folios offer some improvement to some workloads. The
  80% win is real, but appears to be an artificial benchmark (postgres
  startup, which isn't a serious workload). Real workloads (eg building
  the kernel, running postgres in a steady state, etc) seem to benefit
  between 0-10%. I haven't heard of any performance losses as a result
  of this series.

  Nobody has done any serious performance tuning; I imagine that
  tweaking the readahead algorithm could provide some more interesting
  wins. There are also other places where we could choose to create
  large folios and currently do not, such as writes that are larger
  than PAGE_SIZE.

  I'd like to thank all my reviewers who've offered review/ack tags:
  Christoph Hellwig, David Howells, Jan Kara, Jeff Layton, Johannes
  Weiner, Kirill A. Shutemov, Michal Hocko, Mike Rapoport, Vlastimil
  Babka, William Kucharski, Yu Zhao and Zi Yan.

  I'd also like to thank those who gave feedback I incorporated but
  haven't offered up review tags for this part of the series: Nick
  Piggin, Mel Gorman, Ming Lei, Darrick Wong, Ted Ts'o, John Hubbard,
  Hugh Dickins, and probably a few others who I forget"

* tag 'folio-5.16' of git://git.infradead.org/users/willy/pagecache: (90 commits)
  mm/writeback: Add folio_write_one
  mm/filemap: Add FGP_STABLE
  mm/filemap: Add filemap_get_folio
  mm/filemap: Convert mapping_get_entry to return a folio
  mm/filemap: Add filemap_add_folio()
  mm/filemap: Add filemap_alloc_folio
  mm/page_alloc: Add folio allocation functions
  mm/lru: Add folio_add_lru()
  mm/lru: Convert __pagevec_lru_add_fn to take a folio
  mm: Add folio_evictable()
  mm/workingset: Convert workingset_refault() to take a folio
  mm/filemap: Add readahead_folio()
  mm/filemap: Add folio_mkwrite_check_truncate()
  mm/filemap: Add i_blocks_per_folio()
  mm/writeback: Add folio_redirty_for_writepage()
  mm/writeback: Add folio_account_redirty()
  mm/writeback: Add folio_clear_dirty_for_io()
  mm/writeback: Add folio_cancel_dirty()
  mm/writeback: Add folio_account_cleaned()
  mm/writeback: Add filemap_dirty_folio()
  ...
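To make the conversion pattern in the diff below concrete, here is a minimal standalone C sketch of the core idea. This is not kernel code: the struct layouts are simplified stand-ins, and the real page_folio() is a macro over _compound_head(). The point it illustrates is that page_folio() resolves any page, head or tail, to its folio, so a function taking a struct folio * cannot be handed a tail page by construction and needs no compound_head() calls.

	#include <stdio.h>

	/* Simplified stand-ins for the kernel types; real layouts differ. */
	struct page {
		unsigned long compound_head;	/* low bit set => tail page */
		struct page *head;		/* stand-in for the head pointer */
		unsigned long nr_pages;		/* only meaningful on the head page */
	};

	/* A folio is "a page that is known not to be a tail page". */
	struct folio { struct page page; };

	/* Sketch of page_folio(): always lands on the head page. */
	static struct folio *page_folio(struct page *page)
	{
		if (page->compound_head & 1)	/* tail page? */
			page = page->head;	/* follow to the head */
		return (struct folio *)page;
	}

	/* With a folio in hand, no compound_head() call is needed. */
	static unsigned long folio_nr_pages(struct folio *folio)
	{
		return folio->page.nr_pages;
	}

	int main(void)
	{
		struct page head = { .compound_head = 0, .nr_pages = 4 };
		struct page tail = { .compound_head = 1, .head = &head };

		/* Both the head and a tail page resolve to the same folio. */
		printf("%lu\n", folio_nr_pages(page_folio(&head)));	/* 4 */
		printf("%lu\n", folio_nr_pages(page_folio(&tail)));	/* 4 */
		return 0;
	}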
Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	197
1 file changed, 103 insertions, 94 deletions
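Several hunks in the diff below share one pattern: rather than taking the lruvec lock once per page, pages are staged in a per-CPU pagevec and moved in batches, so the lock is acquired once per PAGEVEC_SIZE pages. A minimal userspace sketch of that batching follows; it is illustrative only, with the per-CPU placement, the local_lock, and the lru_cache_disabled() early-flush check all omitted.

	#include <stdio.h>

	#define PAGEVEC_SIZE 15			/* the kernel's batch size */

	struct item { int id; };		/* stand-in for struct page */

	struct pagevec {
		unsigned int nr;
		struct item *items[PAGEVEC_SIZE];
	};

	/* Flush: in the kernel this takes the lruvec lock once per batch. */
	static void pagevec_flush(struct pagevec *pvec)
	{
		/* lock; move all pvec->nr items to their LRU lists; unlock */
		printf("flushing %u items under one lock acquisition\n", pvec->nr);
		pvec->nr = 0;
	}

	/* Add one item; flush only when the batch fills up. */
	static void batched_add(struct pagevec *pvec, struct item *it)
	{
		pvec->items[pvec->nr++] = it;
		if (pvec->nr == PAGEVEC_SIZE)
			pagevec_flush(pvec);
	}

	int main(void)
	{
		struct pagevec pvec = { 0 };
		struct item items[40];

		/* 40 adds cost two full flushes plus one final drain. */
		for (int i = 0; i < 40; i++) {
			items[i].id = i;
			batched_add(&pvec, &items[i]);
		}
		if (pvec.nr)
			pagevec_flush(&pvec);	/* drain the remainder */
		return 0;
	}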
diff --git a/mm/swap.c b/mm/swap.c
index af3cad4e5378..8ff9ba7cf2de 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -80,10 +80,11 @@ static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
static void __page_cache_release(struct page *page)
{
if (PageLRU(page)) {
+ struct folio *folio = page_folio(page);
struct lruvec *lruvec;
unsigned long flags;
- lruvec = lock_page_lruvec_irqsave(page, &flags);
+ lruvec = folio_lruvec_lock_irqsave(folio, &flags);
del_page_from_lru_list(page, lruvec);
__clear_page_lru_flags(page);
unlock_page_lruvec_irqrestore(lruvec, flags);
@@ -94,7 +95,7 @@ static void __page_cache_release(struct page *page)
static void __put_single_page(struct page *page)
{
__page_cache_release(page);
- mem_cgroup_uncharge(page);
+ mem_cgroup_uncharge(page_folio(page));
free_unref_page(page, 0);
}
@@ -188,12 +189,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
+ struct folio *folio = page_folio(page);
/* block memcg migration during page moving between lru */
if (!TestClearPageLRU(page))
continue;
- lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
+ lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
(*move_fn)(page, lruvec);
SetPageLRU(page);
@@ -206,11 +208,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
- if (!PageUnevictable(page)) {
- del_page_from_lru_list(page, lruvec);
- ClearPageActive(page);
- add_page_to_lru_list_tail(page, lruvec);
- __count_vm_events(PGROTATED, thp_nr_pages(page));
+ struct folio *folio = page_folio(page);
+
+ if (!folio_test_unevictable(folio)) {
+ lruvec_del_folio(lruvec, folio);
+ folio_clear_active(folio);
+ lruvec_add_folio_tail(lruvec, folio);
+ __count_vm_events(PGROTATED, folio_nr_pages(folio));
}
}
@@ -227,23 +231,23 @@ static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
}
/*
- * Writeback is about to end against a page which has been marked for immediate
- * reclaim. If it still appears to be reclaimable, move it to the tail of the
- * inactive list.
+ * Writeback is about to end against a folio which has been marked for
+ * immediate reclaim. If it still appears to be reclaimable, move it
+ * to the tail of the inactive list.
*
- * rotate_reclaimable_page() must disable IRQs, to prevent nasty races.
+ * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
*/
-void rotate_reclaimable_page(struct page *page)
+void folio_rotate_reclaimable(struct folio *folio)
{
- if (!PageLocked(page) && !PageDirty(page) &&
- !PageUnevictable(page) && PageLRU(page)) {
+ if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
+ !folio_test_unevictable(folio) && folio_test_lru(folio)) {
struct pagevec *pvec;
unsigned long flags;
- get_page(page);
+ folio_get(folio);
local_lock_irqsave(&lru_rotate.lock, flags);
pvec = this_cpu_ptr(&lru_rotate.pvec);
- if (pagevec_add_and_need_flush(pvec, page))
+ if (pagevec_add_and_need_flush(pvec, &folio->page))
pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
local_unlock_irqrestore(&lru_rotate.lock, flags);
}
@@ -289,21 +293,21 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
} while ((lruvec = parent_lruvec(lruvec)));
}
-void lru_note_cost_page(struct page *page)
+void lru_note_cost_folio(struct folio *folio)
{
- lru_note_cost(mem_cgroup_page_lruvec(page),
- page_is_file_lru(page), thp_nr_pages(page));
+ lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
+ folio_nr_pages(folio));
}
-static void __activate_page(struct page *page, struct lruvec *lruvec)
+static void __folio_activate(struct folio *folio, struct lruvec *lruvec)
{
- if (!PageActive(page) && !PageUnevictable(page)) {
- int nr_pages = thp_nr_pages(page);
+ if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
+ long nr_pages = folio_nr_pages(folio);
- del_page_from_lru_list(page, lruvec);
- SetPageActive(page);
- add_page_to_lru_list(page, lruvec);
- trace_mm_lru_activate(page);
+ lruvec_del_folio(lruvec, folio);
+ folio_set_active(folio);
+ lruvec_add_folio(lruvec, folio);
+ trace_mm_lru_activate(folio);
__count_vm_events(PGACTIVATE, nr_pages);
__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
@@ -312,6 +316,11 @@ static void __activate_page(struct page *page, struct lruvec *lruvec)
}
#ifdef CONFIG_SMP
+static void __activate_page(struct page *page, struct lruvec *lruvec)
+{
+ return __folio_activate(page_folio(page), lruvec);
+}
+
static void activate_page_drain(int cpu)
{
struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);
@@ -325,16 +334,16 @@ static bool need_activate_page_drain(int cpu)
return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}
-static void activate_page(struct page *page)
+static void folio_activate(struct folio *folio)
{
- page = compound_head(page);
- if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+ if (folio_test_lru(folio) && !folio_test_active(folio) &&
+ !folio_test_unevictable(folio)) {
struct pagevec *pvec;
+ folio_get(folio);
local_lock(&lru_pvecs.lock);
pvec = this_cpu_ptr(&lru_pvecs.activate_page);
- get_page(page);
- if (pagevec_add_and_need_flush(pvec, page))
+ if (pagevec_add_and_need_flush(pvec, &folio->page))
pagevec_lru_move_fn(pvec, __activate_page);
local_unlock(&lru_pvecs.lock);
}
@@ -345,21 +354,20 @@ static inline void activate_page_drain(int cpu)
{
}
-static void activate_page(struct page *page)
+static void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
- page = compound_head(page);
- if (TestClearPageLRU(page)) {
- lruvec = lock_page_lruvec_irq(page);
- __activate_page(page, lruvec);
+ if (folio_test_clear_lru(folio)) {
+ lruvec = folio_lruvec_lock_irq(folio);
+ __folio_activate(folio, lruvec);
unlock_page_lruvec_irq(lruvec);
- SetPageLRU(page);
+ folio_set_lru(folio);
}
}
#endif
-static void __lru_cache_activate_page(struct page *page)
+static void __lru_cache_activate_folio(struct folio *folio)
{
struct pagevec *pvec;
int i;
@@ -380,8 +388,8 @@ static void __lru_cache_activate_page(struct page *page)
for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
struct page *pagevec_page = pvec->pages[i];
- if (pagevec_page == page) {
- SetPageActive(page);
+ if (pagevec_page == &folio->page) {
+ folio_set_active(folio);
break;
}
}
@@ -399,61 +407,59 @@ static void __lru_cache_activate_page(struct page *page)
* When a newly allocated page is not yet visible, so safe for non-atomic ops,
* __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
*/
-void mark_page_accessed(struct page *page)
+void folio_mark_accessed(struct folio *folio)
{
- page = compound_head(page);
-
- if (!PageReferenced(page)) {
- SetPageReferenced(page);
- } else if (PageUnevictable(page)) {
+ if (!folio_test_referenced(folio)) {
+ folio_set_referenced(folio);
+ } else if (folio_test_unevictable(folio)) {
/*
* Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
* this list is never rotated or maintained, so marking an
* unevictable page accessed has no effect.
*/
- } else if (!PageActive(page)) {
+ } else if (!folio_test_active(folio)) {
/*
* If the page is on the LRU, queue it for activation via
* lru_pvecs.activate_page. Otherwise, assume the page is on a
* pagevec, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
- if (PageLRU(page))
- activate_page(page);
+ if (folio_test_lru(folio))
+ folio_activate(folio);
else
- __lru_cache_activate_page(page);
- ClearPageReferenced(page);
- workingset_activation(page);
+ __lru_cache_activate_folio(folio);
+ folio_clear_referenced(folio);
+ workingset_activation(folio);
}
- if (page_is_idle(page))
- clear_page_idle(page);
+ if (folio_test_idle(folio))
+ folio_clear_idle(folio);
}
-EXPORT_SYMBOL(mark_page_accessed);
+EXPORT_SYMBOL(folio_mark_accessed);
/**
- * lru_cache_add - add a page to a page list
- * @page: the page to be added to the LRU.
+ * folio_add_lru - Add a folio to an LRU list.
+ * @folio: The folio to be added to the LRU.
*
- * Queue the page for addition to the LRU via pagevec. The decision on whether
+ * Queue the folio for addition to the LRU. The decision on whether
* to add the page to the [in]active [file|anon] list is deferred until the
- * pagevec is drained. This gives a chance for the caller of lru_cache_add()
- * have the page added to the active list using mark_page_accessed().
+ * pagevec is drained. This gives a chance for the caller of folio_add_lru()
+ * to have the folio added to the active list using folio_mark_accessed().
*/
-void lru_cache_add(struct page *page)
+void folio_add_lru(struct folio *folio)
{
struct pagevec *pvec;
- VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
- VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
- get_page(page);
+ folio_get(folio);
local_lock(&lru_pvecs.lock);
pvec = this_cpu_ptr(&lru_pvecs.lru_add);
- if (pagevec_add_and_need_flush(pvec, page))
+ if (pagevec_add_and_need_flush(pvec, &folio->page))
__pagevec_lru_add(pvec);
local_unlock(&lru_pvecs.lock);
}
-EXPORT_SYMBOL(lru_cache_add);
+EXPORT_SYMBOL(folio_add_lru);
/**
* lru_cache_add_inactive_or_unevictable
@@ -888,11 +894,12 @@ void release_pages(struct page **pages, int nr)
int i;
LIST_HEAD(pages_to_free);
struct lruvec *lruvec = NULL;
- unsigned long flags;
+ unsigned long flags = 0;
unsigned int lock_batch;
for (i = 0; i < nr; i++) {
struct page *page = pages[i];
+ struct folio *folio = page_folio(page);
/*
* Make sure the IRQ-safe lock-holding time does not get
@@ -904,7 +911,7 @@ void release_pages(struct page **pages, int nr)
lruvec = NULL;
}
- page = compound_head(page);
+ page = &folio->page;
if (is_huge_zero_page(page))
continue;
@@ -943,7 +950,7 @@ void release_pages(struct page **pages, int nr)
if (PageLRU(page)) {
struct lruvec *prev_lruvec = lruvec;
- lruvec = relock_page_lruvec_irqsave(page, lruvec,
+ lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
&flags);
if (prev_lruvec != lruvec)
lock_batch = 0;
@@ -985,17 +992,18 @@ void __pagevec_release(struct pagevec *pvec)
}
EXPORT_SYMBOL(__pagevec_release);
-static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
+static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec)
{
- int was_unevictable = TestClearPageUnevictable(page);
- int nr_pages = thp_nr_pages(page);
+ int was_unevictable = folio_test_clear_unevictable(folio);
+ long nr_pages = folio_nr_pages(folio);
- VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
/*
- * Page becomes evictable in two ways:
+ * A folio becomes evictable in two ways:
* 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
- * 2) Before acquiring LRU lock to put the page to correct LRU and then
+ * 2) Before acquiring LRU lock to put the folio on the correct LRU
+ * and then
* a) do PageLRU check with lock [check_move_unevictable_pages]
* b) do PageLRU check before lock [clear_page_mlock]
*
@@ -1004,35 +1012,36 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
*
* #0: __pagevec_lru_add_fn #1: clear_page_mlock
*
- * SetPageLRU() TestClearPageMlocked()
+ * folio_set_lru() folio_test_clear_mlocked()
* smp_mb() // explicit ordering // above provides strict
* // ordering
- * PageMlocked() PageLRU()
+ * folio_test_mlocked() folio_test_lru()
*
*
- * if '#1' does not observe setting of PG_lru by '#0' and fails
- * isolation, the explicit barrier will make sure that page_evictable
- * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
- * can be reordered after PageMlocked check and can make '#1' to fail
- * the isolation of the page whose Mlocked bit is cleared (#0 is also
- * looking at the same page) and the evictable page will be stranded
- * in an unevictable LRU.
+ * if '#1' does not observe setting of PG_lru by '#0' and
+ * fails isolation, the explicit barrier will make sure that
+ * folio_evictable check will put the folio on the correct
+ * LRU. Without smp_mb(), folio_set_lru() can be reordered
+ * after folio_test_mlocked() check and can make '#1' fail the
+ * isolation of the folio whose mlocked bit is cleared (#0 is
+ * also looking at the same folio) and the evictable folio will
+ * be stranded on an unevictable LRU.
*/
- SetPageLRU(page);
+ folio_set_lru(folio);
smp_mb__after_atomic();
- if (page_evictable(page)) {
+ if (folio_evictable(folio)) {
if (was_unevictable)
__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
} else {
- ClearPageActive(page);
- SetPageUnevictable(page);
+ folio_clear_active(folio);
+ folio_set_unevictable(folio);
if (!was_unevictable)
__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
}
- add_page_to_lru_list(page, lruvec);
- trace_mm_lru_insertion(page);
+ lruvec_add_folio(lruvec, folio);
+ trace_mm_lru_insertion(folio);
}
/*
@@ -1046,10 +1055,10 @@ void __pagevec_lru_add(struct pagevec *pvec)
unsigned long flags = 0;
for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
+ struct folio *folio = page_folio(pvec->pages[i]);
- lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
- __pagevec_lru_add_fn(page, lruvec);
+ lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
+ __pagevec_lru_add_fn(folio, lruvec);
}
if (lruvec)
unlock_page_lruvec_irqrestore(lruvec, flags);
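The ordering comment in the __pagevec_lru_add_fn() hunk above describes a classic store-buffer pattern: each side sets its own flag, issues a full barrier, then reads the other side's flag, which guarantees that at least one side observes the other. Below is a minimal userspace analogue using C11 atomics and pthreads. It is illustrative only: the kernel uses page-flag bitops and smp_mb(), not atomic_thread_fence(), and the flag names here are stand-ins for PG_lru and PG_mlocked.

	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	/* Stand-ins for the PG_lru and PG_mlocked page flags. */
	static atomic_bool lru;
	static atomic_bool mlocked;

	static bool r_mlocked, r_lru;	/* what each side observed */

	/* Side #0: __pagevec_lru_add_fn() -- set PG_lru, then test PG_mlocked. */
	static void *lru_add_side(void *arg)
	{
		atomic_store_explicit(&lru, true, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
		r_mlocked = atomic_load_explicit(&mlocked, memory_order_relaxed);
		return NULL;
	}

	/* Side #1: clear_page_mlock() -- clear PG_mlocked, then test PG_lru. */
	static void *mlock_clear_side(void *arg)
	{
		atomic_store_explicit(&mlocked, false, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);
		r_lru = atomic_load_explicit(&lru, memory_order_relaxed);
		return NULL;
	}

	int main(void)
	{
		pthread_t t0, t1;

		atomic_store(&lru, false);
		atomic_store(&mlocked, true);	/* page starts mlocked, off-LRU */

		pthread_create(&t0, NULL, lru_add_side, NULL);
		pthread_create(&t1, NULL, mlock_clear_side, NULL);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);

		/*
		 * The fences forbid the outcome where each side misses the
		 * other's store: if #0 still saw "mlocked", then #1 must have
		 * seen "lru" and can isolate the page itself.
		 */
		assert(!(r_mlocked && !r_lru));
		return 0;
	}

Without the two barriers, both loads could be reordered before the stores, each side could miss the other's update, and an evictable folio would be stranded on the unevictable list: exactly the race the kernel comment describes.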