author:    Matthew Wilcox (Oracle) <willy@infradead.org>  2021-04-27 10:47:39 -0400
committer: Matthew Wilcox (Oracle) <willy@infradead.org>  2021-10-18 07:49:39 -0400
commit:    76580b6529db622964b4ffee59ded25efcb73748
tree:      ccda10588915cc433475b9f6825936a17017367d
parent:    f2d273927ea49e2f85b660aeca26f58b8a15d385
mm/swap: Add folio_mark_accessed()
Convert mark_page_accessed() to folio_mark_accessed(). It already
operated on the entire compound page, but now we can avoid calling
compound_head() quite so many times. This shrinks the function from
424 bytes to 295 bytes (a saving of 129 bytes); the compatibility
wrapper costs 30 bytes, plus 8 bytes for the exported symbol, so the
kernel shrinks by a net 91 bytes.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
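[Editor's note] For call sites the conversion is mechanical; a minimal,
hypothetical caller-side sketch (names invented for illustration, not
part of this commit):

/* Hypothetical callers, for illustration only. */
static void note_access(struct page *page)
{
	/* Old style: keeps working through the compat wrapper below. */
	mark_page_accessed(page);
}

static void note_folio_access(struct folio *folio)
{
	/* Converted style: no compound_head() lookup needed. */
	folio_mark_accessed(folio);
}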
Diffstat (limited to 'mm')
-rw-r--r--  mm/folio-compat.c |  7
-rw-r--r--  mm/swap.c         | 34

2 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 7044fcc8a8aa..a374747ae1c6 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -5,6 +5,7 @@
*/
#include <linux/pagemap.h>
+#include <linux/swap.h>
struct address_space *page_mapping(struct page *page)
{
@@ -41,3 +42,9 @@ bool page_mapped(struct page *page)
return folio_mapped(page_folio(page));
}
EXPORT_SYMBOL(page_mapped);
+
+void mark_page_accessed(struct page *page)
+{
+ folio_mark_accessed(page_folio(page));
+}
+EXPORT_SYMBOL(mark_page_accessed);
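[Editor's note] The wrapper is behaviour-preserving because page_folio()
resolves a tail page to its head; a sketch of the equivalence (not part
of this commit):

/*
 * Illustrative only: marking any subpage of a compound page marks the
 * whole folio accessed, as the old compound_head() call used to do.
 */
static void touch_any_subpage(struct page *subpage)
{
	/* Going through the wrapper ... */
	mark_page_accessed(subpage);
	/* ... is equivalent to resolving the folio explicitly: */
	/* folio_mark_accessed(page_folio(subpage)); */
}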
diff --git a/mm/swap.c b/mm/swap.c
index 3860d6bc8c8a..b95fd6d05768 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -368,7 +368,7 @@ static void folio_activate(struct folio *folio)
}
#endif
-static void __lru_cache_activate_page(struct page *page)
+static void __lru_cache_activate_folio(struct folio *folio)
{
struct pagevec *pvec;
int i;
@@ -389,8 +389,8 @@ static void __lru_cache_activate_page(struct page *page)
for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
struct page *pagevec_page = pvec->pages[i];
- if (pagevec_page == page) {
- SetPageActive(page);
+ if (pagevec_page == &folio->page) {
+ folio_set_active(folio);
break;
}
}
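[Editor's note] The loop scans the pagevec backwards on the optimistic
assumption that the page being activated was just added to it, so the
search usually ends after one comparison. A standalone model of that
access pattern (plain C sketch, hypothetical names):

/* Sketch: newest entries live at the tail, so search newest-first. */
static bool vec_contains(void **entries, int count, const void *needle)
{
	int i;

	for (i = count - 1; i >= 0; i--)
		if (entries[i] == needle)
			return true;
	return false;
}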
@@ -408,36 +408,34 @@ static void __lru_cache_activate_page(struct page *page)
* When a newly allocated page is not yet visible, so safe for non-atomic ops,
* __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
*/
-void mark_page_accessed(struct page *page)
+void folio_mark_accessed(struct folio *folio)
{
- page = compound_head(page);
-
- if (!PageReferenced(page)) {
- SetPageReferenced(page);
- } else if (PageUnevictable(page)) {
+ if (!folio_test_referenced(folio)) {
+ folio_set_referenced(folio);
+ } else if (folio_test_unevictable(folio)) {
/*
* Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
* this list is never rotated or maintained, so marking an
* unevictable page accessed has no effect.
*/
- } else if (!PageActive(page)) {
+ } else if (!folio_test_active(folio)) {
/*
* If the page is on the LRU, queue it for activation via
* lru_pvecs.activate_page. Otherwise, assume the page is on a
* pagevec, mark it active and it'll be moved to the active
* LRU on the next drain.
*/
- if (PageLRU(page))
- folio_activate(page_folio(page));
+ if (folio_test_lru(folio))
+ folio_activate(folio);
else
- __lru_cache_activate_page(page);
- ClearPageReferenced(page);
- workingset_activation(page_folio(page));
+ __lru_cache_activate_folio(folio);
+ folio_clear_referenced(folio);
+ workingset_activation(folio);
}
- if (page_is_idle(page))
- clear_page_idle(page);
+ if (folio_test_idle(folio))
+ folio_clear_idle(folio);
}
-EXPORT_SYMBOL(mark_page_accessed);
+EXPORT_SYMBOL(folio_mark_accessed);
/**
* lru_cache_add - add a page to a page list
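[Editor's note] The patch preserves the existing second-chance
semantics: a first access sets the referenced flag, and a second access
promotes the folio to the active LRU and clears it. A userspace model of
those transitions (hypothetical names, not kernel code):

#include <stdbool.h>

/*
 * Model of the transitions implemented above:
 *   inactive,unreferenced -> inactive,referenced
 *   inactive,referenced   -> active,unreferenced
 *   active,unreferenced   -> active,referenced
 */
struct lru_model {
	bool active;
	bool referenced;
	bool unevictable;
};

static void model_mark_accessed(struct lru_model *m)
{
	if (!m->referenced) {
		m->referenced = true;		/* first touch */
	} else if (m->unevictable) {
		/* LRU_UNEVICTABLE is never rotated: nothing to do */
	} else if (!m->active) {
		m->active = true;		/* second touch: promote */
		m->referenced = false;
	}
}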