author	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-06-17 16:42:44 +0100
committer	akpm <akpm@linux-foundation.org>	2022-07-03 18:08:44 -0700
commit	b8cecb9376b9d3031cf62b476a0db087b6b01072
tree	84e082c50582d71530e004d09371c3be0f7c4dd6
parent	64fe24a3e05e5f3ac56fcd45afd2fd1d9cc8fcb6
mm/vmscan: convert reclaim_clean_pages_from_list() to folios
Patch series "Convert much of vmscan to folios".

vmscan always operates on folios since it puts the pages on the LRU list.  Switching all of these functions from pages to folios saves 1483 bytes of text from removing all the baggage around calling compound_head() and similar functions.

This patch (of 5):

This is a straightforward conversion which removes several hidden calls to compound_head, saving 330 bytes of kernel text.

Link: https://lkml.kernel.org/r/20220617154248.700416-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20220617154248.700416-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
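Background on the savings the message cites (context, not part of the commit): each legacy PageFoo() test first re-derives the folio from the page, so every call site pays for a hidden compound_head() lookup that the native folio API skips. A minimal user-space sketch of that pattern, using simplified stand-in types rather than the real kernel definitions:

/*
 * Illustrative model only: simplified stand-ins for struct page,
 * struct folio and page_folio(), not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct folio {
	bool dirty;
};

struct page {
	struct folio *head;	/* stand-in for the compound_head() link */
};

/* folio API: the caller already holds the head, no extra lookup */
static bool folio_test_dirty(const struct folio *folio)
{
	return folio->dirty;
}

/* legacy API: every call re-derives the folio first */
static struct folio *page_folio(struct page *page)
{
	return page->head;	/* the hidden per-call step */
}

static bool PageDirty(struct page *page)
{
	return folio_test_dirty(page_folio(page));
}

int main(void)
{
	struct folio f = { .dirty = true };
	struct page p = { .head = &f };

	/* same answer; the folio call avoids the indirection */
	printf("%d %d\n", PageDirty(&p), folio_test_dirty(&f));
	return 0;
}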
-rw-r--r--	include/linux/page-flags.h	6
-rw-r--r--	mm/vmscan.c	22
2 files changed, 17 insertions, 11 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e66f7aa3191d..f32aade2a6e0 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -670,6 +670,12 @@ static __always_inline bool PageAnon(struct page *page)
 	return folio_test_anon(page_folio(page));
 }
 
+static __always_inline bool __folio_test_movable(const struct folio *folio)
+{
+	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
+			PAGE_MAPPING_MOVABLE;
+}
+
 static __always_inline int __PageMovable(struct page *page)
 {
 	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
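A note on what the new helper tests (background, not from the diff): the kernel reuses the low bits of the mapping pointer as type tags, and a movable mapping is one whose tag bits equal PAGE_MAPPING_MOVABLE. A standalone sketch of that low-bit tagging follows; the 0x1/0x2 flag values mirror include/linux/page-flags.h as I understand it at the time of this patch, so treat them as an assumption:

/*
 * Standalone sketch of the low-bit pointer tagging checked by
 * __folio_test_movable(). Flag values are assumed from the mainline
 * header, not taken from the diff above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON	0x1UL
#define PAGE_MAPPING_MOVABLE	0x2UL
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

/* a real mapping pointer is at least 4-byte aligned, so bits 0-1 are free */
static int is_movable(const void *mapping)
{
	return ((uintptr_t)mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE;
}

int main(void)
{
	static int backing;	/* stand-in for a movable-ops structure */
	void *tagged = (void *)((uintptr_t)&backing | PAGE_MAPPING_MOVABLE);

	assert(is_movable(tagged));
	assert(!is_movable(&backing));	/* untagged pointer: not movable */
	printf("movable tag checks pass\n");
	return 0;
}

The same masked comparison is what separates movable mappings from anonymous ones (tag 0x1) and KSM mappings (both bits set), which is why the helper compares against the full mask rather than testing a single bit.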
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 97ac6c6c026d..2ecca45672e2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2041,7 +2041,7 @@ keep:
 }
 
 unsigned int reclaim_clean_pages_from_list(struct zone *zone,
-					    struct list_head *page_list)
+					    struct list_head *folio_list)
 {
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -2049,16 +2049,16 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	};
 	struct reclaim_stat stat;
 	unsigned int nr_reclaimed;
-	struct page *page, *next;
-	LIST_HEAD(clean_pages);
+	struct folio *folio, *next;
+	LIST_HEAD(clean_folios);
 	unsigned int noreclaim_flag;
 
-	list_for_each_entry_safe(page, next, page_list, lru) {
-		if (!PageHuge(page) && page_is_file_lru(page) &&
-		    !PageDirty(page) && !__PageMovable(page) &&
-		    !PageUnevictable(page)) {
-			ClearPageActive(page);
-			list_move(&page->lru, &clean_pages);
+	list_for_each_entry_safe(folio, next, folio_list, lru) {
+		if (!folio_test_hugetlb(folio) && folio_is_file_lru(folio) &&
+		    !folio_test_dirty(folio) && !__folio_test_movable(folio) &&
+		    !folio_test_unevictable(folio)) {
+			folio_clear_active(folio);
+			list_move(&folio->lru, &clean_folios);
 		}
 	}
 
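One detail of the loop just converted (unchanged by this patch): list_move() relinks the current entry onto clean_folios mid-walk, so the iteration must capture the next node before each move, which is exactly what the _safe iterator variant provides. A small user-space sketch of the same move-while-iterating idea, with hypothetical helpers rather than the kernel's list.h:

/*
 * Sketch of the move-while-iterating pattern: save the successor
 * before relinking the current node, so unlinking cannot break the
 * walk. Hypothetical singly-linked helpers, not the kernel's list.h.
 */
#include <stdio.h>

struct node {
	int clean;		/* stand-in for the folio_test_*() filter */
	struct node *next;
};

/* move matching nodes from *src onto *dst, keeping the rest in *src */
static void split_clean(struct node **src, struct node **dst)
{
	struct node *n = *src, *next;

	*src = NULL;
	for (; n; n = next) {
		next = n->next;	/* saved first: n is about to be relinked */
		if (n->clean) {
			n->next = *dst;
			*dst = n;
		} else {
			n->next = *src;
			*src = n;
		}
	}
}

int main(void)
{
	struct node c = { 1, NULL }, d = { 0, &c };
	struct node *all = &d, *clean = NULL;

	split_clean(&all, &clean);
	printf("clean moved: %d, dirty kept: %d\n", clean == &c, all == &d);
	return 0;
}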
@@ -2069,11 +2069,11 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 	 * change in the future.
 	 */
 	noreclaim_flag = memalloc_noreclaim_save();
-	nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
+	nr_reclaimed = shrink_page_list(&clean_folios, zone->zone_pgdat, &sc,
 					&stat, true);
 	memalloc_noreclaim_restore(noreclaim_flag);
 
-	list_splice(&clean_pages, page_list);
+	list_splice(&clean_folios, folio_list);
 	mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
 			    -(long)nr_reclaimed);
 	/*