author     Matthew Wilcox (Oracle) <willy@infradead.org>    2024-02-27 17:42:37 +0000
committer  Andrew Morton <akpm@linux-foundation.org>        2024-03-04 17:01:23 -0800
commit     90491d87dd46a4c843dae775b9e72c91624c5a7b (patch)
tree       ecce94adbdefc9b50e074d12d037521f899ebdd2 /mm/internal.h
parent     7c76d92253dbb7c53ba03a4cd6639113cd1f7d3a (diff)
mm: add free_unref_folios()
Iterate over a folio_batch rather than a linked list.  This is easier
for the CPU to prefetch and has a batch count naturally built in so we
don't need to track it.  Again, this lowers the maximum lock hold time
from 32 folios to 15, but I do not expect this to have a significant
effect.

Link: https://lkml.kernel.org/r/20240227174254.710559-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
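A minimal sketch of how a caller inside mm/ might drive the new batch
interface (not part of this patch: example_free_folios() and its
parameters are hypothetical, and it assumes free_unref_folios() leaves
the batch reinitialised for reuse, as the mm/page_alloc.c
implementation does):

#include <linux/mm.h>
#include <linux/pagevec.h>      /* struct folio_batch */

#include "internal.h"           /* free_unref_folios() */

/* Hypothetical helper: free an array of folios via the batch API. */
static void example_free_folios(struct folio **folios, unsigned int nr)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        for (i = 0; i < nr; i++) {
                /* folio_batch_add() returns the slots left; 0 = full. */
                if (!folio_batch_add(&fbatch, folios[i]))
                        free_unref_folios(&fbatch);
        }
        if (folio_batch_count(&fbatch))
                free_unref_folios(&fbatch);
}

Each flush hands the page allocator at most 15 folios (the folio_batch
capacity), which is what bounds the lock hold time mentioned above.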
Diffstat (limited to 'mm/internal.h')
-rw-r--r--  mm/internal.h  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index f376e3afbc4c..1dfdc3bde1b0 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -451,8 +451,9 @@ extern bool free_pages_prepare(struct page *page, unsigned int order);
 extern int user_min_free_kbytes;
-extern void free_unref_page(struct page *page, unsigned int order);
-extern void free_unref_page_list(struct list_head *list);
+void free_unref_page(struct page *page, unsigned int order);
+void free_unref_folios(struct folio_batch *fbatch);
+void free_unref_page_list(struct list_head *list);
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);