summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--mm/swap_state.c21
1 file changed, 13 insertions, 8 deletions
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 2f540748f7c0..96b5c585f047 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -15,6 +15,7 @@
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
+#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
@@ -310,21 +311,25 @@ void free_page_and_swap_cache(struct page *page)
*/
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
+ struct folio_batch folios;
+ unsigned int refs[PAGEVEC_SIZE];
+
lru_add_drain();
+ folio_batch_init(&folios);
for (int i = 0; i < nr; i++) {
- struct page *page = encoded_page_ptr(pages[i]);
+ struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
- /*
- * Skip over the "nr_pages" entry. It's sufficient to call
- * free_swap_cache() only once per folio.
- */
+ free_swap_cache(&folio->page);
+ refs[folios.nr] = 1;
if (unlikely(encoded_page_flags(pages[i]) &
ENCODED_PAGE_BIT_NR_PAGES_NEXT))
- i++;
+ refs[folios.nr] = encoded_nr_pages(pages[++i]);
- free_swap_cache(page);
+ if (folio_batch_add(&folios, folio) == 0)
+ folios_put_refs(&folios, refs);
}
- release_pages(pages, nr);
+ if (folios.nr)
+ folios_put_refs(&folios, refs);
}
static inline bool swap_use_vma_readahead(void)