author     Matthew Wilcox (Oracle) <willy@infradead.org>  2023-12-13 21:58:39 +0000
committer  Andrew Morton <akpm@linux-foundation.org>      2023-12-29 11:58:31 -0800
commit     c9bdf768dd9319d2d80a334646e2c8116af9e430 (patch)
tree       38d45764a09763bc233cbbe4d1980afbaa43cc13 /mm/swap_state.c
parent     3a61e6f668120ee2c7840b91891c858d575d07e2 (diff)
mm: convert swap_readpage() to swap_read_folio()
All callers have a folio, so pass it in, saving two calls to
compound_head().

Link: https://lkml.kernel.org/r/20231213215842.671461-11-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
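The point of the conversion is easiest to see outside the kernel. Below is a
minimal userspace sketch of the pattern, not the kernel code itself: struct
page, struct folio, and page_folio() here are simplified stand-ins for the
real kernel types, and the printf stands in for the actual swap I/O. Before
the patch, swap_readpage() had to recover the owning folio from the page it
was handed; after it, the caller passes the folio it already holds and that
lookup disappears.

#include <stdio.h>

struct folio { int id; };
struct page  { struct folio *owner; };

/* stand-in for the kernel's page_folio()/compound_head(): the lookup
 * that the old entry point had to repeat on every call */
static struct folio *page_folio(struct page *page)
{
	return page->owner;
}

/* old style: derive the folio inside the callee */
static void swap_readpage(struct page *page)
{
	struct folio *folio = page_folio(page);	/* extra indirection */
	printf("reading folio %d\n", folio->id);
}

/* new style: the caller passes the folio it already has */
static void swap_read_folio(struct folio *folio)
{
	printf("reading folio %d\n", folio->id);
}

int main(void)
{
	struct folio f = { .id = 42 };
	struct page  p = { .owner = &f };

	swap_readpage(&p);	/* before the conversion */
	swap_read_folio(&f);	/* after: no page_folio() lookup */
	return 0;
}

Every call site in the hunks below follows the same mechanical rewrite:
&folio->page becomes folio, and nothing else about the call changes.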
Diffstat (limited to 'mm/swap_state.c')
-rw-r--r--  mm/swap_state.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 874b40a1f502..d2fe70e307d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -530,7 +530,7 @@ fail_put_swap:
* the swap entry is no longer in use.
*
* get/put_swap_device() aren't needed to call this function, because
- * __read_swap_cache_async() call them and swap_readpage() holds the
+ * __read_swap_cache_async() call them and swap_read_folio() holds the
* swap cache folio lock.
*/
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
@@ -548,7 +548,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
mpol_cond_put(mpol);
if (page_allocated)
- swap_readpage(&folio->page, false, plug);
+ swap_read_folio(folio, false, plug);
return folio_file_page(folio, swp_offset(entry));
}
@@ -665,7 +665,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
if (!folio)
continue;
if (page_allocated) {
- swap_readpage(&folio->page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (offset != entry_offset) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
@@ -681,7 +681,7 @@ skip:
folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
&page_allocated, false);
if (unlikely(page_allocated))
- swap_readpage(&folio->page, false, NULL);
+ swap_read_folio(folio, false, NULL);
zswap_folio_swapin(folio);
return folio_file_page(folio, swp_offset(entry));
}
@@ -839,7 +839,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
if (!folio)
continue;
if (page_allocated) {
- swap_readpage(&folio->page, false, &splug);
+ swap_read_folio(folio, false, &splug);
if (i != ra_info.offset) {
folio_set_readahead(folio);
count_vm_event(SWAP_RA);
@@ -857,7 +857,7 @@ skip:
folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
&page_allocated, false);
if (unlikely(page_allocated))
- swap_readpage(&folio->page, false, NULL);
+ swap_read_folio(folio, false, NULL);
zswap_folio_swapin(folio);
return folio_file_page(folio, swp_offset(entry));
}
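For reference, the prototype implied by the call sites above (the folio, a
synchronous flag, a readahead plug) would be the following; the parameter
names are assumptions inferred from the callers, so verify against mm/swap.h
in the tree at commit c9bdf768dd93:

/* assumed declaration after this patch, not quoted from the tree */
void swap_read_folio(struct folio *folio, bool synchronous,
		     struct swap_iocb **plug);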