Diffstat (limited to 'mm/page_io.c')
-rw-r--r--  mm/page_io.c  80
1 file changed, 39 insertions, 41 deletions
diff --git a/mm/page_io.c b/mm/page_io.c
index 684cd3c7b59b..fe4c21af23f2 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -19,20 +19,19 @@
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
-#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
+#include <linux/zswap.h>
#include "swap.h"
static void __end_swap_bio_write(struct bio *bio)
{
- struct page *page = bio_first_page_all(bio);
+ struct folio *folio = bio_first_folio_all(bio);
if (bio->bi_status) {
- SetPageError(page);
/*
* We failed to write the page out to swap-space.
* Re-dirty the page in order to avoid it being reclaimed.
@@ -41,13 +40,13 @@ static void __end_swap_bio_write(struct bio *bio)
*
* Also clear PG_reclaim to avoid folio_rotate_reclaimable()
*/
- set_page_dirty(page);
+ folio_mark_dirty(folio);
pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
(unsigned long long)bio->bi_iter.bi_sector);
- ClearPageReclaim(page);
+ folio_clear_reclaim(folio);
}
- end_page_writeback(page);
+ folio_end_writeback(folio);
}
static void end_swap_bio_write(struct bio *bio)
@@ -58,18 +57,16 @@ static void end_swap_bio_write(struct bio *bio)
static void __end_swap_bio_read(struct bio *bio)
{
- struct page *page = bio_first_page_all(bio);
+ struct folio *folio = bio_first_folio_all(bio);
if (bio->bi_status) {
- SetPageError(page);
- ClearPageUptodate(page);
pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
(unsigned long long)bio->bi_iter.bi_sector);
} else {
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
- unlock_page(page);
+ folio_unlock(folio);
}
static void end_swap_bio_read(struct bio *bio)
@@ -198,7 +195,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
folio_unlock(folio);
return ret;
}
- if (frontswap_store(&folio->page) == 0) {
+ if (zswap_store(folio)) {
folio_start_writeback(folio);
folio_unlock(folio);
folio_end_writeback(folio);
@@ -208,22 +205,22 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
return 0;
}
-static inline void count_swpout_vm_event(struct page *page)
+static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (unlikely(PageTransHuge(page)))
+ if (unlikely(folio_test_pmd_mappable(folio)))
count_vm_event(THP_SWPOUT);
#endif
- count_vm_events(PSWPOUT, thp_nr_pages(page));
+ count_vm_events(PSWPOUT, folio_nr_pages(folio));
}
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
+static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
struct cgroup_subsys_state *css;
struct mem_cgroup *memcg;
- memcg = page_memcg(page);
+ memcg = folio_memcg(folio);
if (!memcg)
return;
@@ -233,7 +230,7 @@ static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
rcu_read_unlock();
}
#else
-#define bio_associate_blkg_from_page(bio, page) do { } while (0)
+#define bio_associate_blkg_from_page(bio, folio) do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
struct swap_iocb {
@@ -283,7 +280,7 @@ static void sio_write_complete(struct kiocb *iocb, long ret)
}
} else {
for (p = 0; p < sio->pages; p++)
- count_swpout_vm_event(sio->bvec[p].bv_page);
+ count_swpout_vm_event(page_folio(sio->bvec[p].bv_page));
}
for (p = 0; p < sio->pages; p++)
@@ -334,17 +331,18 @@ static void swap_writepage_bdev_sync(struct page *page,
{
struct bio_vec bv;
struct bio bio;
+ struct folio *folio = page_folio(page);
bio_init(&bio, sis->bdev, &bv, 1,
REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
bio.bi_iter.bi_sector = swap_page_sector(page);
__bio_add_page(&bio, page, thp_size(page), 0);
- bio_associate_blkg_from_page(&bio, page);
- count_swpout_vm_event(page);
+ bio_associate_blkg_from_page(&bio, folio);
+ count_swpout_vm_event(folio);
- set_page_writeback(page);
- unlock_page(page);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
submit_bio_wait(&bio);
__end_swap_bio_write(&bio);
@@ -354,6 +352,7 @@ static void swap_writepage_bdev_async(struct page *page,
struct writeback_control *wbc, struct swap_info_struct *sis)
{
struct bio *bio;
+ struct folio *folio = page_folio(page);
bio = bio_alloc(sis->bdev, 1,
REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
@@ -362,10 +361,10 @@ static void swap_writepage_bdev_async(struct page *page,
bio->bi_end_io = end_swap_bio_write;
__bio_add_page(bio, page, thp_size(page), 0);
- bio_associate_blkg_from_page(bio, page);
- count_swpout_vm_event(page);
- set_page_writeback(page);
- unlock_page(page);
+ bio_associate_blkg_from_page(bio, folio);
+ count_swpout_vm_event(folio);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
submit_bio(bio);
}
@@ -406,19 +405,17 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
if (ret == sio->len) {
for (p = 0; p < sio->pages; p++) {
- struct page *page = sio->bvec[p].bv_page;
+ struct folio *folio = page_folio(sio->bvec[p].bv_page);
- SetPageUptodate(page);
- unlock_page(page);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
}
count_vm_events(PSWPIN, sio->pages);
} else {
for (p = 0; p < sio->pages; p++) {
- struct page *page = sio->bvec[p].bv_page;
+ struct folio *folio = page_folio(sio->bvec[p].bv_page);
- SetPageError(page);
- ClearPageUptodate(page);
- unlock_page(page);
+ folio_unlock(folio);
}
pr_alert_ratelimited("Read-error on swap-device\n");
}
@@ -495,14 +492,15 @@ static void swap_readpage_bdev_async(struct page *page,
void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
{
+ struct folio *folio = page_folio(page);
struct swap_info_struct *sis = page_swap_info(page);
- bool workingset = PageWorkingset(page);
+ bool workingset = folio_test_workingset(folio);
unsigned long pflags;
bool in_thrashing;
- VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(PageUptodate(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);
/*
* Count submission time as memory stall and delay. When the device
@@ -515,9 +513,9 @@ void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
}
delayacct_swapin_start();
- if (frontswap_load(page) == 0) {
- SetPageUptodate(page);
- unlock_page(page);
+ if (zswap_load(folio)) {
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
} else if (data_race(sis->flags & SWP_FS_OPS)) {
swap_readpage_fs(page, plug);
} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
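For readability, the first two hunks above can be reassembled into the post-patch shape of the write-completion path. The listing below is an illustrative sketch rather than part of the diff: the standalone name demo_end_swap_write() and the include list are assumptions, the rate-limited error message is elided, and only helpers that appear in the hunks themselves are used.

/*
 * Sketch of the folio-based write-completion handler this diff converts
 * to; demo_end_swap_write() is a hypothetical name, not a kernel symbol.
 */
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static void demo_end_swap_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * The write failed: re-dirty the folio so reclaim does not
		 * discard the data, and clear PG_reclaim so
		 * folio_rotate_reclaimable() skips it.
		 */
		folio_mark_dirty(folio);
		folio_clear_reclaim(folio);
	}
	/* Success or failure, writeback is over for this folio. */
	folio_end_writeback(folio);
}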