summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--include/linux/memcontrol.h4
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/memcontrol.c35
-rw-r--r--mm/migrate.c4
-rw-r--r--mm/shmem.c5
5 files changed, 29 insertions, 23 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index b4bc052db32b..07eda24ec581 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -745,7 +745,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
__mem_cgroup_uncharge_list(page_list);
}
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
+void mem_cgroup_migrate(struct folio *old, struct folio *new);
/**
* mem_cgroup_lruvec - get the lru list vector for a memcg & node
@@ -1244,7 +1244,7 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
-static inline void mem_cgroup_migrate(struct page *old, struct page *new)
+static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
diff --git a/mm/filemap.c b/mm/filemap.c
index 44fcd9d1dd65..5368a4dcc35e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -835,6 +835,8 @@ EXPORT_SYMBOL(file_write_and_wait_range);
*/
void replace_page_cache_page(struct page *old, struct page *new)
{
+ struct folio *fold = page_folio(old);
+ struct folio *fnew = page_folio(new);
struct address_space *mapping = old->mapping;
void (*freepage)(struct page *) = mapping->a_ops->freepage;
pgoff_t offset = old->index;
@@ -848,7 +850,7 @@ void replace_page_cache_page(struct page *old, struct page *new)
new->mapping = mapping;
new->index = offset;
- mem_cgroup_migrate(old, new);
+ mem_cgroup_migrate(fold, fnew);
xas_lock_irq(&xas);
xas_store(&xas, new);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6321ed6d6e5a..c83d2f862f8a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6891,36 +6891,35 @@ void __mem_cgroup_uncharge_list(struct list_head *page_list)
}
/**
- * mem_cgroup_migrate - charge a page's replacement
- * @oldpage: currently circulating page
- * @newpage: replacement page
+ * mem_cgroup_migrate - Charge a folio's replacement.
+ * @old: Currently circulating folio.
+ * @new: Replacement folio.
*
- * Charge @newpage as a replacement page for @oldpage. @oldpage will
+ * Charge @new as a replacement folio for @old. @old will
* be uncharged upon free.
*
- * Both pages must be locked, @newpage->mapping must be set up.
+ * Both folios must be locked, @new->mapping must be set up.
*/
-void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
- struct folio *newfolio = page_folio(newpage);
struct mem_cgroup *memcg;
- long nr_pages = folio_nr_pages(newfolio);
+ long nr_pages = folio_nr_pages(new);
unsigned long flags;
- VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
- VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
- VM_BUG_ON_FOLIO(PageAnon(oldpage) != folio_test_anon(newfolio), newfolio);
- VM_BUG_ON_FOLIO(compound_nr(oldpage) != nr_pages, newfolio);
+ VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
+ VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
+ VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
+ VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
if (mem_cgroup_disabled())
return;
- /* Page cache replacement: new page already charged? */
- if (folio_memcg(newfolio))
+ /* Page cache replacement: new folio already charged? */
+ if (folio_memcg(new))
return;
- memcg = page_memcg(oldpage);
- VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
+ memcg = folio_memcg(old);
+ VM_WARN_ON_ONCE_FOLIO(!memcg, old);
if (!memcg)
return;
@@ -6932,11 +6931,11 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
}
css_get(&memcg->css);
- commit_charge(newfolio, memcg);
+ commit_charge(new, memcg);
local_irq_save(flags);
mem_cgroup_charge_statistics(memcg, nr_pages);
- memcg_check_events(memcg, page_to_nid(newpage));
+ memcg_check_events(memcg, folio_nid(new));
local_irq_restore(flags);
}
diff --git a/mm/migrate.c b/mm/migrate.c
index da55d2a8638d..bfb8ba490479 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -542,6 +542,8 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
*/
void migrate_page_states(struct page *newpage, struct page *page)
{
+ struct folio *folio = page_folio(page);
+ struct folio *newfolio = page_folio(newpage);
int cpupid;
if (PageError(page))
@@ -609,7 +611,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
copy_page_owner(page, newpage);
if (!PageHuge(page))
- mem_cgroup_migrate(page, newpage);
+ mem_cgroup_migrate(folio, newfolio);
}
EXPORT_SYMBOL(migrate_page_states);
diff --git a/mm/shmem.c b/mm/shmem.c
index a2e653aeb536..1588f33d009a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1637,6 +1637,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct page *oldpage, *newpage;
+ struct folio *old, *new;
struct address_space *swap_mapping;
swp_entry_t entry;
pgoff_t swap_index;
@@ -1673,7 +1674,9 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
xa_lock_irq(&swap_mapping->i_pages);
error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
if (!error) {
- mem_cgroup_migrate(oldpage, newpage);
+ old = page_folio(oldpage);
+ new = page_folio(newpage);
+ mem_cgroup_migrate(old, new);
__inc_lruvec_page_state(newpage, NR_FILE_PAGES);
__dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
}