author     Matthew Wilcox (Oracle) <willy@infradead.org>    2021-06-28 14:59:26 -0400
committer  Matthew Wilcox (Oracle) <willy@infradead.org>    2021-09-27 09:27:31 -0400
commit     1b7e4464d43a488e383843bf96ec62d12393bff1 (patch)
tree       4f40c7b95125160196b6c4977db0ef352c983c61 /mm/memcontrol.c
parent     8e88bd2dfde222285a56a54b0400942b5b33da20 (diff)
mm/memcg: Add folio_memcg() and related functions
memcg information is only stored in the head page, so the memcg subsystem
needs to ensure that all accesses are to the head page. The first step is
converting page_memcg() to folio_memcg().

The callers of page_memcg() and PageMemcgKmem() are not yet ready to be
converted to use folios, so retain them as wrappers around folio_memcg()
and folio_memcg_kmem(). They will be converted in a later patch set.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
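For reference, a minimal sketch of the wrapper pattern the message describes
(an illustration, not the verbatim hunk from include/linux/memcontrol.h; the
real definitions may differ in detail):

/*
 * Sketch only: the legacy page-based helpers convert to the folio and
 * delegate, so every memcg_data access goes through the head page.
 */
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

Because page_folio() always resolves to the compound head, a caller that
still passes a tail page transparently reads the head page's memcg_data.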
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--    mm/memcontrol.c    21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a064a85d51da..1385ac6f688e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2992,15 +2992,16 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
*/
void __memcg_kmem_uncharge_page(struct page *page, int order)
{
+ struct folio *folio = page_folio(page);
struct obj_cgroup *objcg;
unsigned int nr_pages = 1 << order;

- if (!PageMemcgKmem(page))
+ if (!folio_memcg_kmem(folio))
return;

- objcg = __page_objcg(page);
+ objcg = __folio_objcg(folio);
obj_cgroup_uncharge_pages(objcg, nr_pages);
- page->memcg_data = 0;
+ folio->memcg_data = 0;
obj_cgroup_put(objcg);
}

@@ -3234,17 +3235,18 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
*/
void split_page_memcg(struct page *head, unsigned int nr)
{
- struct mem_cgroup *memcg = page_memcg(head);
+ struct folio *folio = page_folio(head);
+ struct mem_cgroup *memcg = folio_memcg(folio);
int i;

if (mem_cgroup_disabled() || !memcg)
return;

for (i = 1; i < nr; i++)
- head[i].memcg_data = head->memcg_data;
+ folio_page(folio, i)->memcg_data = folio->memcg_data;

- if (PageMemcgKmem(head))
- obj_cgroup_get_many(__page_objcg(head), nr - 1);
+ if (folio_memcg_kmem(folio))
+ obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
else
css_get_many(&memcg->css, nr - 1);
}
@@ -6811,6 +6813,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
static void uncharge_page(struct page *page, struct uncharge_gather *ug)
{
+ struct folio *folio = page_folio(page);
unsigned long nr_pages;
struct mem_cgroup *memcg;
struct obj_cgroup *objcg;
@@ -6824,14 +6827,14 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
* exclusive access to the page.
*/
if (use_objcg) {
- objcg = __page_objcg(page);
+ objcg = __folio_objcg(folio);
/*
* This get matches the put at the end of the function and
* kmem pages do not hold memcg references anymore.
*/
memcg = get_mem_cgroup_from_objcg(objcg);
} else {
- memcg = __page_memcg(page);
+ memcg = __folio_memcg(folio);
}

if (!memcg)