author     Johannes Weiner <hannes@cmpxchg.org>            2020-06-03 16:01:54 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-03 20:09:47 -0700
commit     0d1c20722ab333ac0ac03ae2188922c1021d3abc
tree       569760f469c249926ed60c210e2bd3ddd6a5dce0 /mm/khugepaged.c
parent     9da7b5216869f80e91f78403a57c72b42357758c
mm: memcontrol: switch to native NR_FILE_PAGES and NR_SHMEM counters
Memcg maintains private MEMCG_CACHE and NR_SHMEM counters.  This
divergence from the generic VM accounting means unnecessary code overhead,
and creates a dependency for memcg that page->mapping is set up at the
time of charging, so that page types can be told apart.

Convert the generic accounting sites to mod_lruvec_page_state and friends
to maintain the per-cgroup vmstat counters of NR_FILE_PAGES and NR_SHMEM.
The page is already locked in these places, so page->mem_cgroup is stable;
we only need minimal tweaks of two mem_cgroup_migrate() calls to ensure
it's set up in time.

Then replace MEMCG_CACHE with NR_FILE_PAGES and delete the private
NR_SHMEM accounting sites.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-10-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
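To make the conversion concrete, here is a minimal sketch of what it looks
like at a typical page-cache accounting site (the helper names are real
v5.8-era kernel APIs; the surrounding site is illustrative, not a specific
hunk from this series):

	/*
	 * Before: only the node counter is updated here; memcg separately
	 * bumps its private MEMCG_CACHE counter at charge time, which is
	 * what requires page->mapping to be set so pages can be classified.
	 */
	__inc_node_page_state(page, NR_FILE_PAGES);

	/*
	 * After: one call updates the node, memcg and lruvec counters
	 * together, keyed off page->mem_cgroup (stable under page lock).
	 */
	__inc_lruvec_page_state(page, NR_FILE_PAGES);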
Diffstat (limited to 'mm/khugepaged.c')
 mm/khugepaged.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0a29b51494fd..ddbdc1e3a694 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1844,12 +1844,18 @@ out_unlock:
 	}
 
 	if (nr_none) {
-		struct zone *zone = page_zone(new_page);
-
-		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+		struct lruvec *lruvec;
+		/*
+		 * XXX: We have started try_charge and pinned the
+		 * memcg, but the page isn't committed yet so we
+		 * cannot use mod_lruvec_page_state(). This hackery
+		 * will be cleaned up when we remove the page->mapping
+		 * dependency from memcg and fully charge above.
+		 */
+		lruvec = mem_cgroup_lruvec(memcg, page_pgdat(new_page));
+		__mod_lruvec_state(lruvec, NR_FILE_PAGES, nr_none);
 		if (is_shmem)
-			__mod_node_page_state(zone->zone_pgdat,
-					      NR_SHMEM, nr_none);
+			__mod_lruvec_state(lruvec, NR_SHMEM, nr_none);
 	}
 
 xa_locked:
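The XXX comment in the hunk is worth unpacking: mod_lruvec_page_state()
resolves the lruvec from page->mem_cgroup, but at this point in
collapse_file() the new page has been charged without being committed, so
page->mem_cgroup is still unset.  A simplified sketch, paraphrased from
the v5.8-era <linux/memcontrol.h> (compound-page handling omitted), shows
why the convenient helper would misfire here, and why the hunk instead
derives the lruvec from the memcg it already holds a reference to:

	static void __mod_lruvec_page_state(struct page *page,
					    enum node_stat_item idx, int val)
	{
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;

		/* Uncharged pages only update the node counter... */
		if (!page->mem_cgroup) {
			__mod_node_page_state(pgdat, idx, val);
			return;
		}

		/*
		 * ...so for the not-yet-committed new_page, the per-memcg
		 * stats below would be silently skipped.
		 */
		lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
		__mod_lruvec_state(lruvec, idx, val);
	}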