author     Yosry Ahmed <yosryahmed@google.com>        2023-09-22 17:57:40 +0000
committer  Andrew Morton <akpm@linux-foundation.org>  2023-10-18 14:34:12 -0700
commit     7bd5bc3ce9632aefd0eed33a19212a2e55c0f873 (patch)
tree       2584a661fa5e51523ab5a2be43e1d8fa055fdfbd /mm/memcontrol.c
parent     ff841a06c844b0556b434d67cfc43f4fda56ae7b (diff)
mm: memcg: normalize the value passed into memcg_rstat_updated()
memcg_rstat_updated() uses the value of the state update to keep track of
the magnitude of pending updates, so that we only do a stats flush when it
is worth the work. Most values passed into memcg_rstat_updated() are in
pages, but a few of them are actually in bytes or KBs.

To put this into perspective, a 512 byte slab allocation today would look
the same as allocating 512 pages. This may result in premature flushes,
which means unnecessary work and latency.

Normalize all the state values passed into memcg_rstat_updated() to pages.
Round non-zero sub-page updates up to 1 page, because memcg_rstat_updated()
ignores 0 page updates.

Link: https://lkml.kernel.org/r/20230922175741.635002-3-yosryahmed@google.com
Fixes: 5b3be698a872 ("memcg: better bounds on the memcg stats updates")
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Koutný <mkoutny@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
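To make the unit mismatch concrete, here is a minimal user-space sketch of
the normalization. This is a sketch only: PAGE_SIZE is assumed to be 4 KiB,
and page_state_unit()/val_in_pages() are hypothetical stand-ins for the
kernel's memcg_page_state_unit() and memcg_state_val_in_pages():

	#include <stdio.h>

	#define PAGE_SIZE 4096L

	/*
	 * Stand-in for memcg_page_state_unit(): byte-sized items (e.g. slab
	 * counters) have a unit of 1 byte, most items a unit of PAGE_SIZE.
	 */
	static int page_state_unit(int byte_sized)
	{
		return byte_sized ? 1 : PAGE_SIZE;
	}

	/* Mirrors memcg_state_val_in_pages(): normalize an update to pages. */
	static long val_in_pages(int unit, long val)
	{
		long pages = val * unit / PAGE_SIZE;

		if (!val || unit == PAGE_SIZE)
			return val;
		/* Round non-zero sub-page updates up to one page. */
		return pages ? pages : 1;
	}

	int main(void)
	{
		/* Before the fix, a 512 byte slab update counted as 512 "pages". */
		printf("raw:        %ld\n", 512L);
		/* After the fix, it is normalized to a single page. */
		printf("normalized: %ld\n", val_in_pages(page_state_unit(1), 512));
		return 0;
	}

Compiled and run, this prints raw: 512 and normalized: 1, i.e. a 512 byte
slab update no longer carries the weight of 512 page-sized updates.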
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7e65d8c2e685..8539f2037168 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -763,6 +763,22 @@ unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 	return x;
 }
 
+static int memcg_page_state_unit(int item);
+
+/*
+ * Normalize the value passed into memcg_rstat_updated() to be in pages. Round
+ * up non-zero sub-page updates to 1 page as zero page updates are ignored.
+ */
+static int memcg_state_val_in_pages(int idx, int val)
+{
+	int unit = memcg_page_state_unit(idx);
+
+	if (!val || unit == PAGE_SIZE)
+		return val;
+	else
+		return max(val * unit / PAGE_SIZE, 1UL);
+}
+
 /**
  * __mod_memcg_state - update cgroup memory statistics
  * @memcg: the memory cgroup
@@ -775,7 +791,7 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 		return;
 
 	__this_cpu_add(memcg->vmstats_percpu->state[idx], val);
-	memcg_rstat_updated(memcg, val);
+	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
 }
 
 /* idx can be of type enum memcg_stat_item or node_stat_item. */
@@ -826,7 +842,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 
 	/* Update lruvec */
 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
 
-	memcg_rstat_updated(memcg, val);
+	memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val));
 	memcg_stats_unlock();
 }
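For context on why the magnitude matters: memcg_rstat_updated() accumulates
the absolute value of each update and arranges a stats flush once the
pending total crosses a batching threshold (MEMCG_CHARGE_BATCH, 64 at the
time of this commit). The sketch below is a deliberately simplified model of
that bookkeeping (a single global counter instead of the kernel's per-CPU
accounting) to show how byte-valued updates caused premature flushes:

	#include <stdio.h>
	#include <stdlib.h>

	#define CHARGE_BATCH 64L	/* mirrors MEMCG_CHARGE_BATCH */

	static long pending;	/* stand-in for the pending-updates counter */
	static long flushes;

	/* Simplified model of memcg_rstat_updated(): flush on batched magnitude. */
	static void rstat_updated(long val)
	{
		pending += labs(val);
		if (pending > CHARGE_BATCH) {
			flushes++;
			pending = 0;
		}
	}

	int main(void)
	{
		long i;

		/* 1000 slab updates of 512 bytes each, passed through raw. */
		for (i = 0; i < 1000; i++)
			rstat_updated(512);
		printf("raw bytes:  %ld flushes\n", flushes);	/* one per update */

		flushes = pending = 0;
		/* The same updates normalized to pages (512 B rounds up to 1). */
		for (i = 0; i < 1000; i++)
			rstat_updated(1);
		printf("normalized: %ld flushes\n", flushes);	/* ~1 per 65 updates */
		return 0;
	}

With raw byte values, every 512 byte update immediately overshoots the batch
and forces a flush; after normalization the same workload flushes roughly
once per 65 updates.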