author	Yosry Ahmed <yosryahmed@google.com>	2024-01-24 10:00:22 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2024-02-07 21:20:34 -0800
commit	9cee7e8ef3e31ca25b40ca52b8585dc6935deff2 (patch)
tree	f16074428e12b8fd6296d8a6ea18632675746313 /mm
parent	67b8bcbaed4777871bb0dcc888fb02a614a98ab1 (diff)
mm: memcg: optimize parent iteration in memcg_rstat_updated()
In memcg_rstat_updated(), we iterate the memcg being updated and its parents to update memcg->vmstats_percpu->stats_updates in the fast path (i.e. no atomic updates). According to my math, this is 3 memory loads (and potentially 3 cache misses) per memcg:

- Load the address of memcg->vmstats_percpu.
- Load vmstats_percpu->stats_updates (based on some percpu calculation).
- Load the address of the parent memcg.

Avoid most of the cache misses by caching a pointer from each struct memcg_vmstats_percpu to its parent on the corresponding CPU. In this case, for the first memcg we have 2 memory loads (same as above):

- Load the address of memcg->vmstats_percpu.
- Load vmstats_percpu->stats_updates (based on some percpu calculation).

Then for each additional memcg, we need a single load to get the parent's stats_updates directly. This reduces the number of loads from O(3N) to O(2+N) -- where N is the number of memcgs we need to iterate.

Additionally, stash a pointer to memcg->vmstats in each struct memcg_vmstats_percpu such that we can access the atomic counter that all CPUs fold into, memcg->vmstats->stats_updates. memcg_should_flush_stats() is changed to memcg_vmstats_needs_flush() to accept a struct memcg_vmstats pointer accordingly.

In struct memcg_vmstats_percpu, make sure both pointers together with stats_updates live on the same cacheline.

Finally, update mem_cgroup_alloc() to take in a parent pointer and initialize the new cache pointers on each CPU. The percpu loop in mem_cgroup_alloc() may look concerning, but there are multiple similar loops in the cgroup creation path (e.g. cgroup_rstat_init()), most of which are hidden within alloc_percpu().

According to Oliver's testing [1], this fixes multiple 30-38% regressions in vm-scalability, will-it-scale-tlb_flush2, and will-it-scale-fallocate1. This comes at a cost of 2 more pointers per CPU (<2KB on a machine with 128 CPUs).

[1] https://lore.kernel.org/lkml/ZbDJsfsZt2ITyo61@xsang-OptiPlex-9020/

[yosryahmed@google.com: fix struct memcg_vmstats_percpu size and alignment]
Link: https://lkml.kernel.org/r/20240203044612.1234216-1-yosryahmed@google.com
Link: https://lkml.kernel.org/r/20240124100023.660032-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Fixes: 8d59d2214c23 ("mm: memcg: make stats flushing threshold per-memcg")
Tested-by: kernel test robot <oliver.sang@intel.com>
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202401221624.cb53a8ca-oliver.sang@intel.com
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
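[Editor's illustration, not part of the commit: a minimal user-space C sketch of the pointer-caching idea described above. Names such as vmstats_pcpu, update_old() and update_new() are invented for this sketch; the real kernel code is in the diff below.]

#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustrative only). */
struct vmstats_pcpu {
	unsigned int stats_updates;
	struct vmstats_pcpu *parent;	/* cached: parent memcg's per-CPU stats */
};

struct memcg {
	struct memcg *parent;
	struct vmstats_pcpu *pcpu;	/* stands in for the real percpu pointer */
};

/*
 * Old walk: per level, load memcg->pcpu, update stats_updates, and load
 * memcg->parent -- roughly three dependent loads per memcg.
 */
static void update_old(struct memcg *memcg, unsigned int val)
{
	for (; memcg; memcg = memcg->parent)
		memcg->pcpu->stats_updates += val;
}

/*
 * New walk: resolve the per-CPU struct once for the updated memcg, then
 * follow the cached parent pointers -- one load per additional level.
 */
static void update_new(struct memcg *memcg, unsigned int val)
{
	struct vmstats_pcpu *statc;

	for (statc = memcg->pcpu; statc; statc = statc->parent)
		statc->stats_updates += val;
}

int main(void)
{
	/* Two-level hierarchy: child -> root, with cached per-CPU parents. */
	struct vmstats_pcpu root_pcpu = { .stats_updates = 0, .parent = NULL };
	struct vmstats_pcpu child_pcpu = { .stats_updates = 0, .parent = &root_pcpu };
	struct memcg root = { .parent = NULL, .pcpu = &root_pcpu };
	struct memcg child = { .parent = &root, .pcpu = &child_pcpu };

	update_old(&child, 1);
	update_new(&child, 1);
	printf("child=%u root=%u\n", child_pcpu.stats_updates,
	       root_pcpu.stats_updates);
	return 0;
}

In the sketch, as in the patch, the cached parent chain is built once per CPU at cgroup creation time (the new per-CPU loop in mem_cgroup_alloc() below), so the hot path only needs a single pointer chase per additional ancestor.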
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	56
1 file changed, 35 insertions, 21 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 46d8d02114cf..1ed40f9d3a27 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -621,6 +621,15 @@ static inline int memcg_events_index(enum vm_event_item idx)
}
struct memcg_vmstats_percpu {
+ /* Stats updates since the last flush */
+ unsigned int stats_updates;
+
+ /* Cached pointers for fast iteration in memcg_rstat_updated() */
+ struct memcg_vmstats_percpu *parent;
+ struct memcg_vmstats *vmstats;
+
+ /* The above should fit a single cacheline for memcg_rstat_updated() */
+
/* Local (CPU and cgroup) page state & events */
long state[MEMCG_NR_STAT];
unsigned long events[NR_MEMCG_EVENTS];
@@ -632,10 +641,7 @@ struct memcg_vmstats_percpu {
/* Cgroup1: threshold notifications & softlimit tree updates */
unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
-
- /* Stats updates since the last flush */
- unsigned int stats_updates;
-};
+} ____cacheline_aligned;
struct memcg_vmstats {
/* Aggregated (CPU and subtree) page state & events */
@@ -698,36 +704,35 @@ static void memcg_stats_unlock(void)
}
-static bool memcg_should_flush_stats(struct mem_cgroup *memcg)
+static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
{
- return atomic64_read(&memcg->vmstats->stats_updates) >
+ return atomic64_read(&vmstats->stats_updates) >
MEMCG_CHARGE_BATCH * num_online_cpus();
}
static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
{
+ struct memcg_vmstats_percpu *statc;
int cpu = smp_processor_id();
- unsigned int x;
if (!val)
return;
cgroup_rstat_updated(memcg->css.cgroup, cpu);
-
- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
- x = __this_cpu_add_return(memcg->vmstats_percpu->stats_updates,
- abs(val));
-
- if (x < MEMCG_CHARGE_BATCH)
+ statc = this_cpu_ptr(memcg->vmstats_percpu);
+ for (; statc; statc = statc->parent) {
+ statc->stats_updates += abs(val);
+ if (statc->stats_updates < MEMCG_CHARGE_BATCH)
continue;
/*
* If @memcg is already flush-able, increasing stats_updates is
* redundant. Avoid the overhead of the atomic update.
*/
- if (!memcg_should_flush_stats(memcg))
- atomic64_add(x, &memcg->vmstats->stats_updates);
- __this_cpu_write(memcg->vmstats_percpu->stats_updates, 0);
+ if (!memcg_vmstats_needs_flush(statc->vmstats))
+ atomic64_add(statc->stats_updates,
+ &statc->vmstats->stats_updates);
+ statc->stats_updates = 0;
}
}
@@ -756,7 +761,7 @@ void mem_cgroup_flush_stats(struct mem_cgroup *memcg)
if (!memcg)
memcg = root_mem_cgroup;
- if (memcg_should_flush_stats(memcg))
+ if (memcg_vmstats_needs_flush(memcg->vmstats))
do_flush_stats(memcg);
}
@@ -770,7 +775,7 @@ void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg)
static void flush_memcg_stats_dwork(struct work_struct *w)
{
/*
- * Deliberately ignore memcg_should_flush_stats() here so that flushing
+ * Deliberately ignore memcg_vmstats_needs_flush() here so that flushing
* in latency-sensitive paths is as cheap as possible.
*/
do_flush_stats(root_mem_cgroup);
@@ -5477,10 +5482,11 @@ static void mem_cgroup_free(struct mem_cgroup *memcg)
__mem_cgroup_free(memcg);
}
-static struct mem_cgroup *mem_cgroup_alloc(void)
+static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
{
+ struct memcg_vmstats_percpu *statc, *pstatc;
struct mem_cgroup *memcg;
- int node;
+ int node, cpu;
int __maybe_unused i;
long error = -ENOMEM;
@@ -5504,6 +5510,14 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
if (!memcg->vmstats_percpu)
goto fail;
+ for_each_possible_cpu(cpu) {
+ if (parent)
+ pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
+ statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
+ statc->parent = parent ? pstatc : NULL;
+ statc->vmstats = memcg->vmstats;
+ }
+
for_each_node(node)
if (alloc_mem_cgroup_per_node_info(memcg, node))
goto fail;
@@ -5549,7 +5563,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
struct mem_cgroup *memcg, *old_memcg;
old_memcg = set_active_memcg(parent);
- memcg = mem_cgroup_alloc();
+ memcg = mem_cgroup_alloc(parent);
set_active_memcg(old_memcg);
if (IS_ERR(memcg))
return ERR_CAST(memcg);