Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	70
1 file changed, 59 insertions(+), 11 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b46356da6c0e..be810c1fbfc3 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -81,6 +81,7 @@ struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);
struct mem_cgroup *root_mem_cgroup __read_mostly;
+EXPORT_SYMBOL(root_mem_cgroup);
/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
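Exporting root_mem_cgroup makes the root group's symbol visible to modular code. A minimal illustrative sketch of what that enables; the helper name is made up, and in-tree callers would normally use mem_cgroup_is_root() instead:

/* Hypothetical modular user enabled by the new export (illustrative only). */
static inline bool my_memcg_is_root(struct mem_cgroup *memcg)
{
	return memcg == root_mem_cgroup;
}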
@@ -756,7 +757,7 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
}
/**
- * __mod_lruvec_state - update lruvec memory statistics
+ * mod_lruvec_state - update lruvec memory statistics
* @lruvec: the lruvec
* @idx: the stat item
* @val: delta to add to the counter, can be negative
@@ -765,18 +766,18 @@ static void mod_memcg_lruvec_state(struct lruvec *lruvec,
* function updates all three counters that are affected by a
* change of state at this level: per-node, per-cgroup, per-lruvec.
*/
-void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val)
{
/* Update node */
- __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+ mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
/* Update memcg and lruvec */
if (!mem_cgroup_disabled())
mod_memcg_lruvec_state(lruvec, idx, val);
}
-void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
+void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
int val)
{
struct mem_cgroup *memcg;
@@ -788,17 +789,17 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
/* Untracked pages have no memcg, no lruvec. Update only the node */
if (!memcg) {
rcu_read_unlock();
- __mod_node_page_state(pgdat, idx, val);
+ mod_node_page_state(pgdat, idx, val);
return;
}
lruvec = mem_cgroup_lruvec(memcg, pgdat);
- __mod_lruvec_state(lruvec, idx, val);
+ mod_lruvec_state(lruvec, idx, val);
rcu_read_unlock();
}
-EXPORT_SYMBOL(__lruvec_stat_mod_folio);
+EXPORT_SYMBOL(lruvec_stat_mod_folio);
-void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
+void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
pg_data_t *pgdat = page_pgdat(virt_to_page(p));
struct mem_cgroup *memcg;
@@ -814,10 +815,10 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
* vmstats to keep it correct for the root memcg.
*/
if (!memcg) {
- __mod_node_page_state(pgdat, idx, val);
+ mod_node_page_state(pgdat, idx, val);
} else {
lruvec = mem_cgroup_lruvec(memcg, pgdat);
- __mod_lruvec_state(lruvec, idx, val);
+ mod_lruvec_state(lruvec, idx, val);
}
rcu_read_unlock();
}
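With the double-underscore prefix dropped, callers use the renamed helpers directly. A minimal sketch of a hypothetical caller, assuming the renamed lruvec_stat_mod_folio() above; the function name below is illustrative only:

/* Hypothetical caller (illustrative), not part of this patch. */
static void my_account_dirty_folio(struct folio *folio, int nr_pages)
{
	/* One call updates the node, memcg and lruvec counters. */
	lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr_pages);
}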
@@ -1625,6 +1626,37 @@ unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
return page_counter_read(&memcg->memory);
}
+void __memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event, bool allow_spinning)
+{
+ bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+ event == MEMCG_SWAP_FAIL;
+
+ /* For now only MEMCG_MAX can happen with !allow_spinning context. */
+ VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
+
+ atomic_long_inc(&memcg->memory_events_local[event]);
+ if (!swap_event && allow_spinning)
+ cgroup_file_notify(&memcg->events_local_file);
+
+ do {
+ atomic_long_inc(&memcg->memory_events[event]);
+ if (allow_spinning) {
+ if (swap_event)
+ cgroup_file_notify(&memcg->swap_events_file);
+ else
+ cgroup_file_notify(&memcg->events_file);
+ }
+
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ break;
+ if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+ break;
+ } while ((memcg = parent_mem_cgroup(memcg)) &&
+ !mem_cgroup_is_root(memcg));
+}
+EXPORT_SYMBOL_GPL(__memcg_memory_event);
+
static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
int order)
{
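The allow_spinning flag lets callers in contexts that must not spin skip the cgroup_file_notify() calls while still bumping the event counters. A hedged sketch of a header-side wrapper that keeps the familiar two-argument form; the wrapper is an assumption for illustration and is not part of the hunk above:

/*
 * Hypothetical wrapper (not shown in this patch): callers that may
 * always spin keep the old two-argument calling convention.
 */
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	__memcg_memory_event(memcg, event, true);
}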
@@ -3880,6 +3912,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
zswap_memcg_offline_cleanup(memcg);
memcg_offline_kmem(memcg);
+ reparent_deferred_split_queue(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
lru_gen_offline_memcg(memcg);
@@ -4455,6 +4488,8 @@ static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
atomic_long_read(&events[MEMCG_OOM_KILL]));
seq_printf(m, "oom_group_kill %lu\n",
atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
+ seq_printf(m, "sock_throttled %lu\n",
+ atomic_long_read(&events[MEMCG_SOCK_THROTTLED]));
}
static int memory_events_show(struct seq_file *m, void *v)
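The new counter surfaces as a sock_throttled line in memory.events and memory.events.local. A small userspace sketch that reads it back, assuming a cgroup v2 path; the helper below is illustrative and not part of the kernel tree:

#include <stdio.h>
#include <string.h>

/* Illustrative reader for the new sock_throttled field. */
static unsigned long read_sock_throttled(const char *cgrp)
{
	char path[4096], key[64];
	unsigned long val = 0, count = 0;
	FILE *f;

	snprintf(path, sizeof(path), "%s/memory.events", cgrp);
	f = fopen(path, "r");
	if (!f)
		return 0;
	while (fscanf(f, "%63s %lu", key, &val) == 2)
		if (!strcmp(key, "sock_throttled"))
			count = val;
	fclose(f);
	return count;
}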
@@ -5435,7 +5470,7 @@ bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
* @size: size of compressed object
*
* This forces the charge after obj_cgroup_may_zswap() allowed
- * compression and storage in zwap for this cgroup to go ahead.
+ * compression and storage in zswap for this cgroup to go ahead.
*/
void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
{
@@ -5593,3 +5628,16 @@ bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
{
return memcg ? cpuset_node_allowed(memcg->css.cgroup, nid) : true;
}
+
+void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled() || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ return;
+
+ if (!memcg)
+ memcg = root_mem_cgroup;
+
+ pr_warn("Memory cgroup min protection %lukB -- low protection %lukB",
+ K(atomic_long_read(&memcg->memory.children_min_usage)*PAGE_SIZE),
+ K(atomic_long_read(&memcg->memory.children_low_usage)*PAGE_SIZE));
+}
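The helper falls back to the root cgroup when called with a NULL memcg, so it fits a global OOM path as well as a memcg OOM. A hedged sketch of one possible call site; the surrounding function is hypothetical and not part of this patch:

/*
 * Hypothetical call site (illustrative only): dump the aggregate
 * min/low protection next to an OOM report.
 */
static void my_dump_oom_protection(struct oom_control *oc)
{
	/* A NULL memcg (global OOM) falls back to the root cgroup. */
	mem_cgroup_show_protected_memory(oc->memcg);
}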