author		Johannes Weiner <hannes@cmpxchg.org>	2022-05-19 14:08:53 -0700
committer	akpm <akpm@linux-foundation.org>	2022-05-19 14:08:53 -0700
commit		f4840ccfca25db225b3371a8f7b5770febee87c5 (patch)
tree		c35709fc53e78d36681620d17d410bcc0c5d5785 /include/linux/memcontrol.h
parent		f6498b776d280b30a4614d8261840961e993c2c8 (diff)
zswap: memcg accounting
Applications can currently escape their cgroup memory containment when zswap is enabled. This patch adds per-cgroup tracking and limiting of zswap backend memory to rectify this.

The existing cgroup2 memory.stat file is extended to show zswap statistics analogous to what's in meminfo and vmstat. Furthermore, two new control files, memory.zswap.current and memory.zswap.max, are added to allow tuning zswap usage on a per-workload basis. This is important since not all workloads benefit from zswap equally; some even suffer compared to disk swap when memory contents don't compress well. The optimal size of the zswap pool, and the threshold for writeback, also depend on the size of the workload's warm set.

The implementation doesn't use a traditional page_counter transaction. zswap is unconventional as a memory consumer in that we only know the amount of memory to charge once expensive compression has occurred. If zswap is disabled or the limit is already exceeded we obviously don't want to compress page upon page only to reject them all. Instead, the limit is checked against current usage, then we compress and charge. This allows some limit overrun, but not enough to matter in practice.

[hannes@cmpxchg.org: fix for CONFIG_SLOB builds]
  Link: https://lkml.kernel.org/r/YnwD14zxYjUJPc2w@cmpxchg.org
[hannes@cmpxchg.org: opt out of cgroups v1]
  Link: https://lkml.kernel.org/r/Yn6it9mBYFA+/lTb@cmpxchg.org
Link: https://lkml.kernel.org/r/20220510152847.230957-7-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
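To illustrate the check-then-charge scheme described above, here is a minimal, hypothetical sketch of how a zswap store path could use the hooks declared by this patch (get_obj_cgroup_from_page(), obj_cgroup_may_zswap(), obj_cgroup_charge_zswap()). The helpers zswap_compress_page() and zswap_insert_entry() are illustrative placeholders, not functions added by this patch; the real mm/zswap.c changes live elsewhere in the series.

/*
 * Hypothetical sketch only: zswap_compress_page() and
 * zswap_insert_entry() are made-up placeholder helpers.
 */
#include <linux/memcontrol.h>
#include <linux/mm_types.h>

static int example_zswap_store(struct page *page)
{
	struct obj_cgroup *objcg = get_obj_cgroup_from_page(page);
	size_t compressed_len;
	int err;

	/* Cheap limit check before paying for compression. */
	if (objcg && !obj_cgroup_may_zswap(objcg)) {
		err = -ENOMEM;
		goto out;
	}

	/* Only after compressing do we know the size to charge. */
	err = zswap_compress_page(page, &compressed_len);
	if (err)
		goto out;

	err = zswap_insert_entry(page, compressed_len);
	if (err)
		goto out;

	/*
	 * Charge after the fact; this can overrun memory.zswap.max
	 * slightly, which the changelog deems acceptable in practice.
	 */
	if (objcg)
		obj_cgroup_charge_zswap(objcg, compressed_len);
out:
	if (objcg)
		obj_cgroup_put(objcg);
	return err;
}

On the uncharge side, obj_cgroup_uncharge_zswap() would be called with the same compressed size when the entry is freed or written back to disk swap.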
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--	include/linux/memcontrol.h	54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 8ea4b541c31e..9ecead1042b9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,6 +35,8 @@ enum memcg_stat_item {
 	MEMCG_PERCPU_B,
 	MEMCG_VMALLOC,
 	MEMCG_KMEM,
+	MEMCG_ZSWAP_B,
+	MEMCG_ZSWAPPED,
 	MEMCG_NR_STAT,
 };
@@ -252,6 +254,10 @@ struct mem_cgroup {
 	/* Range enforcement for interrupt charges */
 	struct work_struct high_work;
 
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+	unsigned long zswap_max;
+#endif
+
 	unsigned long soft_limit;
 
 	/* vmpressure notifications */
@@ -1273,6 +1279,10 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
 	return NULL;
 }
 
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+}
+
 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
 {
 }
@@ -1694,6 +1704,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
 void __memcg_kmem_uncharge_page(struct page *page, int order);
 
 struct obj_cgroup *get_obj_cgroup_from_current(void);
+struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
@@ -1730,6 +1741,20 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg)
 
 struct mem_cgroup *mem_cgroup_from_obj(void *p);
 
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+				     enum vm_event_item idx)
+{
+	struct mem_cgroup *memcg;
+
+	if (mem_cgroup_kmem_disabled())
+		return;
+
+	rcu_read_lock();
+	memcg = obj_cgroup_memcg(objcg);
+	count_memcg_events(memcg, idx, 1);
+	rcu_read_unlock();
+}
+
 #else
 static inline bool mem_cgroup_kmem_disabled(void)
 {
@@ -1756,6 +1781,11 @@ static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
 {
 }
 
+static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
+{
+	return NULL;
+}
+
 static inline bool memcg_kmem_enabled(void)
 {
 	return false;
@@ -1771,6 +1801,30 @@ static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
 	return NULL;
 }
 
+static inline void count_objcg_event(struct obj_cgroup *objcg,
+				     enum vm_event_item idx)
+{
+}
+
 #endif /* CONFIG_MEMCG_KMEM */
 
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+bool obj_cgroup_may_zswap(struct obj_cgroup *objcg);
+void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size);
+void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size);
+#else
+static inline bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
+{
+	return true;
+}
+static inline void obj_cgroup_charge_zswap(struct obj_cgroup *objcg,
+					    size_t size)
+{
+}
+static inline void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg,
+					     size_t size)
+{
+}
+#endif
+
 #endif /* _LINUX_MEMCONTROL_H */