Diffstat (limited to 'mm/slab.h')
-rw-r--r--  mm/slab.h  60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 54 insertions(+), 6 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 9f78e3167f19..d4f9fc93546f 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -470,6 +470,41 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
page->obj_cgroups = NULL;
}
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup *objcg,
+ size_t size, void **p)
+{
+ struct page *page;
+ unsigned long off;
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ if (likely(p[i])) {
+ page = virt_to_head_page(p[i]);
+ off = obj_to_index(s, page, p[i]);
+ obj_cgroup_get(objcg);
+ page_obj_cgroups(page)[off] = objcg;
+ }
+ }
+ obj_cgroup_put(objcg);
+ memcg_kmem_put_cache(s);
+}
+
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
+ void *p)
+{
+ struct obj_cgroup *objcg;
+ unsigned int off;
+
+ if (!memcg_kmem_enabled() || is_root_cache(s))
+ return;
+
+ off = obj_to_index(s, page, p);
+ objcg = page_obj_cgroups(page)[off];
+ page_obj_cgroups(page)[off] = NULL;
+ obj_cgroup_put(objcg);
+}
+
extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);
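[Note: the hooks above lean on two helpers that are defined outside this hunk: page_obj_cgroups(), which returns the per-page vector of obj_cgroup pointers hanging off page->obj_cgroups, and obj_to_index(), which maps an object address to its slot in that vector. A minimal sketch of what they amount to, assuming a per-object stride of s->size and computing the index by plain division (the in-tree helpers are expected to avoid the runtime division); this is an illustration, not the actual definitions:]

/* Sketch only; the real helpers live elsewhere in the tree. */
static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
{
	/* One obj_cgroup pointer per object slot, allocated per slab page. */
	return page->obj_cgroups;
}

static inline unsigned int obj_to_index(const struct kmem_cache *s,
					const struct page *page, void *obj)
{
	/* Offset of the object within the slab, divided by the object stride. */
	return (obj - page_address(page)) / s->size;
}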
@@ -529,6 +564,17 @@ static inline void memcg_free_page_obj_cgroups(struct page *page)
{
}
+static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup *objcg,
+ size_t size, void **p)
+{
+}
+
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct page *page,
+ void *p)
+{
+}
+
static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
@@ -631,7 +677,8 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
}
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
- gfp_t flags)
+ struct obj_cgroup **objcgp,
+ size_t size, gfp_t flags)
{
flags &= gfp_allowed_mask;
@@ -645,13 +692,14 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
if (memcg_kmem_enabled() &&
((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
- return memcg_kmem_get_cache(s);
+ return memcg_kmem_get_cache(s, objcgp);
return s;
}
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
- size_t size, void **p)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+ struct obj_cgroup *objcg,
+ gfp_t flags, size_t size, void **p)
{
size_t i;
@@ -663,8 +711,8 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
s->flags, flags);
}
- if (memcg_kmem_enabled())
- memcg_kmem_put_cache(s);
+ if (memcg_kmem_enabled() && !is_root_cache(s))
+ memcg_slab_post_alloc_hook(s, objcg, size, p);
}
#ifndef CONFIG_SLOB
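
[Note: taken together, these hook changes assume the allocator fast paths thread an obj_cgroup pointer from slab_pre_alloc_hook() to slab_post_alloc_hook(), and call memcg_slab_free_hook() when an object is returned. A rough sketch of that expected wiring, using hypothetical caller and fast-path names (the real callers are the slab_alloc()/slab_free() paths of the individual allocators, which are not part of this file):]

/* Sketch only: how a caller is expected to wire up the hooks. */
static void *example_alloc(struct kmem_cache *s, gfp_t flags)
{
	struct obj_cgroup *objcg = NULL;
	void *obj;

	/* May redirect to a memcg-specific cache and hand back an objcg. */
	s = slab_pre_alloc_hook(s, &objcg, 1, flags);
	if (!s)
		return NULL;

	obj = ____example_fastpath_alloc(s, flags);	/* hypothetical */

	/* Stores objcg into the slab page's obj_cgroups vector per object. */
	slab_post_alloc_hook(s, objcg, flags, 1, &obj);
	return obj;
}

static void example_free(struct kmem_cache *s, void *obj)
{
	struct page *page = virt_to_head_page(obj);

	/* Drops the per-object obj_cgroup reference taken at alloc time. */
	memcg_slab_free_hook(s, page, obj);
	____example_fastpath_free(s, page, obj);	/* hypothetical */
}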