author     Vasily Averin <vvs@openvz.org>     2022-06-03 06:21:49 +0300
committer  Vlastimil Babka <vbabka@suse.cz>   2022-07-04 17:11:27 +0200
commit     b347aa7b57477f71c740e2bbc6d1078a7109ba23
tree       2e2f1c711884b182d0134287c6b219733684ccf6 /include/trace/events/kmem.h
parent     0c7e0d699ef1430d7f4cf12b4b1d097af58b5515
mm/tracing: add 'accounted' entry into output of allocation tracepoints
Slab caches marked with SLAB_ACCOUNT force accounting for every
allocation from the cache even if the __GFP_ACCOUNT flag is not passed.
Unfortunately, at the moment this flag is not visible in ftrace output,
and this makes it difficult to analyze the accounted allocations.

This patch adds a boolean "accounted" entry into the trace output and
sets it to 'true' for calls that use the __GFP_ACCOUNT flag and for
allocations from caches marked with SLAB_ACCOUNT. It is set to 'false'
if accounting is disabled in the kernel config.

Signed-off-by: Vasily Averin <vvs@openvz.org>
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Link: https://lore.kernel.org/r/c418ed25-65fe-f623-fbf8-1676528859ed@openvz.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
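For illustration only, an allocation from a cache created with SLAB_ACCOUNT would now show up in the ftrace buffer roughly as follows; the call site, pointer and sizes here are hypothetical values, not taken from this patch:

    kmem_cache_alloc: call_site=__alloc_skb+0x9e/0x2a0 ptr=00000000a1b2c3d4 bytes_req=232 bytes_alloc=256 gfp_flags=GFP_KERNEL accounted=true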
Diffstat (limited to 'include/trace/events/kmem.h')
-rw-r--r--  include/trace/events/kmem.h | 40
1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f76668305ac5..4cb51ace600d 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -13,11 +13,12 @@ DECLARE_EVENT_CLASS(kmem_alloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
+ struct kmem_cache *s,
size_t bytes_req,
size_t bytes_alloc,
gfp_t gfp_flags),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+ TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
TP_STRUCT__entry(
__field( unsigned long, call_site )
@@ -25,6 +26,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
+ __field( bool, accounted )
),
TP_fast_assign(
@@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc,
__entry->bytes_req = bytes_req;
__entry->bytes_alloc = bytes_alloc;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
+ __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+ ((gfp_flags & __GFP_ACCOUNT) ||
+ (s && s->flags & SLAB_ACCOUNT)) : false;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
- show_gfp_flags(__entry->gfp_flags))
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->accounted ? "true" : "false")
);
DEFINE_EVENT(kmem_alloc, kmalloc,
- TP_PROTO(unsigned long call_site, const void *ptr,
+ TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+ TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
- TP_PROTO(unsigned long call_site, const void *ptr,
+ TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
+ TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);
DECLARE_EVENT_CLASS(kmem_alloc_node,
TP_PROTO(unsigned long call_site,
const void *ptr,
+ struct kmem_cache *s,
size_t bytes_req,
size_t bytes_alloc,
gfp_t gfp_flags,
int node),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+ TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
TP_STRUCT__entry(
__field( unsigned long, call_site )
@@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
__field( int, node )
+ __field( bool, accounted )
),
TP_fast_assign(
@@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->bytes_alloc = bytes_alloc;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
+ __entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
+ ((gfp_flags & __GFP_ACCOUNT) ||
+ (s && s->flags & SLAB_ACCOUNT)) : false;
),
- TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
- __entry->node)
+ __entry->node,
+ __entry->accounted ? "true" : "false")
);
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc,
+ struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
gfp_t gfp_flags, int node),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+ TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
TP_PROTO(unsigned long call_site, const void *ptr,
- size_t bytes_req, size_t bytes_alloc,
+ struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
gfp_t gfp_flags, int node),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
+ TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);
TRACE_EVENT(kfree,
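For reference, the accounting decision that both event classes above now record can be read as the following standalone C helper. This is only a restatement of the TP_fast_assign logic from the diff; the helper name is made up and does not exist in the kernel:

static inline bool alloc_is_accounted(struct kmem_cache *s, gfp_t gfp_flags)
{
	/* Without CONFIG_MEMCG_KMEM, kmem accounting is compiled out,
	 * so the entry is always 'false'. */
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM))
		return false;

	/* Accounted if the caller passed __GFP_ACCOUNT, or the cache
	 * itself was created with SLAB_ACCOUNT; the NULL check on 's'
	 * mirrors the tracepoint, which may be given no cache. */
	return (gfp_flags & __GFP_ACCOUNT) || (s && (s->flags & SLAB_ACCOUNT));
}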