author		Hyeonggon Yoo <42.hyeyoo@gmail.com>	2022-08-17 19:18:23 +0900
committer	Vlastimil Babka <vbabka@suse.cz>	2022-09-01 10:40:27 +0200
commit		11e9734bcb6a7361943f993eba4e97f5812120d8
tree		b1749d7a8ec5a2c805d727df95223c1c6bf5cc62 /mm
parent		26a40990ba052e6f553256f9d0f112452b992a38
mm/slab_common: unify NUMA and UMA version of tracepoints
Drop the kmem_alloc event class, rename kmem_alloc_node to kmem_alloc, and
remove the _node postfix from the NUMA version of the tracepoints. This will
break some tools that depend on {kmem_cache_alloc,kmalloc}_node, but at this
point maintaining both the kmem_alloc and kmem_alloc_node event classes does
not make sense at all.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
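The matching change to the tracepoint definitions lives in
include/trace/events/kmem.h and is therefore outside this mm-limited
diffstat. The following is only a sketch of what the unified event class
plausibly looks like after the rename: the prototype is inferred from the
trace_kmem_cache_alloc()/trace_kmalloc() call sites in the hunks below,
while the entry layout and printk format are illustrative and may differ
from the authoritative header (e.g. extra fields such as memcg accounting
are omitted here).

/*
 * Sketch only -- see include/trace/events/kmem.h for the real
 * definition. UMA callers pass NUMA_NO_NODE as the final argument.
 */
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 struct kmem_cache *s, size_t bytes_req,
		 size_t bytes_alloc, gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(unsigned long,	call_site)
		__field(const void *,	ptr)
		__field(size_t,		bytes_req)
		__field(size_t,		bytes_alloc)
		__field(unsigned long,	gfp_flags)
		__field(int,		node)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		  (void *)__entry->call_site, __entry->ptr,
		  __entry->bytes_req, __entry->bytes_alloc,
		  show_gfp_flags(__entry->gfp_flags), __entry->node)
);

/* Both kmalloc and kmem_cache_alloc now instantiate this one class. */
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node),
	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);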
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c		 9
-rw-r--r--	mm/slab_common.c	21
-rw-r--r--	mm/slob.c		20
-rw-r--r--	mm/slub.c		 6
4 files changed, 25 insertions(+), 31 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 8d9d0fbf9792..2fd400203ac2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3440,8 +3440,8 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 {
 	void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
-			       cachep->object_size, cachep->size, flags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, cachep, cachep->object_size,
+			       cachep->size, flags, NUMA_NO_NODE);
 
 	return ret;
 }
@@ -3536,9 +3536,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);
 
-	trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
-				    cachep->object_size, cachep->size,
-				    flags, nodeid);
+	trace_kmem_cache_alloc(_RET_IP_, ret, cachep, cachep->object_size,
+			       cachep->size, flags, nodeid);
 
 	return ret;
 }
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2eab19043940..3d7ad992ece1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -907,9 +907,8 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = __kmalloc_large_node(size, flags, node);
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, PAGE_SIZE << get_order(size),
-				   flags, node);
+		trace_kmalloc(_RET_IP_, ret, NULL, size,
+			      PAGE_SIZE << get_order(size), flags, node);
 		return ret;
 	}
 
@@ -920,8 +919,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 
 	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
 	ret = kasan_kmalloc(s, ret, size, flags);
-	trace_kmalloc_node(caller, ret, s, size,
-			   s->size, flags, node);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags, node);
 	return ret;
 }
 
@@ -1007,8 +1005,7 @@ void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
 					    size, _RET_IP_);
 
-	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size,
-			   gfpflags, NUMA_NO_NODE);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags, NUMA_NO_NODE);
 
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
@@ -1020,7 +1017,7 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 {
 	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
 
-	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, gfpflags, node);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags, node);
 
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
@@ -1076,7 +1073,7 @@ void *kmalloc_large(size_t size, gfp_t flags)
 	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
 
 	trace_kmalloc(_RET_IP_, ret, NULL, size,
-		      PAGE_SIZE << get_order(size), flags);
+		      PAGE_SIZE << get_order(size), flags, NUMA_NO_NODE);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_large);
@@ -1085,8 +1082,8 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	void *ret = __kmalloc_large_node(size, flags, node);
 
-	trace_kmalloc_node(_RET_IP_, ret, NULL, size,
-			   PAGE_SIZE << get_order(size), flags, node);
+	trace_kmalloc(_RET_IP_, ret, NULL, size,
+		      PAGE_SIZE << get_order(size), flags, node);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_large_node);
@@ -1421,8 +1418,6 @@ EXPORT_SYMBOL(ksize);
 
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
-EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
-EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
 EXPORT_TRACEPOINT_SYMBOL(kfree);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
diff --git a/mm/slob.c b/mm/slob.c
index 96b08acd72ce..3208c56d8f82 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -507,8 +507,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		*m = size;
 		ret = (void *)m + minalign;
 
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, size + minalign, gfp, node);
+		trace_kmalloc(caller, ret, NULL, size,
+			      size + minalign, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
 
@@ -516,8 +516,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
 
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, PAGE_SIZE << order, gfp, node);
+		trace_kmalloc(caller, ret, NULL, size,
+			      PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
@@ -608,14 +608,14 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node, 0);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
-					    SLOB_UNITS(c->size) * SLOB_UNIT,
-					    flags, node);
+		trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
+				       SLOB_UNITS(c->size) * SLOB_UNIT,
+				       flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
-					    PAGE_SIZE << get_order(c->size),
-					    flags, node);
+		trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
+				       PAGE_SIZE << get_order(c->size),
+				       flags, node);
 	}
 
 	if (b && c->ctor) {
diff --git a/mm/slub.c b/mm/slub.c
index 7d7fd9d4e8fa..22e4ccf06638 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3244,7 +3244,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 	void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
-			s->size, gfpflags);
+			s->size, gfpflags, NUMA_NO_NODE);
 
 	return ret;
 }
@@ -3274,8 +3274,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
 
-	trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
-				    s->object_size, s->size, gfpflags, node);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
+			       s->size, gfpflags, node);
 
 	return ret;
 }
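
The commit message warns that tools depending on the
{kmem_cache_alloc,kmalloc}_node tracepoints will break. For an in-kernel
consumer the fix is mostly a rename plus one extra argument. A minimal
sketch follows, assuming the seven-argument prototype visible at the call
sites above; the probe and module names are made up for illustration:

#include <linux/module.h>
#include <trace/events/kmem.h>

/* Tracepoint probes receive a void *data cookie before the TP_PROTO args. */
static void probe_cache_alloc(void *data, unsigned long call_site,
			      const void *ptr, struct kmem_cache *s,
			      size_t bytes_req, size_t bytes_alloc,
			      gfp_t gfp_flags, int node)
{
	/* What used to be the UMA tracepoint now reports node == NUMA_NO_NODE. */
	pr_debug("alloc %p req=%zu alloc=%zu node=%d\n",
		 ptr, bytes_req, bytes_alloc, node);
}

static int __init probe_init(void)
{
	/* Possible because mm/slab_common.c exports this tracepoint. */
	return register_trace_kmem_cache_alloc(probe_cache_alloc, NULL);
}

static void __exit probe_exit(void)
{
	unregister_trace_kmem_cache_alloc(probe_cache_alloc, NULL);
	tracepoint_synchronize_unregister();
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");

Userspace consumers (perf scripts, bpftrace one-liners and the like) need
the analogous change: switch from kmem:kmem_cache_alloc_node and
kmem:kmalloc_node to the unified kmem:kmem_cache_alloc and kmem:kmalloc
events, and filter on the node field where the distinction still matters.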