author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-01-11 17:12:14 -0600
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-01-11 17:12:14 -0600
commit    | e8f60cd7db24f94f2dbed6bec30dd16a68fc0828 (patch)
tree      | d6e951e44f34cb93ab0f244912572cba804c61e4 /tools/perf/builtin-kmem.c
parent    | 7dd4b804e08041ff56c88bdd8da742d14b17ed25 (diff)
parent    | cf129830ee820f7fc90b98df193cd49d49344d09 (diff)
Merge tag 'perf-tools-fixes-for-v6.2-2-2023-01-11' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tools fixes from Arnaldo Carvalho de Melo:
- Make 'perf kmem' cope with the removal of the kmem:kmem_cache_alloc_node
  and kmem:kmalloc_node tracepoints in commit 11e9734bcb6a7361
  ("mm/slab_common: unify NUMA and UMA version of tracepoints"), making
  sure it works with Linux >= 6.2 as well as with older kernels where
  those tracepoints are still present.
- Also make it handle the new "node" field added to the kmem:kmalloc and
  kmem:kmem_cache_alloc tracepoints by that same commit (see the sketch
  after this list).
- Fix hardware tracing PMU address filter duplicate symbol selection,
  which was preventing matches against static functions with the same
  name present in different object files.
- Fix a regression in which linux/types.h file gets used to build the
  "BPF prologue" 'perf test' entry: the system one lacks the fmode_t
  definition used in this test, so provide that type in the test
  itself.
- Avoid build breakage with libbpf < 0.8.0 + LIBBPF_DYNAMIC=1. If the
user asks for linking with the libbpf package provided by the distro,
then it has to be >= 0.8.0. Using the libbpf supplied with the kernel
would be a fallback in that case.
- Fix the build when libbpf isn't available or explicitly disabled via
NO_LIBBPF=1.
- Don't try to install libtraceevent plugins, as libtraceevent is no
  longer in the kernel sources and the install would thus always fail.
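
The two 'perf kmem' items above boil down to feature-probing the kernel's
tracepoint descriptions at runtime rather than hard-coding which events and
fields exist. As a rough, self-contained sketch of that idea, not perf's
implementation (perf goes through its internal trace_event__tp_format() and
evsel__field() helpers), and assuming tracefs is mounted at
/sys/kernel/tracing, a standalone program can read the event "format" files
directly:

/*
 * Minimal sketch, not perf's code: does a kmem tracepoint exist at all,
 * and does its format describe a given field?  Assumes tracefs is
 * mounted at /sys/kernel/tracing.
 */
#include <stdio.h>
#include <string.h>

#define KMEM_EVENTS "/sys/kernel/tracing/events/kmem"

/* A tracepoint exists if its format file can be opened. */
static int tp_exists(const char *tp)
{
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), KMEM_EVENTS "/%s/format", tp);
        f = fopen(path, "r");
        if (!f)
                return 0;
        fclose(f);
        return 1;
}

/* A field is present if a "field:" line in the format names it. */
static int tp_has_field(const char *tp, const char *field)
{
        char path[256], line[512], needle[64];
        FILE *f;
        int found = 0;

        snprintf(path, sizeof(path), KMEM_EVENTS "/%s/format", tp);
        snprintf(needle, sizeof(needle), " %s;", field);

        f = fopen(path, "r");
        if (!f)
                return 0;
        while (!found && fgets(line, sizeof(line), f))
                found = strstr(line, "field:") && strstr(line, needle);
        fclose(f);
        return found;
}

int main(void)
{
        /* Gone on Linux >= 6.2, still there on older kernels. */
        printf("kmalloc_node exposed: %d\n", tp_exists("kmalloc_node"));
        /* Present on Linux >= 6.2 (and on the legacy *_node events). */
        printf("kmalloc has \"node\" field: %d\n",
               tp_has_field("kmalloc", "node"));
        return 0;
}

perf's own detection is the slab_legacy_tp_is_exposed() helper and the
evsel__field(evsel, "node") check visible in the diff below.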
* tag 'perf-tools-fixes-for-v6.2-2-2023-01-11' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
perf auxtrace: Fix address filter duplicate symbol selection
perf bpf: Avoid build breakage with libbpf < 0.8.0 + LIBBPF_DYNAMIC=1
perf build: Fix build error when NO_LIBBPF=1
perf tools: Don't install libtraceevent plugins as its not anymore in the kernel sources
perf kmem: Support field "node" in evsel__process_alloc_event() coping with recent tracepoint restructuring
perf kmem: Support legacy tracepoints
perf build: Properly guard libbpf includes
perf tests bpf prologue: Fix bpf-script-test-prologue test compile issue with clang
Diffstat (limited to 'tools/perf/builtin-kmem.c')
-rw-r--r-- | tools/perf/builtin-kmem.c | 65
1 file changed, 50 insertions, 15 deletions
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index e20656c431a4..8ae0a1535293 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -26,6 +26,7 @@
 #include "util/string2.h"
 
 #include <linux/kernel.h>
+#include <linux/numa.h>
 #include <linux/rbtree.h>
 #include <linux/string.h>
 #include <linux/zalloc.h>
@@ -185,22 +186,33 @@ static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *s
 	total_allocated += bytes_alloc;
 
 	nr_allocs++;
-	return 0;
-}
 
-static int evsel__process_alloc_node_event(struct evsel *evsel, struct perf_sample *sample)
-{
-	int ret = evsel__process_alloc_event(evsel, sample);
+	/*
+	 * Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
+	 * version of tracepoints") adds the field "node" into the
+	 * tracepoints 'kmalloc' and 'kmem_cache_alloc'.
+	 *
+	 * The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
+	 * also contain the field "node".
+	 *
+	 * If the tracepoint contains the field "node" the tool stats the
+	 * cross allocation.
+	 */
+	if (evsel__field(evsel, "node")) {
+		int node1, node2;
 
-	if (!ret) {
-		int node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu}),
-		    node2 = evsel__intval(evsel, sample, "node");
+		node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
+		node2 = evsel__intval(evsel, sample, "node");
 
-		if (node1 != node2)
+		/*
+		 * If the field "node" is NUMA_NO_NODE (-1), we don't take it
+		 * as a cross allocation.
+		 */
+		if ((node2 != NUMA_NO_NODE) && (node1 != node2))
 			nr_cross_allocs++;
 	}
 
-	return ret;
+	return 0;
 }
 
 static int ptr_cmp(void *, void *);
@@ -1369,8 +1381,8 @@ static int __cmd_kmem(struct perf_session *session)
 		/* slab allocator */
 		{ "kmem:kmalloc", evsel__process_alloc_event, },
 		{ "kmem:kmem_cache_alloc", evsel__process_alloc_event, },
-		{ "kmem:kmalloc_node", evsel__process_alloc_node_event, },
-		{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_node_event, },
+		{ "kmem:kmalloc_node", evsel__process_alloc_event, },
+		{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
 		{ "kmem:kfree", evsel__process_free_event, },
 		{ "kmem:kmem_cache_free", evsel__process_free_event, },
 		/* page allocator */
@@ -1824,6 +1836,19 @@ static int parse_line_opt(const struct option *opt __maybe_unused,
 	return 0;
 }
 
+static bool slab_legacy_tp_is_exposed(void)
+{
+	/*
+	 * The tracepoints "kmem:kmalloc_node" and
+	 * "kmem:kmem_cache_alloc_node" have been removed on the latest
+	 * kernel, if the tracepoint "kmem:kmalloc_node" is existed it
+	 * means the tool is running on an old kernel, we need to
+	 * rollback to support these legacy tracepoints.
+	 */
+	return IS_ERR(trace_event__tp_format("kmem", "kmalloc_node")) ?
+		false : true;
+}
+
 static int __cmd_record(int argc, const char **argv)
 {
 	const char * const record_args[] = {
@@ -1831,22 +1856,28 @@ static int __cmd_record(int argc, const char **argv)
 	};
 	const char * const slab_events[] = {
 	"-e", "kmem:kmalloc",
-	"-e", "kmem:kmalloc_node",
 	"-e", "kmem:kfree",
 	"-e", "kmem:kmem_cache_alloc",
-	"-e", "kmem:kmem_cache_alloc_node",
 	"-e", "kmem:kmem_cache_free",
 	};
+	const char * const slab_legacy_events[] = {
+	"-e", "kmem:kmalloc_node",
+	"-e", "kmem:kmem_cache_alloc_node",
+	};
 	const char * const page_events[] = {
 	"-e", "kmem:mm_page_alloc",
 	"-e", "kmem:mm_page_free",
 	};
 	unsigned int rec_argc, i, j;
 	const char **rec_argv;
+	unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();
 
 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
 
-	if (kmem_slab)
+	if (kmem_slab) {
 		rec_argc += ARRAY_SIZE(slab_events);
+		if (slab_legacy_tp_exposed)
+			rec_argc += ARRAY_SIZE(slab_legacy_events);
+	}
 	if (kmem_page)
 		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
@@ -1861,6 +1892,10 @@ static int __cmd_record(int argc, const char **argv)
 	if (kmem_slab) {
 		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
 			rec_argv[i] = strdup(slab_events[j]);
+		if (slab_legacy_tp_exposed) {
+			for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
+				rec_argv[i] = strdup(slab_legacy_events[j]);
+		}
 	}
 	if (kmem_page) {
 		rec_argv[i++] = strdup("-g");
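
For readers skimming the diff, the new cross-node accounting rule is small
enough to restate on its own: an allocation is only counted as a cross
allocation when the tracepoint reports a real node that differs from the
node of the CPU the sample was taken on; NUMA_NO_NODE (-1) is never counted.
A self-contained sketch of just that rule, with made-up sample values:

/*
 * Standalone restatement of the cross-allocation check added in the
 * hunk above; the node values fed to it here are illustrative only.
 */
#include <stdio.h>

#define NUMA_NO_NODE (-1)

static unsigned long nr_cross_allocs;

/* cpu_node: node of the allocating CPU; alloc_node: the "node" field. */
static void account_alloc(int cpu_node, int alloc_node)
{
        /* NUMA_NO_NODE means "no node requested": never a cross alloc. */
        if (alloc_node != NUMA_NO_NODE && cpu_node != alloc_node)
                nr_cross_allocs++;
}

int main(void)
{
        account_alloc(0, 0);            /* same node      -> not counted */
        account_alloc(0, 1);            /* different node -> counted     */
        account_alloc(0, NUMA_NO_NODE); /* no node        -> not counted */
        printf("nr_cross_allocs = %lu\n", nr_cross_allocs); /* prints 1 */
        return 0;
}

This mirrors the (node2 != NUMA_NO_NODE) && (node1 != node2) test that
evsel__process_alloc_event() now applies to every allocation sample.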