path: root/tools/perf/util
authorLinus Torvalds <torvalds@linux-foundation.org>2023-11-03 08:17:38 -1000
committerLinus Torvalds <torvalds@linux-foundation.org>2023-11-03 08:17:38 -1000
commit7ab89417ed235f56d84c7893d38d4905e38d2692 (patch)
tree0980734f4e492a09e68d820fedce20465c69e3df /tools/perf/util
parent31e5f934ff962820995c82a6953176a1c7d18ff5 (diff)
parentfed3a1be6433e15833068c701bfde7b422d8b988 (diff)
Merge tag 'perf-tools-for-v6.7-1-2023-11-01' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools
Pull perf tools updates from Namhyung Kim:
 "Build:

   - Compile BPF programs by default if clang (>= 12.0.1) is available
     to enable more features like kernel lock contention, off-cpu
     profiling, kwork, sample filtering and so on. This can be disabled
     by passing BUILD_BPF_SKEL=0 to make.

   - Produce better error messages for bison on debug builds (make
     DEBUG=1) by defining the YYDEBUG symbol internally.

  perf record:

   - Track sideband events (like FORK/MMAP) from all CPUs even if perf
     record targets a subset of CPUs only (using the -C option).
     Otherwise it may lose some information that happened on a CPU out
     of the target list.

   - Fix checking of the raw sched_switch tracepoint arguments using
     system BTF. This affects off-cpu profiling, which attaches a BPF
     program to the raw tracepoint.

  perf lock contention:

   - Add the --lock-cgroup option to see contention by cgroup. This
     should be used with BPF only (using the -b option).

       $ sudo perf lock con -ab --lock-cgroup -- sleep 1
        contended   total wait     max wait     avg wait   cgroup

              835     14.06 ms     41.19 us     16.83 us   /system.slice/led.service
               25    122.38 us     13.77 us      4.89 us   /
               44     23.73 us      3.87 us       539 ns   /user.slice/user-657345.slice/session-c4.scope
                1       491 ns       491 ns       491 ns   /system.slice/connectd.service

   - Add the -G/--cgroup-filter option to see contention only for the
     given cgroups. This can be useful when you have identified a
     cgroup in the above command and want to investigate it further.
     It also works with other output options like -t/--threads and
     -l/--lock-addr.

       $ sudo perf lock con -ab -G /user.slice/user-657345.slice/session-c4.scope -- sleep 1
        contended   total wait     max wait     avg wait         type   caller

                8     77.11 us     17.98 us      9.64 us     spinlock   futex_wake+0xc8
                2     24.56 us     14.66 us     12.28 us     spinlock   tick_do_update_jiffies64+0x25
                1      4.97 us      4.97 us      4.97 us     spinlock   futex_q_lock+0x2a

   - Use a per-cpu array for better spinlock tracking. This improves
     the performance of the BPF program and avoids nested contention on
     a lock in the BPF hash map.

   - Update the callstack check for PowerPC. To find a representative
     caller of a lock, it needs to look up the call stacks. It ends the
     lookup when it sees 0 in the call stack buffer. However, PowerPC
     call stacks can have 0 values in the beginning, so skip them when
     valid call stacks are expected afterwards.

  perf kwork:

   - Support the 'sched' class (for the -k option) so that it can see
     task scheduling events (using the sched_switch tracepoint) as well
     as irq and workqueue items.

   - Add the perf kwork top subcommand to show more accurate CPU
     utilization with the sched class above. It works both with
     recorded data (using the perf kwork record command) and with BPF
     (using the -b option). Unlike the perf top command, it does not
     support interactive mode (yet).

       $ sudo perf kwork top -b -k sched
       Starting trace, Hit <Ctrl+C> to stop and report
       ^C
       Total  : 160702.425 ms, 8 cpus
       %Cpu(s):  36.00% id, 0.00% hi, 0.00% si
       %Cpu0   [||||||||||||||||||              61.66%]
       %Cpu1   [||||||||||||||||||              61.27%]
       %Cpu2   [|||||||||||||||||||             66.40%]
       %Cpu3   [||||||||||||||||||              61.28%]
       %Cpu4   [||||||||||||||||||              61.82%]
       %Cpu5   [|||||||||||||||||||||||         77.41%]
       %Cpu6   [||||||||||||||||||              61.73%]
       %Cpu7   [||||||||||||||||||              63.25%]

             PID     SPID    %CPU           RUNTIME  COMMMAND
         -------------------------------------------------------------
               0        0   38.72       8089.463 ms  [swapper/1]
               0        0   38.71       8084.547 ms  [swapper/3]
               0        0   38.33       8007.532 ms  [swapper/0]
               0        0   38.26       7992.985 ms  [swapper/6]
               0        0   38.17       7971.865 ms  [swapper/4]
               0        0   36.74       7447.765 ms  [swapper/7]
               0        0   33.59       6486.942 ms  [swapper/2]
               0        0   22.58       3771.268 ms  [swapper/5]
            9545     9351    2.48        447.136 ms  sched-messaging
            9574     9351    2.09        418.583 ms  sched-messaging
            9724     9351    2.05        372.407 ms  sched-messaging
            9531     9351    2.01        368.804 ms  sched-messaging
            9512     9351    2.00        362.250 ms  sched-messaging
            9514     9351    1.95        357.767 ms  sched-messaging
            9538     9351    1.86        384.476 ms  sched-messaging
            9712     9351    1.84        386.490 ms  sched-messaging
            9723     9351    1.83        380.021 ms  sched-messaging
            9722     9351    1.82        382.738 ms  sched-messaging
            9517     9351    1.81        354.794 ms  sched-messaging
            9559     9351    1.79        344.305 ms  sched-messaging
            9725     9351    1.77        365.315 ms  sched-messaging
       <SNIP>

   - Add hard/soft-irq statistics to perf kwork top. This will show the
     total CPU utilization with IRQ stats like below:

       $ sudo perf kwork top -b -k sched,irq,softirq
       Starting trace, Hit <Ctrl+C> to stop and report
       ^C
       Total  : 12554.889 ms, 8 cpus
       %Cpu(s):  96.23% id, 0.10% hi, 0.19% si    <---- here
       %Cpu0   [|                                 4.60%]
       %Cpu1   [|                                 4.59%]
       %Cpu2   [                                  2.73%]
       %Cpu3   [|                                 3.81%]
       <SNIP>

  perf bench:

   - Add the -G/--cgroups option to perf bench sched pipe. The pipe
     bench is good for measuring context switch overhead. With this
     option, it puts the reader and writer tasks in separate cgroups to
     force context switches between two different cgroups. The tasks
     also need their CPU affinity set to a single CPU to accurately
     measure the impact of cgroup context switches.

       $ sudo perf stat -e context-switches,cgroup-switches -- \
       > taskset -c 0 perf bench sched pipe -l 100000
       # Running 'sched/pipe' benchmark:
       # Executed 100000 pipe operations between two processes

            Total time: 0.307 [sec]

              3.078180 usecs/op
                324867 ops/sec

        Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 100000':

               200,026      context-switches
                    63      cgroup-switches

           0.321637922 seconds time elapsed

     You can see a small number of cgroup-switches because both the
     write and read tasks are in the same cgroup.

       $ sudo mkdir /sys/fs/cgroup/{AAA,BBB}

       $ sudo perf stat -e context-switches,cgroup-switches -- \
       > taskset -c 0 perf bench sched pipe -l 100000 -G AAA,BBB
       # Running 'sched/pipe' benchmark:
       # Executed 100000 pipe operations between two processes

            Total time: 0.351 [sec]

              3.512990 usecs/op
                284657 ops/sec

        Performance counter stats for 'taskset -c 0 perf bench sched pipe -l 100000 -G AAA,BBB':

               200,020      context-switches
               200,019      cgroup-switches

           0.365034567 seconds time elapsed

     Now context-switches and cgroup-switches are almost the same, and
     you can see that the pipe operations took a little longer.

   - Kill child processes when perf bench sched messaging exits
     abnormally. Otherwise it would leave the children doing
     unnecessary work.

  perf test:

   - Fix various shellcheck issues in the tests written in shell
     script.

   - Skip tests when their preconditions are not satisfied:

      - the object code reading test for non-text section addresses

      - the CoreSight test if the cs_etm// event is not available

      - the lock contention test if there are not enough CPUs

  Event parsing:

   - Make PMU alias name loading lazy to reduce the startup time in the
     event parsing code for perf record, stat and others in the general
     case.

   - Lazily compute the PMU default config. In the same sense, delay
     PMU initialization until it's really needed, to reduce the startup
     cost.

   - Fix event term values that are raw events. The event specification
     can have several terms including the event name. But sometimes
     that clashes with the raw event encoding, which starts with 'r'
     and has hex digits. For example, an event named 'read' should be
     processed as a normal event, but it was mistreated as a raw
     encoding and caused a failure.

       $ perf stat -e 'uncore_imc_free_running/event=read/' -a sleep 1
       event syntax error: '..nning/event=read/'
                                         \___ parser error
       Run 'perf list' for a list of valid events

        Usage: perf stat [<options>] [<command>]

           -e, --event <event>   event selector. use 'perf list' to list
                                 available events

  Event metrics:

   - Add a "Compat" regex to match events with multiple identifiers.

   - Usual updates for Intel, Power10, Arm telemetry/CMN and AmpereOne.

  Misc:

   - Assorted memory leak fixes and footprint reduction.

   - Add "bpf_skeletons" to perf version --build-options so that users
     can easily check whether their perf tools have BPF support.

   - Fix an unaligned access in the Intel-PT packet decoder found by
     the undefined-behavior sanitizer.

   - Avoid frequency mode for the dummy event. Surprisingly it would
     impact kernel timer tick handler performance by force-iterating
     all PMU events.

   - Update bash shell completion for events and metrics"

* tag 'perf-tools-for-v6.7-1-2023-11-01' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools: (187 commits)
  perf vendor events intel: Update tsx_cycles_per_elision metrics
  perf vendor events intel: Update bonnell version number to v5
  perf vendor events intel: Update westmereex events to v4
  perf vendor events intel: Update meteorlake events to v1.06
  perf vendor events intel: Update knightslanding events to v16
  perf vendor events intel: Add typo fix for ivybridge FP
  perf vendor events intel: Update a spelling in haswell/haswellx
  perf vendor events intel: Update emeraldrapids to v1.01
  perf vendor events intel: Update alderlake/alderlake events to v1.23
  perf build: Disable BPF skeletons if clang version is < 12.0.1
  perf callchain: Fix spelling mistake "statisitcs" -> "statistics"
  perf report: Fix spelling mistake "heirachy" -> "hierarchy"
  perf python: Fix binding linkage due to rename and move of evsel__increase_rlimit()
  perf tests: test_arm_coresight: Simplify source iteration
  perf vendor events intel: Add tigerlake two metrics
  perf vendor events intel: Add broadwellde two metrics
  perf vendor events intel: Fix broadwellde tma_info_system_dram_bw_use metric
  perf mem_info: Add and use map_symbol__exit and addr_map_symbol__exit
  perf callchain: Minor layout changes to callchain_list
  perf callchain: Make brtype_stat in callchain_list optional
  ...
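A note on the "event=read" clash described above: raw event encodings are written as 'r' followed by hex digits (e.g. "r1a"), and every letter in "ead" happens to be a valid hex digit, so a greedy raw-event rule swallows the name "read". Below is a minimal sketch of that ambiguity, assuming a naive classifier; this is not perf's actual parser code.

    /*
     * Naive raw-event classifier: 'r' followed by one or more hex digits.
     * This is exactly the rule that mis-classifies the term value "read".
     */
    #include <ctype.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool looks_like_raw_event(const char *s)
    {
            if (s[0] != 'r' || s[1] == '\0')
                    return false;
            for (s++; *s; s++) {
                    if (!isxdigit((unsigned char)*s))
                            return false;
            }
            return true;
    }

    int main(void)
    {
            printf("r1a  -> %d\n", looks_like_raw_event("r1a"));  /* 1: a real raw event */
            printf("read -> %d\n", looks_like_raw_event("read")); /* 1: the clash, r + "ead" */
            return 0;
    }

The sketch only shows why a tie-break between event names and raw encodings is needed at all; the actual resolution lives in the parse-events changes below.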
Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/Build  2
-rw-r--r--  tools/perf/util/arm-spe.h  4
-rw-r--r--  tools/perf/util/bpf-filter.y  4
-rw-r--r--  tools/perf/util/bpf_counter.c  5
-rw-r--r--  tools/perf/util/bpf_kwork_top.c  308
-rw-r--r--  tools/perf/util/bpf_lock_contention.c  51
-rw-r--r--  tools/perf/util/bpf_off_cpu.c  13
-rw-r--r--  tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c  16
-rw-r--r--  tools/perf/util/bpf_skel/kwork_top.bpf.c  338
-rw-r--r--  tools/perf/util/bpf_skel/lock_contention.bpf.c  145
-rw-r--r--  tools/perf/util/bpf_skel/lock_data.h  3
-rw-r--r--  tools/perf/util/bpf_skel/vmlinux/.gitignore  1
-rw-r--r--  tools/perf/util/branch.c  4
-rw-r--r--  tools/perf/util/branch.h  4
-rw-r--r--  tools/perf/util/callchain.c  76
-rw-r--r--  tools/perf/util/callchain.h  18
-rw-r--r--  tools/perf/util/cgroup.c  63
-rw-r--r--  tools/perf/util/cgroup.h  5
-rw-r--r--  tools/perf/util/cs-etm.c  106
-rw-r--r--  tools/perf/util/cs-etm.h  2
-rw-r--r--  tools/perf/util/data.c  12
-rw-r--r--  tools/perf/util/dlfilter.c  4
-rw-r--r--  tools/perf/util/dso.c  2
-rw-r--r--  tools/perf/util/dso.h  2
-rw-r--r--  tools/perf/util/env.c  6
-rw-r--r--  tools/perf/util/evlist.c  23
-rw-r--r--  tools/perf/util/evlist.h  1
-rw-r--r--  tools/perf/util/evsel.c  43
-rw-r--r--  tools/perf/util/evsel.h  5
-rw-r--r--  tools/perf/util/expr.c  2
-rw-r--r--  tools/perf/util/expr.y  2
-rw-r--r--  tools/perf/util/header.c  60
-rw-r--r--  tools/perf/util/hisi-ptt.c  4
-rw-r--r--  tools/perf/util/hist.c  32
-rw-r--r--  tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c  43
-rw-r--r--  tools/perf/util/intel-pt.c  2
-rw-r--r--  tools/perf/util/intel-pt.h  3
-rw-r--r--  tools/perf/util/jitdump.c  1
-rw-r--r--  tools/perf/util/kwork.h  61
-rw-r--r--  tools/perf/util/lock-contention.h  10
-rw-r--r--  tools/perf/util/machine.c  37
-rw-r--r--  tools/perf/util/machine.h  1
-rw-r--r--  tools/perf/util/map_symbol.c  15
-rw-r--r--  tools/perf/util/map_symbol.h  4
-rw-r--r--  tools/perf/util/mem-events.c  3
-rw-r--r--  tools/perf/util/metricgroup.c  2
-rw-r--r--  tools/perf/util/parse-events.c  213
-rw-r--r--  tools/perf/util/parse-events.h  34
-rw-r--r--  tools/perf/util/parse-events.l  6
-rw-r--r--  tools/perf/util/parse-events.y  68
-rw-r--r--  tools/perf/util/pfm.c  15
-rw-r--r--  tools/perf/util/pmu.c  173
-rw-r--r--  tools/perf/util/pmu.h  34
-rw-r--r--  tools/perf/util/pmu.y  4
-rw-r--r--  tools/perf/util/pmus.c  18
-rw-r--r--  tools/perf/util/print-events.c  28
-rw-r--r--  tools/perf/util/python-ext-sources  1
-rw-r--r--  tools/perf/util/python.c  2
-rw-r--r--  tools/perf/util/rlimit.c  28
-rw-r--r--  tools/perf/util/rlimit.h  11
-rw-r--r--  tools/perf/util/rwsem.c  34
-rw-r--r--  tools/perf/util/rwsem.h  11
-rw-r--r--  tools/perf/util/sort.c  2
-rw-r--r--  tools/perf/util/string.c  48
-rw-r--r--  tools/perf/util/string2.h  1
-rw-r--r--  tools/perf/util/svghelper.c  5
-rw-r--r--  tools/perf/util/symbol-elf.c  4
-rw-r--r--  tools/perf/util/symbol.c  15
-rw-r--r--  tools/perf/util/trace-event-info.c  3
69 files changed, 1754 insertions, 552 deletions
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 2f76230958ad..fb661c48992f 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -49,6 +49,7 @@ perf-y += dso.o
perf-y += dsos.o
perf-y += symbol.o
perf-y += symbol_fprintf.o
+perf-y += map_symbol.o
perf-y += color.o
perf-y += color_config.o
perf-y += metricgroup.o
@@ -165,6 +166,7 @@ endif
ifeq ($(CONFIG_LIBTRACEEVENT),y)
perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork.o
+ perf-$(CONFIG_PERF_BPF_SKEL) += bpf_kwork_top.o
endif
perf-$(CONFIG_LIBELF) += symbol-elf.o
diff --git a/tools/perf/util/arm-spe.h b/tools/perf/util/arm-spe.h
index 98d3235781c3..4f4900c18f3e 100644
--- a/tools/perf/util/arm-spe.h
+++ b/tools/perf/util/arm-spe.h
@@ -27,5 +27,7 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
int arm_spe_process_auxtrace_info(union perf_event *event,
struct perf_session *session);
-struct perf_event_attr *arm_spe_pmu_default_config(struct perf_pmu *arm_spe_pmu);
+void arm_spe_pmu_default_config(const struct perf_pmu *arm_spe_pmu,
+ struct perf_event_attr *attr);
+
#endif
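The arm-spe.h change reflects the new PMU default-config convention from this series: instead of each PMU callback allocating and returning a perf_event_attr, the core passes in the attr to be filled, which makes the computation easy to defer. A hedged sketch of the two calling conventions, using hypothetical names (fake_attr, default_config_old/new) rather than perf's real types:

    #include <stdlib.h>

    struct fake_attr { unsigned long config; }; /* stand-in for perf_event_attr */

    /* Old style: the callee allocates; every caller must free the result,
     * and the work happens eagerly at PMU creation time. */
    static struct fake_attr *default_config_old(void)
    {
            struct fake_attr *attr = calloc(1, sizeof(*attr));

            if (attr)
                    attr->config = 0x1;
            return attr;
    }

    /* New style: the caller owns the storage and the callee only fills in
     * fields, so the call can be deferred until the config is first used. */
    static void default_config_new(struct fake_attr *attr)
    {
            attr->config = 0x1;
    }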
diff --git a/tools/perf/util/bpf-filter.y b/tools/perf/util/bpf-filter.y
index 5dfa948fc986..0e4d6de3c2ad 100644
--- a/tools/perf/util/bpf-filter.y
+++ b/tools/perf/util/bpf-filter.y
@@ -3,6 +3,10 @@
%{
+#ifndef NDEBUG
+#define YYDEBUG 1
+#endif
+
#include <stdio.h>
#include <string.h>
#include <linux/compiler.h>
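Defining YYDEBUG makes bison compile its trace machinery into the generated parser, and the NDEBUG guard keeps that code out of release builds. A small sketch of how such traces are typically switched on at runtime; yydebug is the standard global bison emits when YYDEBUG is non-zero, and enable_parser_traces is a hypothetical helper:

    #ifndef NDEBUG
    #define YYDEBUG 1 /* bison emits the yydebug global and trace code */
    #endif

    #if YYDEBUG
    extern int yydebug; /* provided by the bison-generated parser */
    #endif

    /* Hypothetical helper: turn on parser traces (to stderr) in debug builds. */
    static void enable_parser_traces(void)
    {
    #if YYDEBUG
            yydebug = 1;
    #endif
    }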
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 6732cbbcf9b3..7f9b0e46e008 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -104,7 +104,7 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
struct bpf_prog_profiler_bpf *skel;
struct bpf_counter *counter;
struct bpf_program *prog;
- char *prog_name;
+ char *prog_name = NULL;
int prog_fd;
int err;
@@ -155,10 +155,12 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
assert(skel != NULL);
counter->skel = skel;
list_add(&counter->list, &evsel->bpf_counter_list);
+ free(prog_name);
close(prog_fd);
return 0;
err_out:
bpf_prog_profiler_bpf__destroy(skel);
+ free(prog_name);
free(counter);
close(prog_fd);
return -1;
@@ -180,6 +182,7 @@ static int bpf_program_profiler__load(struct evsel *evsel, struct target *target
(*p != '\0' && *p != ',')) {
pr_err("Failed to parse bpf prog ids %s\n",
target->bpf_str);
+ free(bpf_str_);
return -1;
}
diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c
new file mode 100644
index 000000000000..035e02272790
--- /dev/null
+++ b/tools/perf/util/bpf_kwork_top.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bpf_kwork_top.c
+ *
+ * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
+ */
+
+#include <time.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <linux/time64.h>
+
+#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/kwork.h"
+
+#include <bpf/bpf.h>
+#include <perf/cpumap.h>
+
+#include "util/bpf_skel/kwork_top.skel.h"
+
+/*
+ * This should be in sync with "util/kwork_top.bpf.c"
+ */
+#define MAX_COMMAND_LEN 16
+
+struct time_data {
+ __u64 timestamp;
+};
+
+struct work_data {
+ __u64 runtime;
+};
+
+struct task_data {
+ __u32 tgid;
+ __u32 is_kthread;
+ char comm[MAX_COMMAND_LEN];
+};
+
+struct work_key {
+ __u32 type;
+ __u32 pid;
+ __u64 task_p;
+};
+
+struct task_key {
+ __u32 pid;
+ __u32 cpu;
+};
+
+struct kwork_class_bpf {
+ struct kwork_class *class;
+ void (*load_prepare)(void);
+};
+
+static struct kwork_top_bpf *skel;
+
+void perf_kwork__top_start(void)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ skel->bss->from_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+ skel->bss->enabled = 1;
+ pr_debug("perf kwork top start at: %lld\n", skel->bss->from_timestamp);
+}
+
+void perf_kwork__top_finish(void)
+{
+ struct timespec ts;
+
+ skel->bss->enabled = 0;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ skel->bss->to_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
+ pr_debug("perf kwork top finish at: %lld\n", skel->bss->to_timestamp);
+}
+
+static void irq_load_prepare(void)
+{
+ bpf_program__set_autoload(skel->progs.on_irq_handler_entry, true);
+ bpf_program__set_autoload(skel->progs.on_irq_handler_exit, true);
+}
+
+static struct kwork_class_bpf kwork_irq_bpf = {
+ .load_prepare = irq_load_prepare,
+};
+
+static void softirq_load_prepare(void)
+{
+ bpf_program__set_autoload(skel->progs.on_softirq_entry, true);
+ bpf_program__set_autoload(skel->progs.on_softirq_exit, true);
+}
+
+static struct kwork_class_bpf kwork_softirq_bpf = {
+ .load_prepare = softirq_load_prepare,
+};
+
+static void sched_load_prepare(void)
+{
+ bpf_program__set_autoload(skel->progs.on_switch, true);
+}
+
+static struct kwork_class_bpf kwork_sched_bpf = {
+ .load_prepare = sched_load_prepare,
+};
+
+static struct kwork_class_bpf *
+kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
+ [KWORK_CLASS_IRQ] = &kwork_irq_bpf,
+ [KWORK_CLASS_SOFTIRQ] = &kwork_softirq_bpf,
+ [KWORK_CLASS_SCHED] = &kwork_sched_bpf,
+};
+
+static bool valid_kwork_class_type(enum kwork_class_type type)
+{
+ return type >= 0 && type < KWORK_CLASS_MAX;
+}
+
+static int setup_filters(struct perf_kwork *kwork)
+{
+ u8 val = 1;
+ int i, nr_cpus, fd;
+ struct perf_cpu_map *map;
+
+ if (kwork->cpu_list) {
+ fd = bpf_map__fd(skel->maps.kwork_top_cpu_filter);
+ if (fd < 0) {
+ pr_debug("Invalid cpu filter fd\n");
+ return -1;
+ }
+
+ map = perf_cpu_map__new(kwork->cpu_list);
+ if (!map) {
+ pr_debug("Invalid cpu_list\n");
+ return -1;
+ }
+
+ nr_cpus = libbpf_num_possible_cpus();
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
+
+ if (cpu.cpu >= nr_cpus) {
+ perf_cpu_map__put(map);
+ pr_err("Requested cpu %d too large\n", cpu.cpu);
+ return -1;
+ }
+ bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
+ }
+ perf_cpu_map__put(map);
+
+ skel->bss->has_cpu_filter = 1;
+ }
+
+ return 0;
+}
+
+int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
+{
+ struct bpf_program *prog;
+ struct kwork_class *class;
+ struct kwork_class_bpf *class_bpf;
+ enum kwork_class_type type;
+
+ skel = kwork_top_bpf__open();
+ if (!skel) {
+ pr_debug("Failed to open kwork top skeleton\n");
+ return -1;
+ }
+
+ /*
+ * set all progs to non-autoload,
+ * then set corresponding progs according to config
+ */
+ bpf_object__for_each_program(prog, skel->obj)
+ bpf_program__set_autoload(prog, false);
+
+ list_for_each_entry(class, &kwork->class_list, list) {
+ type = class->type;
+ if (!valid_kwork_class_type(type) ||
+ !kwork_class_bpf_supported_list[type]) {
+ pr_err("Unsupported bpf trace class %s\n", class->name);
+ goto out;
+ }
+
+ class_bpf = kwork_class_bpf_supported_list[type];
+ class_bpf->class = class;
+
+ if (class_bpf->load_prepare)
+ class_bpf->load_prepare();
+ }
+
+ if (kwork_top_bpf__load(skel)) {
+ pr_debug("Failed to load kwork top skeleton\n");
+ goto out;
+ }
+
+ if (setup_filters(kwork))
+ goto out;
+
+ if (kwork_top_bpf__attach(skel)) {
+ pr_debug("Failed to attach kwork top skeleton\n");
+ goto out;
+ }
+
+ return 0;
+
+out:
+ kwork_top_bpf__destroy(skel);
+ return -1;
+}
+
+static void read_task_info(struct kwork_work *work)
+{
+ int fd;
+ struct task_data data;
+ struct task_key key = {
+ .pid = work->id,
+ .cpu = work->cpu,
+ };
+
+ fd = bpf_map__fd(skel->maps.kwork_top_tasks);
+ if (fd < 0) {
+ pr_debug("Invalid top tasks map fd\n");
+ return;
+ }
+
+ if (!bpf_map_lookup_elem(fd, &key, &data)) {
+ work->tgid = data.tgid;
+ work->is_kthread = data.is_kthread;
+ work->name = strdup(data.comm);
+ }
+}
+static int add_work(struct perf_kwork *kwork, struct work_key *key,
+ struct work_data *data, int cpu)
+{
+ struct kwork_class_bpf *bpf_trace;
+ struct kwork_work *work;
+ struct kwork_work tmp = {
+ .id = key->pid,
+ .cpu = cpu,
+ .name = NULL,
+ };
+ enum kwork_class_type type = key->type;
+
+ if (!valid_kwork_class_type(type)) {
+ pr_debug("Invalid class type %d to add work\n", type);
+ return -1;
+ }
+
+ bpf_trace = kwork_class_bpf_supported_list[type];
+ tmp.class = bpf_trace->class;
+
+ work = perf_kwork_add_work(kwork, tmp.class, &tmp);
+ if (!work)
+ return -1;
+
+ work->total_runtime = data->runtime;
+ read_task_info(work);
+
+ return 0;
+}
+
+int perf_kwork__top_read_bpf(struct perf_kwork *kwork)
+{
+ int i, fd, nr_cpus;
+ struct work_data *data;
+ struct work_key key, prev;
+
+ fd = bpf_map__fd(skel->maps.kwork_top_works);
+ if (fd < 0) {
+ pr_debug("Invalid top runtime fd\n");
+ return -1;
+ }
+
+ nr_cpus = libbpf_num_possible_cpus();
+ data = calloc(nr_cpus, sizeof(struct work_data));
+ if (!data)
+ return -1;
+
+ memset(&prev, 0, sizeof(prev));
+ while (!bpf_map_get_next_key(fd, &prev, &key)) {
+ if ((bpf_map_lookup_elem(fd, &key, data)) != 0) {
+ pr_debug("Failed to lookup top elem\n");
+ return -1;
+ }
+
+ for (i = 0; i < nr_cpus; i++) {
+ if (data[i].runtime == 0)
+ continue;
+
+ if (add_work(kwork, &key, &data[i], i))
+ return -1;
+ }
+ prev = key;
+ }
+ free(data);
+
+ return 0;
+}
+
+void perf_kwork__top_cleanup_bpf(void)
+{
+ kwork_top_bpf__destroy(skel);
+}
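perf_kwork__top_read_bpf() above leans on the userspace semantics of BPF_MAP_TYPE_PERCPU_HASH: a single bpf_map_lookup_elem() fills one value per possible CPU. A self-contained sketch of that pattern, assuming a map with __u64 keys and one __u64 counter per CPU (sum_percpu_map is a hypothetical helper and fd is assumed to be a valid map fd):

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>
    #include <stdlib.h>

    /* Sum one __u64 counter across all possible CPUs for every key in a
     * BPF_MAP_TYPE_PERCPU_HASH map. */
    static int sum_percpu_map(int fd, __u64 *total)
    {
            int nr_cpus = libbpf_num_possible_cpus();
            __u64 key = 0, next_key, *vals;

            if (nr_cpus < 0)
                    return -1;

            vals = calloc(nr_cpus, sizeof(*vals));
            if (!vals)
                    return -1;

            *total = 0;
            /* Like the code above, start from a zeroed key: get_next_key on a
             * non-existent key returns the first real key in the map. */
            while (!bpf_map_get_next_key(fd, &key, &next_key)) {
                    /* For per-cpu maps, one lookup fills nr_cpus values. */
                    if (bpf_map_lookup_elem(fd, &next_key, vals) == 0) {
                            for (int i = 0; i < nr_cpus; i++)
                                    *total += vals[i];
                    }
                    key = next_key;
            }
            free(vals);
            return 0;
    }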
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index e7dddf0127bc..e105245eb905 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
@@ -20,7 +21,7 @@ static struct lock_contention_bpf *skel;
int lock_contention_prepare(struct lock_contention *con)
{
int i, fd;
- int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1;
+ int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
struct evlist *evlist = con->evlist;
struct target *target = con->target;
@@ -50,6 +51,8 @@ int lock_contention_prepare(struct lock_contention *con)
ntasks = perf_thread_map__nr(evlist->core.threads);
if (con->filters->nr_types)
ntypes = con->filters->nr_types;
+ if (con->filters->nr_cgrps)
+ ncgrps = con->filters->nr_cgrps;
/* resolve lock name filters to addr */
if (con->filters->nr_syms) {
@@ -84,6 +87,7 @@ int lock_contention_prepare(struct lock_contention *con)
bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
+ bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);
if (lock_contention_bpf__load(skel) < 0) {
pr_err("Failed to load lock-contention BPF skeleton\n");
@@ -145,12 +149,29 @@ int lock_contention_prepare(struct lock_contention *con)
bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
}
+ if (con->filters->nr_cgrps) {
+ u8 val = 1;
+
+ skel->bss->has_cgroup = 1;
+ fd = bpf_map__fd(skel->maps.cgroup_filter);
+
+ for (i = 0; i < con->filters->nr_cgrps; i++)
+ bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
+ }
+
/* these don't work well if in the rodata section */
skel->bss->stack_skip = con->stack_skip;
skel->bss->aggr_mode = con->aggr_mode;
skel->bss->needs_callstack = con->save_callstack;
skel->bss->lock_owner = con->owner;
+ if (con->aggr_mode == LOCK_AGGR_CGROUP) {
+ if (cgroup_is_v2("perf_event"))
+ skel->bss->use_cgroup_v2 = 1;
+
+ read_all_cgroups(&con->cgroups);
+ }
+
bpf_program__set_autoload(skel->progs.collect_lock_syms, false);
lock_contention_bpf__attach(skel);
@@ -209,12 +230,12 @@ static const char *lock_contention_get_name(struct lock_contention *con,
return "siglock";
/* global locks with symbols */
- sym = machine__find_kernel_symbol(machine, key->lock_addr, &kmap);
+ sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
if (sym)
return sym->name;
/* try semi-global locks collected separately */
- if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr, &flags)) {
+ if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
if (flags == LOCK_CLASS_RQLOCK)
return "rq_lock";
}
@@ -222,6 +243,17 @@ static const char *lock_contention_get_name(struct lock_contention *con,
return "";
}
+ if (con->aggr_mode == LOCK_AGGR_CGROUP) {
+ u64 cgrp_id = key->lock_addr_or_cgroup;
+ struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);
+
+ if (cgrp)
+ return cgrp->name;
+
+ snprintf(name_buf, sizeof(name_buf), "cgroup:%lu", cgrp_id);
+ return name_buf;
+ }
+
/* LOCK_AGGR_CALLER: skip lock internal functions */
while (machine__is_lock_function(machine, stack_trace[idx]) &&
idx < con->max_stack - 1)
@@ -313,7 +345,8 @@ int lock_contention_read(struct lock_contention *con)
ls_key = key.pid;
break;
case LOCK_AGGR_ADDR:
- ls_key = key.lock_addr;
+ case LOCK_AGGR_CGROUP:
+ ls_key = key.lock_addr_or_cgroup;
break;
default:
goto next;
@@ -364,12 +397,20 @@ next:
return err;
}
-int lock_contention_finish(void)
+int lock_contention_finish(struct lock_contention *con)
{
if (skel) {
skel->bss->enabled = 0;
lock_contention_bpf__destroy(skel);
}
+ while (!RB_EMPTY_ROOT(&con->cgroups)) {
+ struct rb_node *node = rb_first(&con->cgroups);
+ struct cgroup *cgrp = rb_entry(node, struct cgroup, node);
+
+ rb_erase(node, &con->cgroups);
+ cgroup__put(cgrp);
+ }
+
return 0;
}
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index 01f70b8e705a..6af36142dc5a 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -98,28 +98,31 @@ static void off_cpu_finish(void *arg __maybe_unused)
/* v5.18 kernel added prev_state arg, so it needs to check the signature */
static void check_sched_switch_args(void)
{
- const struct btf *btf = bpf_object__btf(skel->obj);
+ struct btf *btf = btf__load_vmlinux_btf();
const struct btf_type *t1, *t2, *t3;
u32 type_id;
type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
BTF_KIND_TYPEDEF);
if ((s32)type_id < 0)
- return;
+ goto cleanup;
t1 = btf__type_by_id(btf, type_id);
if (t1 == NULL)
- return;
+ goto cleanup;
t2 = btf__type_by_id(btf, t1->type);
if (t2 == NULL || !btf_is_ptr(t2))
- return;
+ goto cleanup;
t3 = btf__type_by_id(btf, t2->type);
- if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
+ /* btf_trace func proto has one more argument for the context */
+ if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 5) {
/* new format: pass prev_state as 4th arg */
skel->rodata->has_prev_state = true;
}
+cleanup:
+ btf__free(btf);
}
int off_cpu_prepare(struct evlist *evlist, struct target *target,
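The BTF lookup above walks typedef -> pointer -> function prototype; the btf_trace_* prototype carries one extra leading context argument, which is why the four-argument sched_switch shows up as vlen == 5. A hedged standalone sketch of the same probe using libbpf's BTF API (tracepoint_has_nr_args is a hypothetical helper):

    #include <bpf/btf.h>
    #include <stdbool.h>

    /* Load vmlinux BTF and check whether a tracepoint's func proto has
     * exactly 'nr_args' parameters (including the leading context arg). */
    static bool tracepoint_has_nr_args(const char *typedef_name, unsigned int nr_args)
    {
            struct btf *btf = btf__load_vmlinux_btf();
            const struct btf_type *t;
            int id;
            bool ret = false;

            if (!btf)
                    return false;

            /* e.g. "btf_trace_sched_switch" for the sched_switch tracepoint */
            id = btf__find_by_name_kind(btf, typedef_name, BTF_KIND_TYPEDEF);
            if (id < 0)
                    goto out;

            t = btf__type_by_id(btf, id);           /* typedef -> */
            if (!t)
                    goto out;
            t = btf__type_by_id(btf, t->type);      /* pointer -> */
            if (!t || !btf_is_ptr(t))
                    goto out;
            t = btf__type_by_id(btf, t->type);      /* func_proto */
            if (t && btf_is_func_proto(t))
                    ret = btf_vlen(t) == nr_args;   /* vlen == parameter count */
    out:
            btf__free(btf);
            return ret;
    }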
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 939ec769bf4a..52c270330ae0 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -153,7 +153,7 @@ static inline
unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
{
unsigned int augmented_len = sizeof(*augmented_arg);
- int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
+ int string_len = bpf_probe_read_user_str(&augmented_arg->value, arg_len, arg);
augmented_arg->size = augmented_arg->err = 0;
/*
@@ -203,7 +203,7 @@ int sys_enter_connect(struct syscall_enter_args *args)
_Static_assert(is_power_of_2(sizeof(augmented_args->saddr)), "sizeof(augmented_args->saddr) needs to be a power of two");
socklen &= sizeof(augmented_args->saddr) - 1;
- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
+ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
@@ -221,7 +221,7 @@ int sys_enter_sendto(struct syscall_enter_args *args)
socklen &= sizeof(augmented_args->saddr) - 1;
- bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
+ bpf_probe_read_user(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
@@ -311,7 +311,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
if (augmented_args == NULL)
goto failure;
- if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
+ if (bpf_probe_read_user(&augmented_args->__data, sizeof(*attr), attr) < 0)
goto failure;
attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
@@ -325,7 +325,7 @@ int sys_enter_perf_event_open(struct syscall_enter_args *args)
goto failure;
// Now that we read attr->size and tested it against the size limits, read it completely
- if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
+ if (bpf_probe_read_user(&augmented_args->__data, size, attr) < 0)
goto failure;
return augmented__output(args, augmented_args, len + size);
@@ -347,7 +347,7 @@ int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
if (size > sizeof(augmented_args->__data))
goto failure;
- bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
+ bpf_probe_read_user(&augmented_args->__data, size, rqtp_arg);
return augmented__output(args, augmented_args, len + size);
failure:
@@ -385,7 +385,7 @@ int sys_enter(struct syscall_enter_args *args)
if (augmented_args == NULL)
return 1;
- bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
+ bpf_probe_read_kernel(&augmented_args->args, sizeof(augmented_args->args), args);
/*
* Jump to syscall specific augmenter, even if the default one,
@@ -406,7 +406,7 @@ int sys_exit(struct syscall_exit_args *args)
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
- bpf_probe_read(&exit_args, sizeof(exit_args), args);
+ bpf_probe_read_kernel(&exit_args, sizeof(exit_args), args);
/*
* Jump to syscall specific return augmenter, even if the default one,
* "!raw_syscalls:unaugmented" that will just return 1 to return the
diff --git a/tools/perf/util/bpf_skel/kwork_top.bpf.c b/tools/perf/util/bpf_skel/kwork_top.bpf.c
new file mode 100644
index 000000000000..84c15ccbab44
--- /dev/null
+++ b/tools/perf/util/bpf_skel/kwork_top.bpf.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2022, Huawei
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+/*
+ * This should be in sync with "util/kwork.h"
+ */
+enum kwork_class_type {
+ KWORK_CLASS_IRQ,
+ KWORK_CLASS_SOFTIRQ,
+ KWORK_CLASS_WORKQUEUE,
+ KWORK_CLASS_SCHED,
+ KWORK_CLASS_MAX,
+};
+
+#define MAX_ENTRIES 102400
+#define MAX_NR_CPUS 2048
+#define PF_KTHREAD 0x00200000
+#define MAX_COMMAND_LEN 16
+
+struct time_data {
+ __u64 timestamp;
+};
+
+struct work_data {
+ __u64 runtime;
+};
+
+struct task_data {
+ __u32 tgid;
+ __u32 is_kthread;
+ char comm[MAX_COMMAND_LEN];
+};
+
+struct work_key {
+ __u32 type;
+ __u32 pid;
+ __u64 task_p;
+};
+
+struct task_key {
+ __u32 pid;
+ __u32 cpu;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct time_data);
+} kwork_top_task_time SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, sizeof(struct time_data));
+ __uint(max_entries, MAX_ENTRIES);
+} kwork_top_irq_time SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(struct task_key));
+ __uint(value_size, sizeof(struct task_data));
+ __uint(max_entries, MAX_ENTRIES);
+} kwork_top_tasks SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(struct work_key));
+ __uint(value_size, sizeof(struct work_data));
+ __uint(max_entries, MAX_ENTRIES);
+} kwork_top_works SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u8));
+ __uint(max_entries, MAX_NR_CPUS);
+} kwork_top_cpu_filter SEC(".maps");
+
+int enabled = 0;
+
+int has_cpu_filter = 0;
+
+__u64 from_timestamp = 0;
+__u64 to_timestamp = 0;
+
+static __always_inline int cpu_is_filtered(__u32 cpu)
+{
+ __u8 *cpu_val;
+
+ if (has_cpu_filter) {
+ cpu_val = bpf_map_lookup_elem(&kwork_top_cpu_filter, &cpu);
+ if (!cpu_val)
+ return 1;
+ }
+
+ return 0;
+}
+
+static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)
+{
+ struct task_key key = {
+ .pid = task->pid,
+ .cpu = cpu,
+ };
+
+ if (!bpf_map_lookup_elem(&kwork_top_tasks, &key)) {
+ struct task_data data = {
+ .tgid = task->tgid,
+ .is_kthread = task->flags & PF_KTHREAD ? 1 : 0,
+ };
+ BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
+
+ bpf_map_update_elem(&kwork_top_tasks, &key, &data, BPF_ANY);
+ }
+}
+
+static __always_inline void update_work(struct work_key *key, __u64 delta)
+{
+ struct work_data *data;
+
+ data = bpf_map_lookup_elem(&kwork_top_works, key);
+ if (data) {
+ data->runtime += delta;
+ } else {
+ struct work_data new_data = {
+ .runtime = delta,
+ };
+
+ bpf_map_update_elem(&kwork_top_works, key, &new_data, BPF_ANY);
+ }
+}
+
+static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)
+{
+ __u64 delta;
+ struct time_data *pelem;
+
+ pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0);
+ if (pelem)
+ delta = ts - pelem->timestamp;
+ else
+ delta = ts - from_timestamp;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_SCHED,
+ .pid = task->pid,
+ .task_p = (__u64)task,
+ };
+
+ update_work(&key, delta);
+ update_task_info(task, cpu);
+}
+
+static void on_sched_in(struct task_struct *task, __u64 ts)
+{
+ struct time_data *pelem;
+
+ pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (pelem)
+ pelem->timestamp = ts;
+}
+
+SEC("tp_btf/sched_switch")
+int on_switch(u64 *ctx)
+{
+ struct task_struct *prev, *next;
+
+ prev = (struct task_struct *)ctx[1];
+ next = (struct task_struct *)ctx[2];
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ on_sched_out(prev, ts, cpu);
+ on_sched_in(next, ts);
+
+ return 0;
+}
+
+SEC("tp_btf/irq_handler_entry")
+int on_irq_handler_entry(u64 *cxt)
+{
+ struct task_struct *task;
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ if (!task)
+ return 0;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_IRQ,
+ .pid = BPF_CORE_READ(task, pid),
+ .task_p = (__u64)task,
+ };
+
+ struct time_data data = {
+ .timestamp = ts,
+ };
+
+ bpf_map_update_elem(&kwork_top_irq_time, &key, &data, BPF_ANY);
+
+ return 0;
+}
+
+SEC("tp_btf/irq_handler_exit")
+int on_irq_handler_exit(u64 *cxt)
+{
+ __u64 delta;
+ struct task_struct *task;
+ struct time_data *pelem;
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ if (!task)
+ return 0;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_IRQ,
+ .pid = BPF_CORE_READ(task, pid),
+ .task_p = (__u64)task,
+ };
+
+ pelem = bpf_map_lookup_elem(&kwork_top_irq_time, &key);
+ if (pelem && pelem->timestamp != 0)
+ delta = ts - pelem->timestamp;
+ else
+ delta = ts - from_timestamp;
+
+ update_work(&key, delta);
+
+ return 0;
+}
+
+SEC("tp_btf/softirq_entry")
+int on_softirq_entry(u64 *cxt)
+{
+ struct task_struct *task;
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ if (!task)
+ return 0;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .pid = BPF_CORE_READ(task, pid),
+ .task_p = (__u64)task,
+ };
+
+ struct time_data data = {
+ .timestamp = ts,
+ };
+
+ bpf_map_update_elem(&kwork_top_irq_time, &key, &data, BPF_ANY);
+
+ return 0;
+}
+
+SEC("tp_btf/softirq_exit")
+int on_softirq_exit(u64 *cxt)
+{
+ __u64 delta;
+ struct task_struct *task;
+ struct time_data *pelem;
+
+ if (!enabled)
+ return 0;
+
+ __u32 cpu = bpf_get_smp_processor_id();
+
+ if (cpu_is_filtered(cpu))
+ return 0;
+
+ __u64 ts = bpf_ktime_get_ns();
+
+ task = (struct task_struct *)bpf_get_current_task();
+ if (!task)
+ return 0;
+
+ struct work_key key = {
+ .type = KWORK_CLASS_SOFTIRQ,
+ .pid = BPF_CORE_READ(task, pid),
+ .task_p = (__u64)task,
+ };
+
+ pelem = bpf_map_lookup_elem(&kwork_top_irq_time, &key);
+ if (pelem)
+ delta = ts - pelem->timestamp;
+ else
+ delta = ts - from_timestamp;
+
+ update_work(&key, delta);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index 8d3cfbb3cc65..95cd8414f6ef 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -42,6 +42,14 @@ struct {
__uint(max_entries, MAX_ENTRIES);
} tstamp SEC(".maps");
+/* maintain per-CPU timestamp at the beginning of contention */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct tstamp_data));
+ __uint(max_entries, 1);
+} tstamp_cpu SEC(".maps");
+
/* actual lock contention statistics */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
@@ -92,6 +100,13 @@ struct {
__uint(max_entries, 1);
} addr_filter SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u64));
+ __uint(value_size, sizeof(__u8));
+ __uint(max_entries, 1);
+} cgroup_filter SEC(".maps");
+
struct rw_semaphore___old {
struct task_struct *owner;
} __attribute__((preserve_access_index));
@@ -114,10 +129,14 @@ int has_cpu;
int has_task;
int has_type;
int has_addr;
+int has_cgroup;
int needs_callstack;
int stack_skip;
int lock_owner;
+int use_cgroup_v2;
+int perf_subsys_id = -1;
+
/* determine the key of lock stat */
int aggr_mode;
@@ -130,6 +149,29 @@ int data_fail;
int task_map_full;
int data_map_full;
+static inline __u64 get_current_cgroup_id(void)
+{
+ struct task_struct *task;
+ struct cgroup *cgrp;
+
+ if (use_cgroup_v2)
+ return bpf_get_current_cgroup_id();
+
+ task = bpf_get_current_task_btf();
+
+ if (perf_subsys_id == -1) {
+#if __has_builtin(__builtin_preserve_enum_value)
+ perf_subsys_id = bpf_core_enum_value(enum cgroup_subsys_id,
+ perf_event_cgrp_id);
+#else
+ perf_subsys_id = perf_event_cgrp_id;
+#endif
+ }
+
+ cgrp = BPF_CORE_READ(task, cgroups, subsys[perf_subsys_id], cgroup);
+ return BPF_CORE_READ(cgrp, kn, id);
+}
+
static inline int can_record(u64 *ctx)
{
if (has_cpu) {
@@ -168,6 +210,15 @@ static inline int can_record(u64 *ctx)
return 0;
}
+ if (has_cgroup) {
+ __u8 *ok;
+ __u64 cgrp = get_current_cgroup_id();
+
+ ok = bpf_map_lookup_elem(&cgroup_filter, &cgrp);
+ if (!ok)
+ return 0;
+ }
+
return 1;
}
@@ -268,30 +319,57 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
return 0;
}
-SEC("tp_btf/contention_begin")
-int contention_begin(u64 *ctx)
+static inline struct tstamp_data *get_tstamp_elem(__u32 flags)
{
__u32 pid;
struct tstamp_data *pelem;
- if (!enabled || !can_record(ctx))
- return 0;
+ /* Use per-cpu array map for spinlock and rwlock */
+ if (flags == (LCB_F_SPIN | LCB_F_READ) || flags == LCB_F_SPIN ||
+ flags == (LCB_F_SPIN | LCB_F_WRITE)) {
+ __u32 idx = 0;
+
+ pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
+ /* Do not update the element for nested locks */
+ if (pelem && pelem->lock)
+ pelem = NULL;
+ return pelem;
+ }
pid = bpf_get_current_pid_tgid();
pelem = bpf_map_lookup_elem(&tstamp, &pid);
+ /* Do not update the element for nested locks */
if (pelem && pelem->lock)
- return 0;
+ return NULL;
if (pelem == NULL) {
struct tstamp_data zero = {};
- bpf_map_update_elem(&tstamp, &pid, &zero, BPF_ANY);
+ if (bpf_map_update_elem(&tstamp, &pid, &zero, BPF_NOEXIST) < 0) {
+ __sync_fetch_and_add(&task_fail, 1);
+ return NULL;
+ }
+
pelem = bpf_map_lookup_elem(&tstamp, &pid);
if (pelem == NULL) {
__sync_fetch_and_add(&task_fail, 1);
- return 0;
+ return NULL;
}
}
+ return pelem;
+}
+
+SEC("tp_btf/contention_begin")
+int contention_begin(u64 *ctx)
+{
+ struct tstamp_data *pelem;
+
+ if (!enabled || !can_record(ctx))
+ return 0;
+
+ pelem = get_tstamp_elem(ctx[1]);
+ if (pelem == NULL)
+ return 0;
pelem->timestamp = bpf_ktime_get_ns();
pelem->lock = (__u64)ctx[0];
@@ -330,23 +408,42 @@ int contention_begin(u64 *ctx)
SEC("tp_btf/contention_end")
int contention_end(u64 *ctx)
{
- __u32 pid;
+ __u32 pid = 0, idx = 0;
struct tstamp_data *pelem;
struct contention_key key = {};
struct contention_data *data;
__u64 duration;
+ bool need_delete = false;
if (!enabled)
return 0;
- pid = bpf_get_current_pid_tgid();
- pelem = bpf_map_lookup_elem(&tstamp, &pid);
- if (!pelem || pelem->lock != ctx[0])
- return 0;
+ /*
+ * For spinlock and rwlock, it needs to get the timestamp for the
+ * per-cpu map. However, contention_end does not have the flags
+ * so it cannot know whether it reads percpu or hash map.
+ *
+ * Try per-cpu map first and check if there's active contention.
+ * If it is, do not read hash map because it cannot go to sleeping
+ * locks before releasing the spinning locks.
+ */
+ pelem = bpf_map_lookup_elem(&tstamp_cpu, &idx);
+ if (pelem && pelem->lock) {
+ if (pelem->lock != ctx[0])
+ return 0;
+ } else {
+ pid = bpf_get_current_pid_tgid();
+ pelem = bpf_map_lookup_elem(&tstamp, &pid);
+ if (!pelem || pelem->lock != ctx[0])
+ return 0;
+ need_delete = true;
+ }
duration = bpf_ktime_get_ns() - pelem->timestamp;
if ((__s64)duration < 0) {
- bpf_map_delete_elem(&tstamp, &pid);
+ pelem->lock = 0;
+ if (need_delete)
+ bpf_map_delete_elem(&tstamp, &pid);
__sync_fetch_and_add(&time_fail, 1);
return 0;
}
@@ -358,16 +455,22 @@ int contention_end(u64 *ctx)
case LOCK_AGGR_TASK:
if (lock_owner)
key.pid = pelem->flags;
- else
+ else {
+ if (!need_delete)
+ pid = bpf_get_current_pid_tgid();
key.pid = pid;
+ }
if (needs_callstack)
key.stack_id = pelem->stack_id;
break;
case LOCK_AGGR_ADDR:
- key.lock_addr = pelem->lock;
+ key.lock_addr_or_cgroup = pelem->lock;
if (needs_callstack)
key.stack_id = pelem->stack_id;
break;
+ case LOCK_AGGR_CGROUP:
+ key.lock_addr_or_cgroup = get_current_cgroup_id();
+ break;
default:
/* should not happen */
return 0;
@@ -376,7 +479,9 @@ int contention_end(u64 *ctx)
data = bpf_map_lookup_elem(&lock_stat, &key);
if (!data) {
if (data_map_full) {
- bpf_map_delete_elem(&tstamp, &pid);
+ pelem->lock = 0;
+ if (need_delete)
+ bpf_map_delete_elem(&tstamp, &pid);
__sync_fetch_and_add(&data_fail, 1);
return 0;
}
@@ -399,7 +504,9 @@ int contention_end(u64 *ctx)
data_map_full = 1;
__sync_fetch_and_add(&data_fail, 1);
}
- bpf_map_delete_elem(&tstamp, &pid);
+ pelem->lock = 0;
+ if (need_delete)
+ bpf_map_delete_elem(&tstamp, &pid);
return 0;
}
@@ -412,7 +519,9 @@ int contention_end(u64 *ctx)
if (data->min_time > duration)
data->min_time = duration;
- bpf_map_delete_elem(&tstamp, &pid);
+ pelem->lock = 0;
+ if (need_delete)
+ bpf_map_delete_elem(&tstamp, &pid);
return 0;
}
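The per-cpu array above works because a task spinning on a spinlock or rwlock cannot sleep mid-contention, so a single tstamp_data slot per CPU suffices, and the lookup avoids both the hashing cost and the nested-contention problem of updating a shared hash map from the contention path itself. The pattern in isolation, as a hedged sketch rather than the file above:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, sizeof(__u64));
            __uint(max_entries, 1); /* one slot, replicated per CPU */
    } percpu_slot SEC(".maps");

    static __always_inline __u64 *this_cpu_slot(void)
    {
            __u32 idx = 0;

            /* Returns this CPU's copy of entry 0: no hashing, no shared state. */
            return bpf_map_lookup_elem(&percpu_slot, &idx);
    }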
diff --git a/tools/perf/util/bpf_skel/lock_data.h b/tools/perf/util/bpf_skel/lock_data.h
index 260062a9f2ab..08482daf61be 100644
--- a/tools/perf/util/bpf_skel/lock_data.h
+++ b/tools/perf/util/bpf_skel/lock_data.h
@@ -6,7 +6,7 @@
struct contention_key {
u32 stack_id;
u32 pid;
- u64 lock_addr;
+ u64 lock_addr_or_cgroup;
};
#define TASK_COMM_LEN 16
@@ -39,6 +39,7 @@ enum lock_aggr_mode {
LOCK_AGGR_ADDR = 0,
LOCK_AGGR_TASK,
LOCK_AGGR_CALLER,
+ LOCK_AGGR_CGROUP,
};
enum lock_class_sym {
diff --git a/tools/perf/util/bpf_skel/vmlinux/.gitignore b/tools/perf/util/bpf_skel/vmlinux/.gitignore
new file mode 100644
index 000000000000..49502c04183a
--- /dev/null
+++ b/tools/perf/util/bpf_skel/vmlinux/.gitignore
@@ -0,0 +1 @@
+!vmlinux.h
diff --git a/tools/perf/util/branch.c b/tools/perf/util/branch.c
index 378f16a24751..ab760e267d41 100644
--- a/tools/perf/util/branch.c
+++ b/tools/perf/util/branch.c
@@ -109,7 +109,7 @@ const char *get_branch_type(struct branch_entry *e)
return branch_type_name(e->flags.type);
}
-void branch_type_stat_display(FILE *fp, struct branch_type_stat *st)
+void branch_type_stat_display(FILE *fp, const struct branch_type_stat *st)
{
u64 total = 0;
int i;
@@ -171,7 +171,7 @@ static int count_str_scnprintf(int idx, const char *str, char *bf, int size)
return scnprintf(bf, size, "%s%s", (idx) ? " " : " (", str);
}
-int branch_type_str(struct branch_type_stat *st, char *bf, int size)
+int branch_type_str(const struct branch_type_stat *st, char *bf, int size)
{
int i, j = 0, printed = 0;
u64 total = 0;
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index e41bfffe2217..87704d713ff6 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -86,8 +86,8 @@ void branch_type_count(struct branch_type_stat *st, struct branch_flags *flags,
const char *branch_type_name(int type);
const char *branch_new_type_name(int new_type);
const char *get_branch_type(struct branch_entry *e);
-void branch_type_stat_display(FILE *fp, struct branch_type_stat *st);
-int branch_type_str(struct branch_type_stat *st, char *bf, int bfsize);
+void branch_type_stat_display(FILE *fp, const struct branch_type_stat *st);
+int branch_type_str(const struct branch_type_stat *st, char *bf, int bfsize);
const char *branch_spec_desc(int spec);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index aee937d14fbb..8262f69118db 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -586,7 +586,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
call = zalloc(sizeof(*call));
if (!call) {
perror("not enough memory for the code path tree");
- return -1;
+ return -ENOMEM;
}
call->ip = cursor_node->ip;
call->ms = cursor_node->ms;
@@ -602,7 +602,15 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
* branch_from is set with value somewhere else
* to imply it's "to" of a branch.
*/
- call->brtype_stat.branch_to = true;
+ if (!call->brtype_stat) {
+ call->brtype_stat = zalloc(sizeof(*call->brtype_stat));
+ if (!call->brtype_stat) {
+ perror("not enough memory for the code path branch statistics");
+ free(call->brtype_stat);
+ return -ENOMEM;
+ }
+ }
+ call->brtype_stat->branch_to = true;
if (cursor_node->branch_flags.predicted)
call->predicted_count = 1;
@@ -610,7 +618,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
if (cursor_node->branch_flags.abort)
call->abort_count = 1;
- branch_type_count(&call->brtype_stat,
+ branch_type_count(call->brtype_stat,
&cursor_node->branch_flags,
cursor_node->branch_from,
cursor_node->ip);
@@ -618,7 +626,8 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
/*
* It's "from" of a branch
*/
- call->brtype_stat.branch_to = false;
+ if (call->brtype_stat && call->brtype_stat->branch_to)
+ call->brtype_stat->branch_to = false;
call->cycles_count =
cursor_node->branch_flags.cycles;
call->iter_count = cursor_node->nr_loop_iter;
@@ -650,8 +659,8 @@ add_child(struct callchain_node *parent,
list_for_each_entry_safe(call, tmp, &new->val, list) {
list_del_init(&call->list);
- map__zput(call->ms.map);
- maps__zput(call->ms.maps);
+ map_symbol__exit(&call->ms);
+ zfree(&call->brtype_stat);
free(call);
}
free(new);
@@ -762,7 +771,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
/*
* It's "to" of a branch
*/
- cnode->brtype_stat.branch_to = true;
+ if (!cnode->brtype_stat) {
+ cnode->brtype_stat = zalloc(sizeof(*cnode->brtype_stat));
+ if (!cnode->brtype_stat) {
+ perror("not enough memory for the code path branch statistics");
+ return MATCH_ERROR;
+ }
+ }
+ cnode->brtype_stat->branch_to = true;
if (node->branch_flags.predicted)
cnode->predicted_count++;
@@ -770,7 +786,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
if (node->branch_flags.abort)
cnode->abort_count++;
- branch_type_count(&cnode->brtype_stat,
+ branch_type_count(cnode->brtype_stat,
&node->branch_flags,
node->branch_from,
node->ip);
@@ -778,7 +794,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
/*
* It's "from" of a branch
*/
- cnode->brtype_stat.branch_to = false;
+ if (cnode->brtype_stat && cnode->brtype_stat->branch_to)
+ cnode->brtype_stat->branch_to = false;
cnode->cycles_count += node->branch_flags.cycles;
cnode->iter_count += node->nr_loop_iter;
cnode->iter_cycles += node->iter_cycles;
@@ -1022,10 +1039,9 @@ merge_chain_branch(struct callchain_cursor *cursor,
};
callchain_cursor_append(cursor, list->ip, &ms, false, NULL, 0, 0, 0, list->srcline);
list_del_init(&list->list);
- map__zput(ms.map);
- maps__zput(ms.maps);
- map__zput(list->ms.map);
- maps__zput(list->ms.maps);
+ map_symbol__exit(&ms);
+ map_symbol__exit(&list->ms);
+ zfree(&list->brtype_stat);
free(list);
}
@@ -1077,8 +1093,7 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
}
node->ip = ip;
- maps__zput(node->ms.maps);
- map__zput(node->ms.map);
+ map_symbol__exit(&node->ms);
node->ms = *ms;
node->ms.maps = maps__get(ms->maps);
node->ms.map = map__get(ms->map);
@@ -1142,7 +1157,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *
if (al->map == NULL)
goto out;
}
- if (RC_CHK_ACCESS(al->maps) == RC_CHK_ACCESS(machine__kernel_maps(machine))) {
+ if (RC_CHK_EQUAL(al->maps, machine__kernel_maps(machine))) {
if (machine__is_host(machine)) {
al->cpumode = PERF_RECORD_MISC_KERNEL;
al->level = 'k';
@@ -1339,7 +1354,7 @@ static int count_float_printf(int idx, const char *str, float value,
static int branch_to_str(char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count,
- struct branch_type_stat *brtype_stat)
+ const struct branch_type_stat *brtype_stat)
{
int printed, i = 0;
@@ -1403,7 +1418,7 @@ static int counts_str_build(char *bf, int bfsize,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
u64 from_count,
- struct branch_type_stat *brtype_stat)
+ const struct branch_type_stat *brtype_stat)
{
int printed;
@@ -1430,7 +1445,7 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
u64 from_count,
- struct branch_type_stat *brtype_stat)
+ const struct branch_type_stat *brtype_stat)
{
char str[256];
@@ -1447,11 +1462,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
int callchain_list_counts__printf_value(struct callchain_list *clist,
FILE *fp, char *bf, int bfsize)
{
+ static const struct branch_type_stat empty_brtype_stat = {};
+ const struct branch_type_stat *brtype_stat;
u64 branch_count, predicted_count;
u64 abort_count, cycles_count;
u64 iter_count, iter_cycles;
u64 from_count;
+ brtype_stat = clist->brtype_stat ?: &empty_brtype_stat;
branch_count = clist->branch_count;
predicted_count = clist->predicted_count;
abort_count = clist->abort_count;
@@ -1463,7 +1481,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
return callchain_counts_printf(fp, bf, bfsize, branch_count,
predicted_count, abort_count,
cycles_count, iter_count, iter_cycles,
- from_count, &clist->brtype_stat);
+ from_count, brtype_stat);
}
static void free_callchain_node(struct callchain_node *node)
@@ -1474,15 +1492,15 @@ static void free_callchain_node(struct callchain_node *node)
list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
list_del_init(&list->list);
- map__zput(list->ms.map);
- maps__zput(list->ms.maps);
+ map_symbol__exit(&list->ms);
+ zfree(&list->brtype_stat);
free(list);
}
list_for_each_entry_safe(list, tmp, &node->val, list) {
list_del_init(&list->list);
- map__zput(list->ms.map);
- maps__zput(list->ms.maps);
+ map_symbol__exit(&list->ms);
+ zfree(&list->brtype_stat);
free(list);
}
@@ -1567,8 +1585,8 @@ int callchain_node__make_parent_list(struct callchain_node *node)
out:
list_for_each_entry_safe(chain, new, &head, list) {
list_del_init(&chain->list);
- map__zput(chain->ms.map);
- maps__zput(chain->ms.maps);
+ map_symbol__exit(&chain->ms);
+ zfree(&chain->brtype_stat);
free(chain);
}
return -ENOMEM;
@@ -1651,10 +1669,8 @@ void callchain_cursor_reset(struct callchain_cursor *cursor)
cursor->nr = 0;
cursor->last = &cursor->first;
- for (node = cursor->first; node != NULL; node = node->next) {
- map__zput(node->ms.map);
- maps__zput(node->ms.maps);
- }
+ for (node = cursor->first; node != NULL; node = node->next)
+ map_symbol__exit(&node->ms);
}
void callchain_param_setup(u64 sample_type, const char *arch)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index d2618a47deca..d5c66345ae31 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -116,22 +116,22 @@ extern struct callchain_param callchain_param;
extern struct callchain_param callchain_param_default;
struct callchain_list {
+ struct list_head list;
u64 ip;
struct map_symbol ms;
- struct /* for TUI */ {
- bool unfolded;
- bool has_children;
- };
+ const char *srcline;
u64 branch_count;
u64 from_count;
- u64 predicted_count;
- u64 abort_count;
u64 cycles_count;
u64 iter_count;
u64 iter_cycles;
- struct branch_type_stat brtype_stat;
- const char *srcline;
- struct list_head list;
+ struct branch_type_stat *brtype_stat;
+ u64 predicted_count;
+ u64 abort_count;
+ struct /* for TUI */ {
+ bool unfolded;
+ bool has_children;
+ };
};
/*
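Two things are going on in the callchain_list reshuffle above: the rarely-used branch statistics move behind a pointer that is only allocated on first use, and the members are reordered so the frequently-accessed ones sit together. A generic, hedged illustration of the embedded-vs-lazy-pointer trade-off, with hypothetical types:

    #include <stdio.h>

    struct big_stat { unsigned long counts[64]; }; /* rarely-used stats blob */

    struct node_embedded {
            unsigned long ip;
            struct big_stat stat;   /* always paid for, used or not */
    };

    struct node_lazy {
            unsigned long ip;
            struct big_stat *stat;  /* NULL until first needed */
    };

    int main(void)
    {
            printf("embedded: %zu bytes, lazy: %zu bytes\n",
                   sizeof(struct node_embedded), sizeof(struct node_lazy));
            return 0;
    }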
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index bfb13306d82c..fcb509058499 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -48,28 +48,36 @@ static int open_cgroup(const char *name)
}
#ifdef HAVE_FILE_HANDLE
-int read_cgroup_id(struct cgroup *cgrp)
+static u64 __read_cgroup_id(const char *path)
{
- char path[PATH_MAX + 1];
- char mnt[PATH_MAX + 1];
struct {
struct file_handle fh;
uint64_t cgroup_id;
} handle;
int mount_id;
+ handle.fh.handle_bytes = sizeof(handle.cgroup_id);
+ if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
+ return -1ULL;
+
+ return handle.cgroup_id;
+}
+
+int read_cgroup_id(struct cgroup *cgrp)
+{
+ char path[PATH_MAX + 1];
+ char mnt[PATH_MAX + 1];
+
if (cgroupfs_find_mountpoint(mnt, PATH_MAX + 1, "perf_event"))
return -1;
scnprintf(path, PATH_MAX, "%s/%s", mnt, cgrp->name);
- handle.fh.handle_bytes = sizeof(handle.cgroup_id);
- if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
- return -1;
-
- cgrp->id = handle.cgroup_id;
+ cgrp->id = __read_cgroup_id(path);
return 0;
}
+#else
+static inline u64 __read_cgroup_id(const char *path __maybe_unused) { return -1ULL; }
#endif /* HAVE_FILE_HANDLE */
#ifndef CGROUP2_SUPER_MAGIC
@@ -106,7 +114,7 @@ static struct cgroup *evlist__find_cgroup(struct evlist *evlist, const char *str
return NULL;
}
-static struct cgroup *cgroup__new(const char *name, bool do_open)
+struct cgroup *cgroup__new(const char *name, bool do_open)
{
struct cgroup *cgroup = zalloc(sizeof(*cgroup));
@@ -562,6 +570,11 @@ struct cgroup *cgroup__findnew(struct perf_env *env, uint64_t id,
return cgrp;
}
+struct cgroup *__cgroup__find(struct rb_root *root, uint64_t id)
+{
+ return __cgroup__findnew(root, id, /*create=*/false, /*path=*/NULL);
+}
+
struct cgroup *cgroup__find(struct perf_env *env, uint64_t id)
{
struct cgroup *cgrp;
@@ -587,3 +600,35 @@ void perf_env__purge_cgroups(struct perf_env *env)
}
up_write(&env->cgroups.lock);
}
+
+void read_all_cgroups(struct rb_root *root)
+{
+ char mnt[PATH_MAX];
+ struct cgroup_name *cn;
+ int prefix_len;
+
+ if (cgroupfs_find_mountpoint(mnt, sizeof(mnt), "perf_event"))
+ return;
+
+ /* cgroup_name will have a full path, skip the root directory */
+ prefix_len = strlen(mnt);
+
+ /* collect all cgroups in the cgroup_list */
+ if (nftw(mnt, add_cgroup_name, 20, 0) < 0)
+ return;
+
+ list_for_each_entry(cn, &cgroup_list, list) {
+ const char *name;
+ u64 cgrp_id;
+
+ /* cgroup_name might have a full path, skip the prefix */
+ name = cn->name + prefix_len;
+ if (name[0] == '\0')
+ name = "/";
+
+ cgrp_id = __read_cgroup_id(cn->name);
+ __cgroup__findnew(root, cgrp_id, /*create=*/true, name);
+ }
+
+ release_cgroup_list();
+}
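__read_cgroup_id() above relies on a cgroupfs property: the file handle exported by name_to_handle_at() is the 64-bit cgroup id, so a struct file_handle with an 8-byte buffer placed right behind it reads the id without opening the directory. A hedged standalone version of the same trick (cgroup_id_of is a hypothetical helper; the path in main() is just an example):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t cgroup_id_of(const char *path)
    {
            struct {
                    struct file_handle fh;
                    uint64_t cgroup_id; /* backs fh.f_handle */
            } handle;
            int mount_id;

            handle.fh.handle_bytes = sizeof(handle.cgroup_id);
            if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0)
                    return (uint64_t)-1;
            return handle.cgroup_id;
    }

    int main(void)
    {
            /* e.g. the cgroup v2 root; adjust the path for your system */
            printf("id=%llu\n",
                   (unsigned long long)cgroup_id_of("/sys/fs/cgroup"));
            return 0;
    }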
diff --git a/tools/perf/util/cgroup.h b/tools/perf/util/cgroup.h
index 12256b78608c..de8882d6e8d3 100644
--- a/tools/perf/util/cgroup.h
+++ b/tools/perf/util/cgroup.h
@@ -26,6 +26,7 @@ void cgroup__put(struct cgroup *cgroup);
struct evlist;
struct rblist;
+struct cgroup *cgroup__new(const char *name, bool do_open);
struct cgroup *evlist__findnew_cgroup(struct evlist *evlist, const char *name);
int evlist__expand_cgroup(struct evlist *evlist, const char *cgroups,
struct rblist *metric_events, bool open_cgroup);
@@ -37,6 +38,7 @@ int parse_cgroups(const struct option *opt, const char *str, int unset);
struct cgroup *cgroup__findnew(struct perf_env *env, uint64_t id,
const char *path);
struct cgroup *cgroup__find(struct perf_env *env, uint64_t id);
+struct cgroup *__cgroup__find(struct rb_root *root, uint64_t id);
void perf_env__purge_cgroups(struct perf_env *env);
@@ -49,6 +51,9 @@ static inline int read_cgroup_id(struct cgroup *cgrp __maybe_unused)
}
#endif /* HAVE_FILE_HANDLE */
+/* read all cgroups in the system and save them in the rbtree */
+void read_all_cgroups(struct rb_root *root);
+
int cgroup_is_v2(const char *subsys);
#endif /* __CGROUP_H__ */
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 9729d006550d..a9873d14c632 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -283,22 +283,31 @@ static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
}
/*
- * Get a metadata for a specific cpu from an array.
+ * Get a metadata index for a specific cpu from an array.
*
*/
-static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
+static int get_cpu_data_idx(struct cs_etm_auxtrace *etm, int cpu)
{
int i;
- u64 *metadata = NULL;
for (i = 0; i < etm->num_cpu; i++) {
if (etm->metadata[i][CS_ETM_CPU] == (u64)cpu) {
- metadata = etm->metadata[i];
- break;
+ return i;
}
}
- return metadata;
+ return -1;
+}
+
+/*
+ * Get a metadata for a specific cpu from an array.
+ *
+ */
+static u64 *get_cpu_data(struct cs_etm_auxtrace *etm, int cpu)
+{
+ int idx = get_cpu_data_idx(etm, cpu);
+
+ return (idx != -1) ? etm->metadata[idx] : NULL;
}
/*
@@ -641,66 +650,80 @@ static void cs_etm__packet_dump(const char *pkt_string)
}
static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int idx,
- u32 etmidr)
+ struct cs_etm_auxtrace *etm, int t_idx,
+ int m_idx, u32 etmidr)
{
u64 **metadata = etm->metadata;
- t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
- t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
- t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
+ t_params[t_idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
+ t_params[t_idx].etmv3.reg_ctrl = metadata[m_idx][CS_ETM_ETMCR];
+ t_params[t_idx].etmv3.reg_trc_id = metadata[m_idx][CS_ETM_ETMTRACEIDR];
}
static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int idx)
+ struct cs_etm_auxtrace *etm, int t_idx,
+ int m_idx)
{
u64 **metadata = etm->metadata;
- t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
- t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
- t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
- t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
- t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
- t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
+ t_params[t_idx].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[t_idx].etmv4.reg_idr0 = metadata[m_idx][CS_ETMV4_TRCIDR0];
+ t_params[t_idx].etmv4.reg_idr1 = metadata[m_idx][CS_ETMV4_TRCIDR1];
+ t_params[t_idx].etmv4.reg_idr2 = metadata[m_idx][CS_ETMV4_TRCIDR2];
+ t_params[t_idx].etmv4.reg_idr8 = metadata[m_idx][CS_ETMV4_TRCIDR8];
+ t_params[t_idx].etmv4.reg_configr = metadata[m_idx][CS_ETMV4_TRCCONFIGR];
+ t_params[t_idx].etmv4.reg_traceidr = metadata[m_idx][CS_ETMV4_TRCTRACEIDR];
}
static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
- struct cs_etm_auxtrace *etm, int idx)
+ struct cs_etm_auxtrace *etm, int t_idx,
+ int m_idx)
{
u64 **metadata = etm->metadata;
- t_params[idx].protocol = CS_ETM_PROTO_ETE;
- t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETE_TRCIDR0];
- t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETE_TRCIDR1];
- t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETE_TRCIDR2];
- t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETE_TRCIDR8];
- t_params[idx].ete.reg_configr = metadata[idx][CS_ETE_TRCCONFIGR];
- t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETE_TRCTRACEIDR];
- t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
+ t_params[t_idx].protocol = CS_ETM_PROTO_ETE;
+ t_params[t_idx].ete.reg_idr0 = metadata[m_idx][CS_ETE_TRCIDR0];
+ t_params[t_idx].ete.reg_idr1 = metadata[m_idx][CS_ETE_TRCIDR1];
+ t_params[t_idx].ete.reg_idr2 = metadata[m_idx][CS_ETE_TRCIDR2];
+ t_params[t_idx].ete.reg_idr8 = metadata[m_idx][CS_ETE_TRCIDR8];
+ t_params[t_idx].ete.reg_configr = metadata[m_idx][CS_ETE_TRCCONFIGR];
+ t_params[t_idx].ete.reg_traceidr = metadata[m_idx][CS_ETE_TRCTRACEIDR];
+ t_params[t_idx].ete.reg_devarch = metadata[m_idx][CS_ETE_TRCDEVARCH];
}
static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
struct cs_etm_auxtrace *etm,
+ bool formatted,
+ int sample_cpu,
int decoders)
{
- int i;
+ int t_idx, m_idx;
u32 etmidr;
u64 architecture;
- for (i = 0; i < decoders; i++) {
- architecture = etm->metadata[i][CS_ETM_MAGIC];
+ for (t_idx = 0; t_idx < decoders; t_idx++) {
+ if (formatted)
+ m_idx = t_idx;
+ else {
+ m_idx = get_cpu_data_idx(etm, sample_cpu);
+ if (m_idx == -1) {
+ pr_warning("CS_ETM: unknown CPU, falling back to first metadata\n");
+ m_idx = 0;
+ }
+ }
+
+ architecture = etm->metadata[m_idx][CS_ETM_MAGIC];
switch (architecture) {
case __perf_cs_etmv3_magic:
- etmidr = etm->metadata[i][CS_ETM_ETMIDR];
- cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
+ etmidr = etm->metadata[m_idx][CS_ETM_ETMIDR];
+ cs_etm__set_trace_param_etmv3(t_params, etm, t_idx, m_idx, etmidr);
break;
case __perf_cs_etmv4_magic:
- cs_etm__set_trace_param_etmv4(t_params, etm, i);
+ cs_etm__set_trace_param_etmv4(t_params, etm, t_idx, m_idx);
break;
case __perf_cs_ete_magic:
- cs_etm__set_trace_param_ete(t_params, etm, i);
+ cs_etm__set_trace_param_ete(t_params, etm, t_idx, m_idx);
break;
default:
return -EINVAL;
@@ -1016,7 +1039,7 @@ out:
}
static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
- bool formatted)
+ bool formatted, int sample_cpu)
{
struct cs_etm_decoder_params d_params;
struct cs_etm_trace_params *t_params = NULL;
@@ -1041,7 +1064,7 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
if (!t_params)
goto out_free;
- if (cs_etm__init_trace_params(t_params, etm, decoders))
+ if (cs_etm__init_trace_params(t_params, etm, formatted, sample_cpu, decoders))
goto out_free;
/* Set decoder parameters to decode trace packets */
@@ -1081,14 +1104,15 @@ out_free:
static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
struct auxtrace_queue *queue,
unsigned int queue_nr,
- bool formatted)
+ bool formatted,
+ int sample_cpu)
{
struct cs_etm_queue *etmq = queue->priv;
if (list_empty(&queue->head) || etmq)
return 0;
- etmq = cs_etm__alloc_queue(etm, formatted);
+ etmq = cs_etm__alloc_queue(etm, formatted, sample_cpu);
if (!etmq)
return -ENOMEM;
@@ -2816,7 +2840,7 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
* formatted in piped mode (true).
*/
err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
- idx, true);
+ idx, true, -1);
if (err)
return err;
@@ -3022,7 +3046,7 @@ static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_o
idx = auxtrace_event->idx;
formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
- idx, formatted);
+ idx, formatted, sample->cpu);
}
/* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 7cca37887917..4696267a32f0 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -242,7 +242,7 @@ struct cs_etm_packet_queue {
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
-struct perf_event_attr *cs_etm_get_default_config(struct perf_pmu *pmu);
+void cs_etm_get_default_config(const struct perf_pmu *pmu, struct perf_event_attr *attr);
enum cs_etm_pid_fmt {
CS_ETM_PIDFMT_NONE,
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index fc16299c915f..c29d8a382b19 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -17,6 +17,7 @@
#include "util.h" // rm_rf_perf_data()
#include "debug.h"
#include "header.h"
+#include "rlimit.h"
#include <internal/lib.h>
static void close_dir(struct perf_data_file *files, int nr)
@@ -35,6 +36,7 @@ void perf_data__close_dir(struct perf_data *data)
int perf_data__create_dir(struct perf_data *data, int nr)
{
+ enum rlimit_action set_rlimit = NO_CHANGE;
struct perf_data_file *files = NULL;
int i, ret;
@@ -54,11 +56,21 @@ int perf_data__create_dir(struct perf_data *data, int nr)
goto out_err;
}
+retry_open:
ret = open(file->path, O_RDWR|O_CREAT|O_TRUNC, S_IRUSR|S_IWUSR);
if (ret < 0) {
+ /*
+ * If using parallel threads to collect data,
+ * perf record needs at least 6 fds per CPU.
+ * When we run out of them try to increase the limits.
+ */
+ if (errno == EMFILE && rlimit__increase_nofile(&set_rlimit))
+ goto retry_open;
+
ret = -errno;
goto out_err;
}
+ set_rlimit = NO_CHANGE;
file->fd = ret;
}
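The retry above mirrors what evsel__open_cpu() already did for perf stat: on EMFILE, raise RLIMIT_NOFILE and try the open again. A generic sketch of that pattern (helper names are illustrative; the real helper is the rlimit__increase_nofile() referenced above):

#include <sys/resource.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>

static bool raise_nofile_to_max(void)
{
	struct rlimit rl;

	/* bump the soft limit up to the hard limit, if there is headroom */
	if (getrlimit(RLIMIT_NOFILE, &rl) != 0 || rl.rlim_cur >= rl.rlim_max)
		return false;
	rl.rlim_cur = rl.rlim_max;
	return setrlimit(RLIMIT_NOFILE, &rl) == 0;
}

static int open_with_retry(const char *path)
{
	int fd;

retry:
	fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 && errno == EMFILE && raise_nofile_to_max())
		goto retry;	/* at most one retry once the soft limit is maxed out */
	return fd;
}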
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 4a1dc21b0450..908e16813722 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -52,8 +52,10 @@ static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al)
d_al->sym_end = sym->end;
if (al->addr < sym->end)
d_al->symoff = al->addr - sym->start;
- else
+ else if (al->map)
d_al->symoff = al->addr - map__start(al->map) - sym->start;
+ else
+ d_al->symoff = 0;
d_al->sym_binding = sym->binding;
} else {
d_al->sym = NULL;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index bdfead36b83a..1f629b6fb7cf 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -477,6 +477,7 @@ void dso__set_module_info(struct dso *dso, struct kmod_path *m,
dso->comp = m->comp;
}
+ dso->is_kmod = 1;
dso__set_short_name(dso, strdup(m->name), true);
}
@@ -1338,6 +1339,7 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
dso->has_srcline = 1;
dso->a2l_fails = 1;
dso->kernel = DSO_SPACE__USER;
+ dso->is_kmod = 0;
dso->needs_swap = DSO_SWAP__UNSET;
dso->comp = COMP_ID__NONE;
RB_CLEAR_NODE(&dso->rb_node);
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index b41c9782c754..3759de8c2267 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -162,6 +162,7 @@ struct dso {
char *symsrc_filename;
unsigned int a2l_fails;
enum dso_space_type kernel;
+ bool is_kmod;
enum dso_swap_type needs_swap;
enum dso_binary_type symtab_type;
enum dso_binary_type binary_type;
@@ -181,6 +182,7 @@ struct dso {
u8 rel;
struct build_id bid;
u64 text_offset;
+ u64 text_end;
const char *short_name;
const char *long_name;
u16 long_name_len;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index a164164001fb..44140b7f596a 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -457,7 +457,7 @@ const char *perf_env__cpuid(struct perf_env *env)
{
int status;
- if (!env || !env->cpuid) { /* Assume local operation */
+ if (!env->cpuid) { /* Assume local operation */
status = perf_env__read_cpuid(env);
if (status)
return NULL;
@@ -470,7 +470,7 @@ int perf_env__nr_pmu_mappings(struct perf_env *env)
{
int status;
- if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
+ if (!env->nr_pmu_mappings) { /* Assume local operation */
status = perf_env__read_pmu_mappings(env);
if (status)
return 0;
@@ -483,7 +483,7 @@ const char *perf_env__pmu_mappings(struct perf_env *env)
{
int status;
- if (!env || !env->pmu_mappings) { /* Assume local operation */
+ if (!env->pmu_mappings) { /* Assume local operation */
status = perf_env__read_pmu_mappings(env);
if (status)
return NULL;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 7ef43f72098e..e36da58522ef 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -251,6 +251,9 @@ static struct evsel *evlist__dummy_event(struct evlist *evlist)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_DUMMY,
.size = sizeof(attr), /* to capture ABI version */
+ /* Avoid frequency mode for dummy events to avoid associated timers. */
+ .freq = 0,
+ .sample_period = 1,
};
return evsel__new_idx(&attr, evlist->core.nr_entries);
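Setting freq = 0 and sample_period = 1 keeps the kernel from arming a sampling timer for an event that never really samples. As a rough, self-contained illustration of the same attribute settings via the raw syscall (perf itself builds the event through evsel, so this is only a sketch):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_dummy_event(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.size = sizeof(attr);	/* to capture ABI version */
	attr.freq = 0;			/* no frequency mode ... */
	attr.sample_period = 1;		/* ... so no sampling timer is needed */

	return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
}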
@@ -277,8 +280,6 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
evsel->core.attr.exclude_kernel = 1;
evsel->core.attr.exclude_guest = 1;
evsel->core.attr.exclude_hv = 1;
- evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = 1;
evsel->core.system_wide = system_wide;
evsel->no_aux_samples = true;
evsel->name = strdup("dummy:u");
@@ -1694,6 +1695,24 @@ void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_ev
tracking_evsel->tracking = true;
}
+struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide)
+{
+ struct evsel *evsel;
+
+ evsel = evlist__get_tracking_event(evlist);
+ if (!evsel__is_dummy_event(evsel)) {
+ evsel = evlist__add_aux_dummy(evlist, system_wide);
+ if (!evsel)
+ return NULL;
+
+ evlist__set_tracking_event(evlist, evsel);
+ } else if (system_wide) {
+ perf_evlist__go_system_wide(&evlist->core, &evsel->core);
+ }
+
+ return evsel;
+}
+
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
struct evsel *evsel;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 664c6bf7b3e0..98e7ddb2bd30 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -387,6 +387,7 @@ bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr);
struct evsel *evlist__get_tracking_event(struct evlist *evlist);
void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel);
+struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide);
struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index a8a5ff87cc1f..72a5dfc38d38 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -49,6 +49,7 @@
#include "off_cpu.h"
#include "pmu.h"
#include "pmus.h"
+#include "rlimit.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include "util/bpf-filter.h"
@@ -1989,33 +1990,6 @@ bool evsel__detect_missing_features(struct evsel *evsel)
}
}
-bool evsel__increase_rlimit(enum rlimit_action *set_rlimit)
-{
- int old_errno;
- struct rlimit l;
-
- if (*set_rlimit < INCREASED_MAX) {
- old_errno = errno;
-
- if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
- if (*set_rlimit == NO_CHANGE) {
- l.rlim_cur = l.rlim_max;
- } else {
- l.rlim_cur = l.rlim_max + 1000;
- l.rlim_max = l.rlim_cur;
- }
- if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
- (*set_rlimit) += 1;
- errno = old_errno;
- return true;
- }
- }
- errno = old_errno;
- }
-
- return false;
-}
-
static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads,
int start_cpu_map_idx, int end_cpu_map_idx)
@@ -2143,7 +2117,7 @@ try_fallback:
* perf stat needs between 5 and 22 fds per CPU. When we run out
* of them try to increase the limits.
*/
- if (err == -EMFILE && evsel__increase_rlimit(&set_rlimit))
+ if (err == -EMFILE && rlimit__increase_nofile(&set_rlimit))
goto retry_open;
if (err != -EINVAL || idx > 0 || thread > 0)
@@ -2766,6 +2740,11 @@ struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
return tep_find_field(evsel->tp_format, name);
}
+struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name)
+{
+ return tep_find_common_field(evsel->tp_format, name);
+}
+
void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
struct tep_format_field *field = evsel__field(evsel, name);
@@ -2831,6 +2810,14 @@ u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *n
return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
+
+u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name)
+{
+ struct tep_format_field *field = evsel__common_field(evsel, name);
+
+ return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
+}
+
#endif
bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 848534ec74fa..d791316a1792 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -330,9 +330,6 @@ int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
bool evsel__detect_missing_features(struct evsel *evsel);
-enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX };
-bool evsel__increase_rlimit(enum rlimit_action *set_rlimit);
-
bool evsel__precise_ip_fallback(struct evsel *evsel);
struct perf_sample;
@@ -340,6 +337,7 @@ struct perf_sample;
#ifdef HAVE_LIBTRACEEVENT
void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name);
u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name);
+u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name);
static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name)
{
@@ -352,6 +350,7 @@ struct tep_format_field;
u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample, bool needs_swap);
struct tep_format_field *evsel__field(struct evsel *evsel, const char *name);
+struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name);
static inline bool __evsel__match(const struct evsel *evsel, u32 type, u64 config)
{
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 4488f306de78..7be23b3ac082 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -509,7 +509,7 @@ double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx __maybe_unused,
bool compute_ids __maybe_unused, const char *test_id)
{
double ret;
- struct perf_pmu *pmu = pmu__find_core_pmu();
+ struct perf_pmu *pmu = perf_pmus__find_core_pmu();
char *cpuid = perf_pmu__getcpuid(pmu);
if (!cpuid)
diff --git a/tools/perf/util/expr.y b/tools/perf/util/expr.y
index 6c93b358cc2d..e364790babb5 100644
--- a/tools/perf/util/expr.y
+++ b/tools/perf/util/expr.y
@@ -1,6 +1,8 @@
/* Simple expression parser */
%{
+#ifndef NDEBUG
#define YYDEBUG 1
+#endif
#include <assert.h>
#include <math.h>
#include <stdlib.h>
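Guarding YYDEBUG with NDEBUG means release builds keep the plain parser while debug builds (make DEBUG=1) compile in bison's tracing support. Tracing still has to be switched on at run time through the parser's debug flag; a hedged sketch of how such a flag is typically flipped (the symbol is yydebug by default and gets renamed by %define api.prefix, e.g. the parse_events_debug declaration seen later in this diff):

#ifndef NDEBUG
extern int yydebug;
#endif

static void enable_parser_trace(int verbose)
{
#ifndef NDEBUG
	if (verbose > 1)
		yydebug = 1;	/* only effective when the parser was built with YYDEBUG */
#else
	(void)verbose;
#endif
}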
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d812e1e371a7..e86b9439ffee 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2573,7 +2573,7 @@ error:
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
u32 nr, i;
- char *str;
+ char *str = NULL;
struct strbuf sb;
int cpu_nr = ff->ph->env.nr_cpus_avail;
u64 size = 0;
@@ -2601,7 +2601,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
- free(str);
+ zfree(&str);
}
ph->env.sibling_cores = strbuf_detach(&sb, NULL);
@@ -2620,7 +2620,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
- free(str);
+ zfree(&str);
}
ph->env.sibling_threads = strbuf_detach(&sb, NULL);
@@ -2684,7 +2684,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
goto error;
size += string_size(str);
- free(str);
+ zfree(&str);
}
ph->env.sibling_dies = strbuf_detach(&sb, NULL);
@@ -2699,6 +2699,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
error:
strbuf_release(&sb);
+ zfree(&str);
free_cpu:
zfree(&ph->env.cpu);
return -1;
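The switch from free(str) to zfree(&str), together with initializing str to NULL, makes the shared error label safe: the pointer is always either valid or NULL, never dangling. Perf's zfree() frees and clears in one step; a generic sketch of the idea (the macro name here is illustrative, not the real helper):

#include <stdlib.h>

#define ZFREE(pp)			\
	do {				\
		free(*(pp));		\
		*(pp) = NULL;		\
	} while (0)

/* usage: char *str = NULL; ... ZFREE(&str); calling it again later is harmless */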
@@ -2736,10 +2737,9 @@ static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
goto error;
n->map = perf_cpu_map__new(str);
+ free(str);
if (!n->map)
goto error;
-
- free(str);
}
ff->ph->env.nr_numa_nodes = nr;
ff->ph->env.numa_nodes = nodes;
@@ -2913,10 +2913,10 @@ static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
return -1;
for (i = 0; i < cnt; i++) {
- struct cpu_cache_level c;
+ struct cpu_cache_level *c = &caches[i];
#define _R(v) \
- if (do_read_u32(ff, &c.v))\
+ if (do_read_u32(ff, &c->v)) \
goto out_free_caches; \
_R(level)
@@ -2926,22 +2926,25 @@ static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
#undef _R
#define _R(v) \
- c.v = do_read_string(ff); \
- if (!c.v) \
- goto out_free_caches;
+ c->v = do_read_string(ff); \
+ if (!c->v) \
+ goto out_free_caches; \
_R(type)
_R(size)
_R(map)
#undef _R
-
- caches[i] = c;
}
ff->ph->env.caches = caches;
ff->ph->env.caches_cnt = cnt;
return 0;
out_free_caches:
+ for (i = 0; i < cnt; i++) {
+ free(caches[i].type);
+ free(caches[i].size);
+ free(caches[i].map);
+ }
free(caches);
return -1;
}
@@ -3585,18 +3588,16 @@ static int perf_header__adds_write(struct perf_header *header,
struct feat_copier *fc)
{
int nr_sections;
- struct feat_fd ff;
+ struct feat_fd ff = {
+ .fd = fd,
+ .ph = header,
+ };
struct perf_file_section *feat_sec, *p;
int sec_size;
u64 sec_start;
int feat;
int err;
- ff = (struct feat_fd){
- .fd = fd,
- .ph = header,
- };
-
nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
if (!nr_sections)
return 0;
@@ -3623,6 +3624,7 @@ static int perf_header__adds_write(struct perf_header *header,
err = do_write(&ff, feat_sec, sec_size);
if (err < 0)
pr_debug("failed to write feature section\n");
+ free(ff.buf); /* TODO: added to silence clang-tidy. */
free(feat_sec);
return err;
}
@@ -3630,11 +3632,11 @@ static int perf_header__adds_write(struct perf_header *header,
int perf_header__write_pipe(int fd)
{
struct perf_pipe_file_header f_header;
- struct feat_fd ff;
+ struct feat_fd ff = {
+ .fd = fd,
+ };
int err;
- ff = (struct feat_fd){ .fd = fd };
-
f_header = (struct perf_pipe_file_header){
.magic = PERF_MAGIC,
.size = sizeof(f_header),
@@ -3645,7 +3647,7 @@ int perf_header__write_pipe(int fd)
pr_debug("failed to write perf pipe header\n");
return err;
}
-
+ free(ff.buf);
return 0;
}
@@ -3658,11 +3660,12 @@ static int perf_session__do_write_header(struct perf_session *session,
struct perf_file_attr f_attr;
struct perf_header *header = &session->header;
struct evsel *evsel;
- struct feat_fd ff;
+ struct feat_fd ff = {
+ .fd = fd,
+ };
u64 attr_offset;
int err;
- ff = (struct feat_fd){ .fd = fd};
lseek(fd, sizeof(f_header), SEEK_SET);
evlist__for_each_entry(session->evlist, evsel) {
@@ -3670,6 +3673,7 @@ static int perf_session__do_write_header(struct perf_session *session,
err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
if (err < 0) {
pr_debug("failed to write perf header\n");
+ free(ff.buf);
return err;
}
}
@@ -3695,6 +3699,7 @@ static int perf_session__do_write_header(struct perf_session *session,
err = do_write(&ff, &f_attr, sizeof(f_attr));
if (err < 0) {
pr_debug("failed to write perf header attribute\n");
+ free(ff.buf);
return err;
}
}
@@ -3705,8 +3710,10 @@ static int perf_session__do_write_header(struct perf_session *session,
if (at_exit) {
err = perf_header__adds_write(header, evlist, fd, fc);
- if (err < 0)
+ if (err < 0) {
+ free(ff.buf);
return err;
+ }
}
f_header = (struct perf_file_header){
@@ -3728,6 +3735,7 @@ static int perf_session__do_write_header(struct perf_session *session,
lseek(fd, 0, SEEK_SET);
err = do_write(&ff, &f_header, sizeof(f_header));
+ free(ff.buf);
if (err < 0) {
pr_debug("failed to write perf header\n");
return err;
diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c
index 45b614bb73bf..43bd1ca62d58 100644
--- a/tools/perf/util/hisi-ptt.c
+++ b/tools/perf/util/hisi-ptt.c
@@ -108,8 +108,10 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session,
data_offset = 0;
} else {
data_offset = lseek(fd, 0, SEEK_CUR);
- if (data_offset == -1)
+ if (data_offset == -1) {
+ free(data);
return -errno;
+ }
}
err = readn(fd, data, size);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 3dc8a4968beb..0888b7163b7c 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -515,17 +515,16 @@ err_rawdata:
err_infos:
if (he->branch_info) {
- map__put(he->branch_info->from.ms.map);
- map__put(he->branch_info->to.ms.map);
+ map_symbol__exit(&he->branch_info->from.ms);
+ map_symbol__exit(&he->branch_info->to.ms);
zfree(&he->branch_info);
}
if (he->mem_info) {
- map__put(he->mem_info->iaddr.ms.map);
- map__put(he->mem_info->daddr.ms.map);
+ map_symbol__exit(&he->mem_info->iaddr.ms);
+ map_symbol__exit(&he->mem_info->daddr.ms);
}
err:
- maps__zput(he->ms.maps);
- map__zput(he->ms.map);
+ map_symbol__exit(&he->ms);
zfree(&he->stat_acc);
return -ENOMEM;
}
@@ -1317,20 +1316,19 @@ void hist_entry__delete(struct hist_entry *he)
struct hist_entry_ops *ops = he->ops;
thread__zput(he->thread);
- maps__zput(he->ms.maps);
- map__zput(he->ms.map);
+ map_symbol__exit(&he->ms);
if (he->branch_info) {
- map__zput(he->branch_info->from.ms.map);
- map__zput(he->branch_info->to.ms.map);
+ map_symbol__exit(&he->branch_info->from.ms);
+ map_symbol__exit(&he->branch_info->to.ms);
zfree_srcline(&he->branch_info->srcline_from);
zfree_srcline(&he->branch_info->srcline_to);
zfree(&he->branch_info);
}
if (he->mem_info) {
- map__zput(he->mem_info->iaddr.ms.map);
- map__zput(he->mem_info->daddr.ms.map);
+ map_symbol__exit(&he->mem_info->iaddr.ms);
+ map_symbol__exit(&he->mem_info->daddr.ms);
mem_info__zput(he->mem_info);
}
@@ -2142,7 +2140,7 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he)
{
if (hists->thread_filter != NULL &&
- RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(hists->thread_filter)) {
+ !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
he->filtered |= (1 << HIST_FILTER__THREAD);
return true;
}
@@ -2676,8 +2674,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
/* If we have branch cycles always annotate them. */
if (bs && bs->nr && entries[0].flags.cycles) {
- int i;
-
bi = sample__resolve_bstack(sample, al);
if (bi) {
struct addr_map_symbol *prev = NULL;
@@ -2692,7 +2688,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
* Note that perf stores branches reversed from
* program order!
*/
- for (i = bs->nr - 1; i >= 0; i--) {
+ for (int i = bs->nr - 1; i >= 0; i--) {
addr_map_symbol__account_cycles(&bi[i].from,
nonany_branch_mode ? NULL : prev,
bi[i].flags.cycles);
@@ -2701,6 +2697,10 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
if (total_cycles)
*total_cycles += bi[i].flags.cycles;
}
+ for (unsigned int i = 0; i < bs->nr; i++) {
+ map_symbol__exit(&bi[i].to.ms);
+ map_symbol__exit(&bi[i].from.ms);
+ }
free(bi);
}
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index af9710622a1f..bccb988a7a44 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -8,7 +8,9 @@
#include <string.h>
#include <endian.h>
#include <byteswap.h>
+#include <linux/kernel.h>
#include <linux/compiler.h>
+#include <asm-generic/unaligned.h>
#include "intel-pt-pkt-decoder.h"
@@ -17,17 +19,11 @@
#define BIT63 ((uint64_t)1 << 63)
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-#define le16_to_cpu bswap_16
-#define le32_to_cpu bswap_32
-#define le64_to_cpu bswap_64
#define memcpy_le64(d, s, n) do { \
memcpy((d), (s), (n)); \
*(d) = le64_to_cpu(*(d)); \
} while (0)
#else
-#define le16_to_cpu
-#define le32_to_cpu
-#define le64_to_cpu
#define memcpy_le64 memcpy
#endif
@@ -83,7 +79,7 @@ static int intel_pt_get_long_tnt(const unsigned char *buf, size_t len,
if (len < 8)
return INTEL_PT_NEED_MORE_BYTES;
- payload = le64_to_cpu(*(uint64_t *)buf);
+ payload = get_unaligned_le64(buf);
for (count = 47; count; count--) {
if (payload & BIT63)
@@ -124,26 +120,21 @@ static int intel_pt_get_cbr(const unsigned char *buf, size_t len,
if (len < 4)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_CBR;
- packet->payload = le16_to_cpu(*(uint16_t *)(buf + 2));
+ packet->payload = get_unaligned_le16(buf + 2);
return 4;
}
static int intel_pt_get_vmcs(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
- unsigned int count = (52 - 5) >> 3;
-
- if (count < 1 || count > 7)
- return INTEL_PT_BAD_PACKET;
-
- if (len < count + 2)
+ if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_VMCS;
- packet->count = count;
- memcpy_le64(&packet->payload, buf + 2, count);
+ packet->count = 5;
+ memcpy_le64(&packet->payload, buf + 2, 5);
- return count + 2;
+ return 7;
}
static int intel_pt_get_ovf(struct intel_pt_pkt *packet)
@@ -199,7 +190,7 @@ static int intel_pt_get_mnt(const unsigned char *buf, size_t len,
if (len < 11)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_MNT;
- memcpy_le64(&packet->payload, buf + 3, 8);
+ packet->payload = get_unaligned_le64(buf + 3);
return 11;
}
@@ -228,12 +219,12 @@ static int intel_pt_get_ptwrite(const unsigned char *buf, size_t len,
case 0:
if (len < 6)
return INTEL_PT_NEED_MORE_BYTES;
- packet->payload = le32_to_cpu(*(uint32_t *)(buf + 2));
+ packet->payload = get_unaligned_le32(buf + 2);
return 6;
case 1:
if (len < 10)
return INTEL_PT_NEED_MORE_BYTES;
- packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
+ packet->payload = get_unaligned_le64(buf + 2);
return 10;
default:
return INTEL_PT_BAD_PACKET;
@@ -258,7 +249,7 @@ static int intel_pt_get_mwait(const unsigned char *buf, size_t len,
if (len < 10)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_MWAIT;
- packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
+ packet->payload = get_unaligned_le64(buf + 2);
return 10;
}
@@ -311,7 +302,7 @@ static int intel_pt_get_bip_8(const unsigned char *buf, size_t len,
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_BIP;
packet->count = buf[0] >> 3;
- memcpy_le64(&packet->payload, buf + 1, 8);
+ packet->payload = get_unaligned_le64(buf + 1);
return 9;
}
@@ -350,7 +341,7 @@ static int intel_pt_get_evd(const unsigned char *buf, size_t len,
packet->type = INTEL_PT_EVD;
packet->count = buf[2] & 0x3f;
packet->payload = buf[3];
- memcpy_le64(&packet->payload, buf + 3, 8);
+ packet->payload = get_unaligned_le64(buf + 3);
return 11;
}
@@ -465,13 +456,13 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
if (len < 3)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 2;
- packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
+ packet->payload = get_unaligned_le16(buf + 1);
break;
case 2:
if (len < 5)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 4;
- packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
+ packet->payload = get_unaligned_le32(buf + 1);
break;
case 3:
case 4:
@@ -484,7 +475,7 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
if (len < 9)
return INTEL_PT_NEED_MORE_BYTES;
ip_len = 8;
- packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1));
+ packet->payload = get_unaligned_le64(buf + 1);
break;
default:
return INTEL_PT_BAD_PACKET;
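Replacing the casts with get_unaligned_le16/32/64() avoids dereferencing potentially misaligned pointers inside the trace buffer, which is undefined behaviour and can fault on some architectures. The tools build picks the helpers up from asm-generic/unaligned.h; a portable userspace equivalent is simply memcpy plus a byte-order conversion (sketch, assuming glibc's <endian.h>; helper names are illustrative):

#include <endian.h>
#include <stdint.h>
#include <string.h>

static inline uint64_t rd_unaligned_le64(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));	/* aligned local copy, no unaligned load */
	return le64toh(v);
}

static inline uint32_t rd_unaligned_le32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return le32toh(v);
}

static inline uint16_t rd_unaligned_le16(const void *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));
	return le16toh(v);
}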
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index dbf0bc71a63b..f38893e0b036 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -1512,9 +1512,11 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
} else if (ptq->state->flags & INTEL_PT_ASYNC) {
if (!ptq->state->to_ip)
ptq->flags = PERF_IP_FLAG_BRANCH |
+ PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_TRACE_END;
else if (ptq->state->from_nr && !ptq->state->to_nr)
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+ PERF_IP_FLAG_ASYNC |
PERF_IP_FLAG_VMEXIT;
else
ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
diff --git a/tools/perf/util/intel-pt.h b/tools/perf/util/intel-pt.h
index c7d6068e3a6b..18fd0be52e6c 100644
--- a/tools/perf/util/intel-pt.h
+++ b/tools/perf/util/intel-pt.h
@@ -42,6 +42,7 @@ struct auxtrace_record *intel_pt_recording_init(int *err);
int intel_pt_process_auxtrace_info(union perf_event *event,
struct perf_session *session);
-struct perf_event_attr *intel_pt_pmu_default_config(struct perf_pmu *pmu);
+void intel_pt_pmu_default_config(const struct perf_pmu *intel_pt_pmu,
+ struct perf_event_attr *attr);
#endif
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 6b2b96c16ccd..1f657ef8975f 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -675,6 +675,7 @@ jit_repipe_unwinding_info(struct jit_buf_desc *jd, union jr_entry *jr)
jd->eh_frame_hdr_size = jr->unwinding.eh_frame_hdr_size;
jd->unwinding_size = jr->unwinding.unwinding_size;
jd->unwinding_mapped_size = jr->unwinding.mapped_size;
+ free(jd->unwinding_data);
jd->unwinding_data = unwinding_data;
return 0;
diff --git a/tools/perf/util/kwork.h b/tools/perf/util/kwork.h
index 53b7327550b8..76fe2a821bcf 100644
--- a/tools/perf/util/kwork.h
+++ b/tools/perf/util/kwork.h
@@ -16,6 +16,7 @@ enum kwork_class_type {
KWORK_CLASS_IRQ,
KWORK_CLASS_SOFTIRQ,
KWORK_CLASS_WORKQUEUE,
+ KWORK_CLASS_SCHED,
KWORK_CLASS_MAX,
};
@@ -23,6 +24,7 @@ enum kwork_report_type {
KWORK_REPORT_RUNTIME,
KWORK_REPORT_LATENCY,
KWORK_REPORT_TIMEHIST,
+ KWORK_REPORT_TOP,
};
enum kwork_trace_type {
@@ -91,6 +93,7 @@ struct kwork_atom_page {
DECLARE_BITMAP(bitmap, NR_ATOM_PER_PAGE);
};
+struct perf_kwork;
struct kwork_class;
struct kwork_work {
/*
@@ -127,6 +130,13 @@ struct kwork_work {
u64 max_latency_start;
u64 max_latency_end;
u64 total_latency;
+
+ /*
+ * top report
+ */
+ u32 cpu_usage;
+ u32 tgid;
+ bool is_kthread;
};
struct kwork_class {
@@ -142,8 +152,10 @@ struct kwork_class {
int (*class_init)(struct kwork_class *class,
struct perf_session *session);
- void (*work_init)(struct kwork_class *class,
+ void (*work_init)(struct perf_kwork *kwork,
+ struct kwork_class *class,
struct kwork_work *work,
+ enum kwork_trace_type src_type,
struct evsel *evsel,
struct perf_sample *sample,
struct machine *machine);
@@ -152,7 +164,6 @@ struct kwork_class {
char *buf, int len);
};
-struct perf_kwork;
struct trace_kwork_handler {
int (*raise_event)(struct perf_kwork *kwork,
struct kwork_class *class, struct evsel *evsel,
@@ -165,6 +176,23 @@ struct trace_kwork_handler {
int (*exit_event)(struct perf_kwork *kwork,
struct kwork_class *class, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine);
+
+ int (*sched_switch_event)(struct perf_kwork *kwork,
+ struct kwork_class *class, struct evsel *evsel,
+ struct perf_sample *sample, struct machine *machine);
+};
+
+struct __top_cpus_runtime {
+ u64 load;
+ u64 idle;
+ u64 irq;
+ u64 softirq;
+ u64 total;
+};
+
+struct kwork_top_stat {
+ DECLARE_BITMAP(all_cpus_bitmap, MAX_NR_CPUS);
+ struct __top_cpus_runtime *cpus_runtime;
};
struct perf_kwork {
@@ -218,6 +246,11 @@ struct perf_kwork {
u64 all_runtime;
u64 all_count;
u64 nr_skipped_events[KWORK_TRACE_MAX + 1];
+
+ /*
+ * perf kwork top data
+ */
+ struct kwork_top_stat top_stat;
};
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
@@ -233,6 +266,13 @@ void perf_kwork__report_cleanup_bpf(void);
void perf_kwork__trace_start(void);
void perf_kwork__trace_finish(void);
+int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork);
+int perf_kwork__top_read_bpf(struct perf_kwork *kwork);
+void perf_kwork__top_cleanup_bpf(void);
+
+void perf_kwork__top_start(void);
+void perf_kwork__top_finish(void);
+
#else /* !HAVE_BPF_SKEL */
static inline int
@@ -252,6 +292,23 @@ static inline void perf_kwork__report_cleanup_bpf(void) {}
static inline void perf_kwork__trace_start(void) {}
static inline void perf_kwork__trace_finish(void) {}
+static inline int
+perf_kwork__top_prepare_bpf(struct perf_kwork *kwork __maybe_unused)
+{
+ return -1;
+}
+
+static inline int
+perf_kwork__top_read_bpf(struct perf_kwork *kwork __maybe_unused)
+{
+ return -1;
+}
+
+static inline void perf_kwork__top_cleanup_bpf(void) {}
+
+static inline void perf_kwork__top_start(void) {}
+static inline void perf_kwork__top_finish(void) {}
+
#endif /* HAVE_BPF_SKEL */
#endif /* PERF_UTIL_KWORK_H */
diff --git a/tools/perf/util/lock-contention.h b/tools/perf/util/lock-contention.h
index fa16532c971c..1a7248ff3889 100644
--- a/tools/perf/util/lock-contention.h
+++ b/tools/perf/util/lock-contention.h
@@ -9,9 +9,11 @@ struct lock_filter {
int nr_types;
int nr_addrs;
int nr_syms;
+ int nr_cgrps;
unsigned int *types;
unsigned long *addrs;
char **syms;
+ u64 *cgrps;
};
struct lock_stat {
@@ -136,6 +138,7 @@ struct lock_contention {
struct hlist_head *result;
struct lock_filter *filters;
struct lock_contention_fails fails;
+ struct rb_root cgroups;
unsigned long map_nr_entries;
int max_stack;
int stack_skip;
@@ -151,7 +154,7 @@ int lock_contention_prepare(struct lock_contention *con);
int lock_contention_start(void);
int lock_contention_stop(void);
int lock_contention_read(struct lock_contention *con);
-int lock_contention_finish(void);
+int lock_contention_finish(struct lock_contention *con);
#else /* !HAVE_BPF_SKEL */
@@ -162,7 +165,10 @@ static inline int lock_contention_prepare(struct lock_contention *con __maybe_un
static inline int lock_contention_start(void) { return 0; }
static inline int lock_contention_stop(void) { return 0; }
-static inline int lock_contention_finish(void) { return 0; }
+static inline int lock_contention_finish(struct lock_contention *con __maybe_unused)
+{
+ return 0;
+}
static inline int lock_contention_read(struct lock_contention *con __maybe_unused)
{
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 88f31b3a63ac..90c750150b19 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -67,7 +67,6 @@ static void machine__threads_init(struct machine *machine)
threads->entries = RB_ROOT_CACHED;
init_rwsem(&threads->lock);
threads->nr = 0;
- INIT_LIST_HEAD(&threads->dead);
threads->last_match = NULL;
}
}
@@ -969,7 +968,7 @@ static int machine__process_ksymbol_unregister(struct machine *machine,
if (!map)
return 0;
- if (RC_CHK_ACCESS(map) != RC_CHK_ACCESS(machine->vmlinux_map))
+ if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
maps__remove(machine__kernel_maps(machine), map);
else {
struct dso *dso = map__dso(map);
@@ -2058,7 +2057,7 @@ static void __machine__remove_thread(struct machine *machine, struct thread_rb_n
if (!nd)
nd = thread_rb_node__find(th, &threads->entries.rb_root);
- if (threads->last_match && RC_CHK_ACCESS(threads->last_match) == RC_CHK_ACCESS(th))
+ if (threads->last_match && RC_CHK_EQUAL(threads->last_match, th))
threads__set_last_match(threads, NULL);
if (lock)
@@ -2213,9 +2212,7 @@ int machine__process_event(struct machine *machine, union perf_event *event,
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
- if (!regexec(regex, sym->name, 0, NULL, 0))
- return true;
- return false;
+ return regexec(regex, sym->name, 0, NULL, 0) == 0;
}
static void ip__resolve_ams(struct thread *thread,
@@ -2392,8 +2389,7 @@ static int add_callchain_ip(struct thread *thread,
iter_cycles, branch_from, srcline);
out:
addr_location__exit(&al);
- maps__put(ms.maps);
- map__put(ms.map);
+ map_symbol__exit(&ms);
return err;
}
@@ -2624,16 +2620,18 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
save_lbr_cursor_node(thread, cursor, i);
}
- /* Add LBR ip from first entries.to */
- ip = entries[0].to;
- flags = &entries[0].flags;
- *branch_from = entries[0].from;
- err = add_callchain_ip(thread, cursor, parent,
- root_al, &cpumode, ip,
- true, flags, NULL,
- *branch_from);
- if (err)
- return err;
+ if (lbr_nr > 0) {
+ /* Add LBR ip from first entries.to */
+ ip = entries[0].to;
+ flags = &entries[0].flags;
+ *branch_from = entries[0].from;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL,
+ *branch_from);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -3117,8 +3115,7 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
if (ret != 0)
return ret;
}
- map__put(ilist_ms.map);
- maps__put(ilist_ms.maps);
+ map_symbol__exit(&ilist_ms);
return ret;
}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d034ecaf89c1..1279acda6a8a 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -35,7 +35,6 @@ struct threads {
struct rb_root_cached entries;
struct rw_semaphore lock;
unsigned int nr;
- struct list_head dead;
struct thread *last_match;
};
diff --git a/tools/perf/util/map_symbol.c b/tools/perf/util/map_symbol.c
new file mode 100644
index 000000000000..bef5079f2403
--- /dev/null
+++ b/tools/perf/util/map_symbol.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "map_symbol.h"
+#include "maps.h"
+#include "map.h"
+
+void map_symbol__exit(struct map_symbol *ms)
+{
+ maps__zput(ms->maps);
+ map__zput(ms->map);
+}
+
+void addr_map_symbol__exit(struct addr_map_symbol *ams)
+{
+ map_symbol__exit(&ams->ms);
+}
diff --git a/tools/perf/util/map_symbol.h b/tools/perf/util/map_symbol.h
index e08817b0c30f..72d5ed938ed6 100644
--- a/tools/perf/util/map_symbol.h
+++ b/tools/perf/util/map_symbol.h
@@ -22,4 +22,8 @@ struct addr_map_symbol {
u64 phys_addr;
u64 data_page_size;
};
+
+void map_symbol__exit(struct map_symbol *ms);
+void addr_map_symbol__exit(struct addr_map_symbol *ams);
+
#endif // __PERF_MAP_SYMBOL
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 39ffe8ceb380..954b235e12e5 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -185,7 +185,6 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
{
int i = *argv_nr, k = 0;
struct perf_mem_event *e;
- struct perf_pmu *pmu;
for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
e = perf_mem_events__ptr(j);
@@ -202,6 +201,8 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
rec_argv[i++] = "-e";
rec_argv[i++] = perf_mem_events__name(j, NULL);
} else {
+ struct perf_pmu *pmu = NULL;
+
if (!e->supported) {
perf_mem_events__print_unsupport_hybrid(e, j);
return -1;
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 6231044a491e..0484736d9fe4 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -498,7 +498,7 @@ static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
while ((pmu = perf_pmus__scan(pmu))) {
- if (!pmu->id || strcmp(pmu->id, pm->compat))
+ if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
continue;
return d->fn(pm, table, d->data);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 65608a3cba81..aa2f5c6fc7fc 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -34,8 +34,9 @@
#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
-static int get_config_terms(struct list_head *head_config,
- struct list_head *head_terms __maybe_unused);
+static int get_config_terms(struct parse_events_terms *head_config, struct list_head *head_terms);
+static int parse_events_terms__copy(const struct parse_events_terms *src,
+ struct parse_events_terms *dest);
struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = {
@@ -153,26 +154,27 @@ const char *event_type(int type)
return "unknown";
}
-static char *get_config_str(struct list_head *head_terms, enum parse_events__term_type type_term)
+static char *get_config_str(struct parse_events_terms *head_terms,
+ enum parse_events__term_type type_term)
{
struct parse_events_term *term;
if (!head_terms)
return NULL;
- list_for_each_entry(term, head_terms, list)
+ list_for_each_entry(term, &head_terms->terms, list)
if (term->type_term == type_term)
return term->val.str;
return NULL;
}
-static char *get_config_metric_id(struct list_head *head_terms)
+static char *get_config_metric_id(struct parse_events_terms *head_terms)
{
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}
-static char *get_config_name(struct list_head *head_terms)
+static char *get_config_name(struct parse_events_terms *head_terms)
{
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}
@@ -188,11 +190,11 @@ static char *get_config_name(struct list_head *head_terms)
* @config_terms: the list of terms that may contain a raw term.
* @pmu: the PMU to scan for events from.
*/
-static void fix_raw(struct list_head *config_terms, struct perf_pmu *pmu)
+static void fix_raw(struct parse_events_terms *config_terms, struct perf_pmu *pmu)
{
struct parse_events_term *term;
- list_for_each_entry(term, config_terms, list) {
+ list_for_each_entry(term, &config_terms->terms, list) {
u64 num;
if (term->type_term != PARSE_EVENTS__TERM_TYPE_RAW)
@@ -356,7 +358,7 @@ static int config_term_common(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
- struct list_head *head,
+ struct parse_events_terms *head,
struct parse_events_error *err,
config_term_func_t config_term);
@@ -442,7 +444,7 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
- struct list_head *head_config)
+ struct parse_events_terms *head_config)
{
struct perf_pmu *pmu = NULL;
bool found_supported = false;
@@ -520,7 +522,7 @@ static void tracepoint_error(struct parse_events_error *e, int err,
static int add_tracepoint(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config, void *loc_)
+ struct parse_events_terms *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
@@ -545,7 +547,7 @@ static int add_tracepoint(struct list_head *list, int *idx,
static int add_tracepoint_multi_event(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config, YYLTYPE *loc)
+ struct parse_events_terms *head_config, YYLTYPE *loc)
{
char *evt_path;
struct dirent *evt_ent;
@@ -593,7 +595,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
static int add_tracepoint_event(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config, YYLTYPE *loc)
+ struct parse_events_terms *head_config, YYLTYPE *loc)
{
return strpbrk(evt_name, "*?") ?
add_tracepoint_multi_event(list, idx, sys_name, evt_name,
@@ -605,7 +607,7 @@ static int add_tracepoint_event(struct list_head *list, int *idx,
static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
- struct list_head *head_config, YYLTYPE *loc)
+ struct parse_events_terms *head_config, YYLTYPE *loc)
{
struct dirent *events_ent;
DIR *events_dir;
@@ -680,7 +682,7 @@ do { \
int parse_events_add_breakpoint(struct parse_events_state *parse_state,
struct list_head *list,
u64 addr, char *type, u64 len,
- struct list_head *head_config __maybe_unused)
+ struct parse_events_terms *head_config)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
@@ -1066,21 +1068,20 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
#endif
static int config_attr(struct perf_event_attr *attr,
- struct list_head *head,
+ struct parse_events_terms *head,
struct parse_events_error *err,
config_term_func_t config_term)
{
struct parse_events_term *term;
- list_for_each_entry(term, head, list)
+ list_for_each_entry(term, &head->terms, list)
if (config_term(attr, term, err))
return -EINVAL;
return 0;
}
-static int get_config_terms(struct list_head *head_config,
- struct list_head *head_terms __maybe_unused)
+static int get_config_terms(struct parse_events_terms *head_config, struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak) \
struct evsel_config_term *__t; \
@@ -1113,7 +1114,7 @@ do { \
struct parse_events_term *term;
- list_for_each_entry(term, head_config, list) {
+ list_for_each_entry(term, &head_config->terms, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
ADD_CONFIG_TERM_VAL(PERIOD, period, term->val.num, term->weak);
@@ -1194,14 +1195,14 @@ do { \
* Add EVSEL__CONFIG_TERM_CFG_CHG where cfg_chg will have a bit set for
* each bit of attr->config that the user has changed.
*/
-static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
+static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head_config,
struct list_head *head_terms)
{
struct parse_events_term *term;
u64 bits = 0;
int type;
- list_for_each_entry(term, head_config, list) {
+ list_for_each_entry(term, &head_config->terms, list) {
switch (term->type_term) {
case PARSE_EVENTS__TERM_TYPE_USER:
type = perf_pmu__format_type(pmu, term->config);
@@ -1251,7 +1252,7 @@ static int get_config_chgs(struct perf_pmu *pmu, struct list_head *head_config,
int parse_events_add_tracepoint(struct list_head *list, int *idx,
const char *sys, const char *event,
struct parse_events_error *err,
- struct list_head *head_config, void *loc_)
+ struct parse_events_terms *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
#ifdef HAVE_LIBTRACEEVENT
@@ -1284,7 +1285,7 @@ int parse_events_add_tracepoint(struct list_head *list, int *idx,
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_pmu *pmu, u32 type, u32 extended_type,
- u64 config, struct list_head *head_config)
+ u64 config, struct parse_events_terms *head_config)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
@@ -1320,7 +1321,7 @@ static int __parse_events_add_numeric(struct parse_events_state *parse_state,
int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
- struct list_head *head_config,
+ struct parse_events_terms *head_config,
bool wildcard)
{
struct perf_pmu *pmu = NULL;
@@ -1369,7 +1370,7 @@ static bool config_term_percore(struct list_head *config_terms)
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, const char *name,
- struct list_head *head_config,
+ const struct parse_events_terms *const_parsed_terms,
bool auto_merge_stats, void *loc_)
{
struct perf_event_attr attr;
@@ -1379,6 +1380,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
struct parse_events_error *err = parse_state->error;
YYLTYPE *loc = loc_;
LIST_HEAD(config_terms);
+ struct parse_events_terms parsed_terms;
pmu = parse_state->fake_pmu ?: perf_pmus__find(name);
@@ -1392,32 +1394,37 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return -EINVAL;
}
+ parse_events_terms__init(&parsed_terms);
+ if (const_parsed_terms) {
+ int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
+
+ if (ret)
+ return ret;
+ }
+
if (verbose > 1) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- if (pmu->selectable && !head_config) {
+ if (pmu->selectable && list_empty(&parsed_terms.terms)) {
strbuf_addf(&sb, "%s//", name);
} else {
strbuf_addf(&sb, "%s/", name);
- parse_events_term__to_strbuf(head_config, &sb);
+ parse_events_terms__to_strbuf(&parsed_terms, &sb);
strbuf_addch(&sb, '/');
}
fprintf(stderr, "Attempt to add: %s\n", sb.buf);
strbuf_release(&sb);
}
- if (head_config)
- fix_raw(head_config, pmu);
+ fix_raw(&parsed_terms, pmu);
+
+ memset(&attr, 0, sizeof(attr));
+ if (pmu->perf_event_attr_init_default)
+ pmu->perf_event_attr_init_default(pmu, &attr);
- if (pmu->default_config) {
- memcpy(&attr, pmu->default_config,
- sizeof(struct perf_event_attr));
- } else {
- memset(&attr, 0, sizeof(attr));
- }
attr.type = pmu->type;
- if (!head_config) {
+ if (list_empty(&parsed_terms.terms)) {
evsel = __add_event(list, &parse_state->idx, &attr,
/*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, pmu,
@@ -1426,14 +1433,16 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return evsel ? 0 : -ENOMEM;
}
- if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, head_config, &info, err))
+ if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms, &info, err)) {
+ parse_events_terms__exit(&parsed_terms);
return -EINVAL;
+ }
if (verbose > 1) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- parse_events_term__to_strbuf(head_config, &sb);
+ parse_events_terms__to_strbuf(&parsed_terms, &sb);
fprintf(stderr, "..after resolving event: %s/%s/\n", name, sb.buf);
strbuf_release(&sb);
}
@@ -1442,39 +1451,53 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
* Configure hardcoded terms first, no need to check
* return value when called with fail == 0 ;)
*/
- if (config_attr(&attr, head_config, parse_state->error, config_term_pmu))
+ if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
+ parse_events_terms__exit(&parsed_terms);
return -EINVAL;
+ }
- if (get_config_terms(head_config, &config_terms))
+ if (get_config_terms(&parsed_terms, &config_terms)) {
+ parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
+ }
/*
* When using default config, record which bits of attr->config were
* changed by the user.
*/
- if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
+ if (pmu->perf_event_attr_init_default &&
+ get_config_chgs(pmu, &parsed_terms, &config_terms)) {
+ parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
+ }
- if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
+ if (!parse_state->fake_pmu &&
+ perf_pmu__config(pmu, &attr, &parsed_terms, parse_state->error)) {
free_config_terms(&config_terms);
+ parse_events_terms__exit(&parsed_terms);
return -EINVAL;
}
evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
- get_config_name(head_config),
- get_config_metric_id(head_config), pmu,
+ get_config_name(&parsed_terms),
+ get_config_metric_id(&parsed_terms), pmu,
&config_terms, auto_merge_stats, /*cpu_list=*/NULL);
- if (!evsel)
+ if (!evsel) {
+ parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
+ }
if (evsel->name)
evsel->use_config_name = true;
evsel->percore = config_term_percore(&evsel->config_terms);
- if (parse_state->fake_pmu)
+ if (parse_state->fake_pmu) {
+ parse_events_terms__exit(&parsed_terms);
return 0;
+ }
+ parse_events_terms__exit(&parsed_terms);
free((char *)evsel->unit);
evsel->unit = strdup(info.unit);
evsel->scale = info.scale;
@@ -1484,27 +1507,29 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
}
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
- char *str, struct list_head *head,
+ const char *event_name,
+ const struct parse_events_terms *const_parsed_terms,
struct list_head **listp, void *loc_)
{
struct parse_events_term *term;
struct list_head *list = NULL;
- struct list_head *orig_head = NULL;
struct perf_pmu *pmu = NULL;
YYLTYPE *loc = loc_;
int ok = 0;
const char *config;
+ struct parse_events_terms parsed_terms;
*listp = NULL;
- if (!head) {
- head = malloc(sizeof(struct list_head));
- if (!head)
- goto out_err;
+ parse_events_terms__init(&parsed_terms);
+ if (const_parsed_terms) {
+ int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
- INIT_LIST_HEAD(head);
+ if (ret)
+ return ret;
}
- config = strdup(str);
+
+ config = strdup(event_name);
if (!config)
goto out_err;
@@ -1515,7 +1540,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
zfree(&config);
goto out_err;
}
- list_add_tail(&term->list, head);
+ list_add_tail(&term->list, &parsed_terms.terms);
/* Add it for all PMUs that support the alias */
list = malloc(sizeof(struct list_head));
@@ -1530,44 +1555,42 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
if (parse_events__filter_pmu(parse_state, pmu))
continue;
- if (!perf_pmu__have_event(pmu, str))
+ if (!perf_pmu__have_event(pmu, event_name))
continue;
auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
- parse_events_copy_term_list(head, &orig_head);
if (!parse_events_add_pmu(parse_state, list, pmu->name,
- orig_head, auto_merge_stats, loc)) {
+ &parsed_terms, auto_merge_stats, loc)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- parse_events_term__to_strbuf(orig_head, &sb);
- pr_debug("%s -> %s/%s/\n", str, pmu->name, sb.buf);
+ parse_events_terms__to_strbuf(&parsed_terms, &sb);
+ pr_debug("%s -> %s/%s/\n", event_name, pmu->name, sb.buf);
strbuf_release(&sb);
ok++;
}
- parse_events_terms__delete(orig_head);
}
if (parse_state->fake_pmu) {
- if (!parse_events_add_pmu(parse_state, list, str, head,
+ if (!parse_events_add_pmu(parse_state, list, event_name, &parsed_terms,
/*auto_merge_stats=*/true, loc)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- parse_events_term__to_strbuf(head, &sb);
- pr_debug("%s -> %s/%s/\n", str, "fake_pmu", sb.buf);
+ parse_events_terms__to_strbuf(&parsed_terms, &sb);
+ pr_debug("%s -> %s/%s/\n", event_name, "fake_pmu", sb.buf);
strbuf_release(&sb);
ok++;
}
}
out_err:
+ parse_events_terms__exit(&parsed_terms);
if (ok)
*listp = list;
else
free(list);
- parse_events_terms__delete(head);
return ok ? 0 : -1;
}
@@ -1832,7 +1855,7 @@ static int parse_events__scanner(const char *str,
/*
* parse event config string, return a list of event terms.
*/
-int parse_events_terms(struct list_head *terms, const char *str, FILE *input)
+int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input)
{
struct parse_events_state parse_state = {
.terms = NULL,
@@ -1841,14 +1864,10 @@ int parse_events_terms(struct list_head *terms, const char *str, FILE *input)
int ret;
ret = parse_events__scanner(str, input, &parse_state);
+ if (!ret)
+ list_splice(&parse_state.terms->terms, &terms->terms);
- if (!ret) {
- list_splice(parse_state.terms, terms);
- zfree(&parse_state.terms);
- return 0;
- }
-
- parse_events_terms__delete(parse_state.terms);
+ zfree(&parse_state.terms);
return ret;
}
@@ -2530,8 +2549,10 @@ int parse_events_term__clone(struct parse_events_term **new,
return new_term(new, &temp, /*str=*/NULL, term->val.num);
str = strdup(term->val.str);
- if (!str)
+ if (!str) {
+ zfree(&temp.config);
return -ENOMEM;
+ }
return new_term(new, &temp, str, /*num=*/0);
}
@@ -2544,58 +2565,56 @@ void parse_events_term__delete(struct parse_events_term *term)
free(term);
}
-int parse_events_copy_term_list(struct list_head *old,
- struct list_head **new)
+static int parse_events_terms__copy(const struct parse_events_terms *src,
+ struct parse_events_terms *dest)
{
- struct parse_events_term *term, *n;
- int ret;
-
- if (!old) {
- *new = NULL;
- return 0;
- }
+ struct parse_events_term *term;
- *new = malloc(sizeof(struct list_head));
- if (!*new)
- return -ENOMEM;
- INIT_LIST_HEAD(*new);
+ list_for_each_entry (term, &src->terms, list) {
+ struct parse_events_term *n;
+ int ret;
- list_for_each_entry (term, old, list) {
ret = parse_events_term__clone(&n, term);
if (ret)
return ret;
- list_add_tail(&n->list, *new);
+
+ list_add_tail(&n->list, &dest->terms);
}
return 0;
}
-void parse_events_terms__purge(struct list_head *terms)
+void parse_events_terms__init(struct parse_events_terms *terms)
+{
+ INIT_LIST_HEAD(&terms->terms);
+}
+
+void parse_events_terms__exit(struct parse_events_terms *terms)
{
struct parse_events_term *term, *h;
- list_for_each_entry_safe(term, h, terms, list) {
+ list_for_each_entry_safe(term, h, &terms->terms, list) {
list_del_init(&term->list);
parse_events_term__delete(term);
}
}
-void parse_events_terms__delete(struct list_head *terms)
+void parse_events_terms__delete(struct parse_events_terms *terms)
{
if (!terms)
return;
- parse_events_terms__purge(terms);
+ parse_events_terms__exit(terms);
free(terms);
}
-int parse_events_term__to_strbuf(struct list_head *term_list, struct strbuf *sb)
+int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb)
{
struct parse_events_term *term;
bool first = true;
- if (!term_list)
+ if (!terms)
return 0;
- list_for_each_entry(term, term_list, list) {
+ list_for_each_entry(term, &terms->terms, list) {
int ret;
if (!first) {
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 594e5d2dc67f..63c0a36a4bf1 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -44,7 +44,6 @@ static inline int parse_events(struct evlist *evlist, const char *str,
int parse_event(struct evlist *evlist, const char *str);
-int parse_events_terms(struct list_head *terms, const char *str, FILE *input);
int parse_filter(const struct option *opt, const char *str, int unset);
int exclude_perf(const struct option *opt, const char *arg, int unset);
@@ -140,6 +139,11 @@ struct parse_events_error {
char *first_help;
};
+/* A wrapper around a list of terms for the sake of better type safety. */
+struct parse_events_terms {
+ struct list_head terms;
+};
+
struct parse_events_state {
/* The list parsed events are placed on. */
struct list_head list;
@@ -148,7 +152,7 @@ struct parse_events_state {
/* Error information. */
struct parse_events_error *error;
/* Holds returned terms for term parsing. */
- struct list_head *terms;
+ struct parse_events_terms *terms;
/* Start token. */
int stoken;
/* Special fake PMU marker for testing. */
@@ -181,35 +185,38 @@ int parse_events_term__term(struct parse_events_term **term,
int parse_events_term__clone(struct parse_events_term **new,
struct parse_events_term *term);
void parse_events_term__delete(struct parse_events_term *term);
-void parse_events_terms__delete(struct list_head *terms);
-void parse_events_terms__purge(struct list_head *terms);
-int parse_events_term__to_strbuf(struct list_head *term_list, struct strbuf *sb);
+
+void parse_events_terms__delete(struct parse_events_terms *terms);
+void parse_events_terms__init(struct parse_events_terms *terms);
+void parse_events_terms__exit(struct parse_events_terms *terms);
+int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input);
+int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb);
int parse_events__modifier_event(struct list_head *list, char *str, bool add);
int parse_events__modifier_group(struct list_head *list, char *event_mod);
int parse_events_name(struct list_head *list, const char *name);
int parse_events_add_tracepoint(struct list_head *list, int *idx,
const char *sys, const char *event,
struct parse_events_error *error,
- struct list_head *head_config, void *loc);
+ struct parse_events_terms *head_config, void *loc);
int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
- struct list_head *head_config,
+ struct parse_events_terms *head_config,
bool wildcard);
int parse_events_add_tool(struct parse_events_state *parse_state,
struct list_head *list,
int tool_event);
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
- struct list_head *head_config);
+ struct parse_events_terms *head_config);
int parse_events__decode_legacy_cache(const char *name, int pmu_type, __u64 *config);
int parse_events_add_breakpoint(struct parse_events_state *parse_state,
struct list_head *list,
u64 addr, char *type, u64 len,
- struct list_head *head_config);
+ struct parse_events_terms *head_config);
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, const char *name,
- struct list_head *head_config,
+ const struct parse_events_terms *const_parsed_terms,
bool auto_merge_stats, void *loc);
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
@@ -217,13 +224,10 @@ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
struct perf_pmu *pmu);
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
- char *str,
- struct list_head *head_config,
+ const char *event_name,
+ const struct parse_events_terms *const_parsed_terms,
struct list_head **listp, void *loc);
-int parse_events_copy_term_list(struct list_head *old,
- struct list_head **new);
-
void parse_events__set_leader(char *name, struct list_head *list);
void parse_events_update_lists(struct list_head *list_event,
struct list_head *list_all);
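The struct parse_events_terms wrapper introduced above replaces the bare list_head that term lists used to be passed around as; callers now embed the object and drive an explicit init/exit lifecycle instead of heap-allocating a list. A minimal caller sketch under those assumptions (the term string is illustrative and the surrounding perf headers are assumed):

	struct parse_events_terms terms;
	struct strbuf sb;

	parse_events_terms__init(&terms);
	if (parse_events_terms(&terms, "config=0x10,period=1000", /*input=*/NULL) == 0) {
		strbuf_init(&sb, /*hint=*/0);
		parse_events_terms__to_strbuf(&terms, &sb);
		pr_debug("parsed terms: %s\n", sb.buf);
		strbuf_release(&sb);
	}
	parse_events_terms__exit(&terms);	/* releases the terms, not the embedded wrapper */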
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 4ef4b6f171a0..e86c45675e1d 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -120,7 +120,7 @@ static int term(yyscan_t scanner, enum parse_events__term_type type)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
- yylval->num = type;
+ yylval->term_type = type;
return PE_TERM;
}
@@ -156,8 +156,8 @@ event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
event [^,{}/]+
num_dec [0-9]+
-num_hex 0x[a-fA-F0-9]+
-num_raw_hex [a-fA-F0-9]+
+num_hex 0x[a-fA-F0-9]{1,16}
+num_raw_hex [a-fA-F0-9]{1,16}
name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]!\-]*
name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 21bfe7e0d944..de098caf0c1c 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -6,14 +6,13 @@
%{
+#ifndef NDEBUG
#define YYDEBUG 1
+#endif
#include <errno.h>
-#include <fnmatch.h>
-#include <stdio.h>
#include <linux/compiler.h>
#include <linux/types.h>
-#include <linux/zalloc.h>
#include "pmu.h"
#include "pmus.h"
#include "evsel.h"
@@ -70,7 +69,7 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <num> PE_VALUE_SYM_HW
%type <num> PE_VALUE_SYM_SW
%type <num> PE_VALUE_SYM_TOOL
-%type <num> PE_TERM
+%type <term_type> PE_TERM
%type <num> value_sym
%type <str> PE_RAW
%type <str> PE_NAME
@@ -79,7 +78,7 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <str> PE_MODIFIER_BP
%type <str> PE_EVENT_NAME
%type <str> PE_DRV_CFG_TERM
-%type <str> name_or_raw name_or_legacy
+%type <str> name_or_raw
%destructor { free ($$); } <str>
%type <term> event_term
%destructor { parse_events_term__delete ($$); } <term>
@@ -104,6 +103,7 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <list_evsel> groups
%destructor { free_list_evsel ($$); } <list_evsel>
%type <tracepoint_name> tracepoint_name
+%destructor { free ($$.sys); free ($$.event); } <tracepoint_name>
%type <hardware_term> PE_TERM_HW
%destructor { free ($$.str); } <hardware_term>
@@ -111,8 +111,9 @@ static void free_list_evsel(struct list_head* list_evsel)
{
char *str;
u64 num;
+ enum parse_events__term_type term_type;
struct list_head *list_evsel;
- struct list_head *list_terms;
+ struct parse_events_terms *list_terms;
struct parse_events_term *term;
struct tracepoint_name {
char *sys;
@@ -273,23 +274,18 @@ event_pmu:
PE_NAME opt_pmu_config
{
struct parse_events_state *parse_state = _parse_state;
- struct list_head *list = NULL, *orig_terms = NULL, *terms= NULL;
+ /* List of created evsels. */
+ struct list_head *list = NULL;
char *pattern = NULL;
#define CLEANUP \
do { \
parse_events_terms__delete($2); \
- parse_events_terms__delete(orig_terms); \
free(list); \
free($1); \
free(pattern); \
} while(0)
- if (parse_events_copy_term_list($2, &orig_terms)) {
- CLEANUP;
- YYNOMEM;
- }
-
list = alloc_list();
if (!list) {
CLEANUP;
@@ -319,16 +315,11 @@ PE_NAME opt_pmu_config
!perf_pmu__match(pattern, pmu->alias_name, $1)) {
bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
- if (parse_events_copy_term_list(orig_terms, &terms)) {
- CLEANUP;
- YYNOMEM;
- }
- if (!parse_events_add_pmu(parse_state, list, pmu->name, terms,
+ if (!parse_events_add_pmu(parse_state, list, pmu->name, $2,
auto_merge_stats, &@1)) {
ok++;
parse_state->wild_card_pmus = true;
}
- parse_events_terms__delete(terms);
}
}
@@ -336,7 +327,6 @@ PE_NAME opt_pmu_config
/* Failure to add, assume $1 is an event name. */
zfree(&list);
ok = !parse_events_multi_pmu_add(parse_state, $1, $2, &list, &@1);
- $2 = NULL;
}
if (!ok) {
struct parse_events_error *error = parse_state->error;
@@ -654,33 +644,31 @@ start_terms: event_config
event_config:
event_config ',' event_term
{
- struct list_head *head = $1;
+ struct parse_events_terms *head = $1;
struct parse_events_term *term = $3;
if (!head) {
parse_events_term__delete(term);
YYABORT;
}
- list_add_tail(&term->list, head);
+ list_add_tail(&term->list, &head->terms);
$$ = $1;
}
|
event_term
{
- struct list_head *head = malloc(sizeof(*head));
+ struct parse_events_terms *head = malloc(sizeof(*head));
struct parse_events_term *term = $1;
if (!head)
YYNOMEM;
- INIT_LIST_HEAD(head);
- list_add_tail(&term->list, head);
+ parse_events_terms__init(head);
+ list_add_tail(&term->list, &head->terms);
$$ = head;
}
name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE
-name_or_legacy: PE_NAME | PE_LEGACY_CACHE
-
event_term:
PE_RAW
{
@@ -695,7 +683,7 @@ PE_RAW
$$ = term;
}
|
-name_or_raw '=' name_or_legacy
+name_or_raw '=' name_or_raw
{
struct parse_events_term *term;
int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER, $1, $3, &@1, &@3);
@@ -775,11 +763,10 @@ PE_TERM_HW
$$ = term;
}
|
-PE_TERM '=' name_or_legacy
+PE_TERM '=' name_or_raw
{
struct parse_events_term *term;
- int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
- /*config=*/NULL, $3, &@1, &@3);
+ int err = parse_events_term__str(&term, $1, /*config=*/NULL, $3, &@1, &@3);
if (err) {
free($3);
@@ -791,8 +778,7 @@ PE_TERM '=' name_or_legacy
PE_TERM '=' PE_TERM_HW
{
struct parse_events_term *term;
- int err = parse_events_term__str(&term, (enum parse_events__term_type)$1,
- /*config=*/NULL, $3.str, &@1, &@3);
+ int err = parse_events_term__str(&term, $1, /*config=*/NULL, $3.str, &@1, &@3);
if (err) {
free($3.str);
@@ -804,10 +790,7 @@ PE_TERM '=' PE_TERM_HW
PE_TERM '=' PE_TERM
{
struct parse_events_term *term;
- int err = parse_events_term__term(&term,
- (enum parse_events__term_type)$1,
- (enum parse_events__term_type)$3,
- &@1, &@3);
+ int err = parse_events_term__term(&term, $1, $3, &@1, &@3);
if (err)
PE_ABORT(err);
@@ -818,8 +801,9 @@ PE_TERM '=' PE_TERM
PE_TERM '=' PE_VALUE
{
struct parse_events_term *term;
- int err = parse_events_term__num(&term, (enum parse_events__term_type)$1,
- /*config=*/NULL, $3, /*novalue=*/false, &@1, &@3);
+ int err = parse_events_term__num(&term, $1,
+ /*config=*/NULL, $3, /*novalue=*/false,
+ &@1, &@3);
if (err)
PE_ABORT(err);
@@ -830,9 +814,9 @@ PE_TERM '=' PE_VALUE
PE_TERM
{
struct parse_events_term *term;
- int err = parse_events_term__num(&term, (enum parse_events__term_type)$1,
- /*config=*/NULL, /*num=*/1, /*novalue=*/true,
- &@1, /*loc_val=*/NULL);
+ int err = parse_events_term__num(&term, $1,
+ /*config=*/NULL, /*num=*/1, /*novalue=*/true,
+ &@1, /*loc_val=*/NULL);
if (err)
PE_ABORT(err);
diff --git a/tools/perf/util/pfm.c b/tools/perf/util/pfm.c
index 862e4a689868..5ccfe4b64cdf 100644
--- a/tools/perf/util/pfm.c
+++ b/tools/perf/util/pfm.c
@@ -145,7 +145,20 @@ static bool is_libpfm_event_supported(const char *name, struct perf_cpu_map *cpu
evsel->is_libpfm_event = true;
- if (evsel__open(evsel, cpus, threads) < 0)
+ ret = evsel__open(evsel, cpus, threads);
+ if (ret == -EACCES) {
+ /*
+ * This happens if the paranoid value
+ * /proc/sys/kernel/perf_event_paranoid is set to 2.
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ */
+ evsel->core.attr.exclude_kernel = 1;
+ ret = evsel__open(evsel, cpus, threads);
+ }
+ if (ret < 0)
result = false;
evsel__close(evsel);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d515ba8a0e16..d3c9aa4326be 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -28,6 +28,7 @@
#include "strbuf.h"
#include "fncache.h"
#include "util/evsel_config.h"
+#include <regex.h>
struct perf_pmu perf_pmu__fake = {
.name = "fake",
@@ -52,7 +53,7 @@ struct perf_pmu_alias {
*/
char *topic;
/** @terms: Owned list of the original parsed parameters. */
- struct list_head terms;
+ struct parse_events_terms terms;
/** @list: List element of struct perf_pmu aliases. */
struct list_head list;
/**
@@ -155,7 +156,7 @@ static void __perf_pmu_format__load(struct perf_pmu_format *format, FILE *file)
format->loaded = true;
}
-static void perf_pmu_format__load(struct perf_pmu *pmu, struct perf_pmu_format *format)
+static void perf_pmu_format__load(const struct perf_pmu *pmu, struct perf_pmu_format *format)
{
char path[PATH_MAX];
FILE *file = NULL;
@@ -404,7 +405,7 @@ static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
zfree(&newalias->long_desc);
zfree(&newalias->topic);
zfree(&newalias->pmu_name);
- parse_events_terms__purge(&newalias->terms);
+ parse_events_terms__exit(&newalias->terms);
free(newalias);
}
@@ -484,7 +485,7 @@ static int update_alias(const struct pmu_event *pe,
assign_str(pe->name, "topic", &data->alias->topic, pe->topic);
data->alias->per_pkg = pe->perpkg;
if (pe->event) {
- parse_events_terms__purge(&data->alias->terms);
+ parse_events_terms__exit(&data->alias->terms);
ret = parse_events_terms(&data->alias->terms, pe->event, /*input=*/NULL);
}
if (!ret && pe->unit) {
@@ -524,7 +525,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
if (!alias)
return -ENOMEM;
- INIT_LIST_HEAD(&alias->terms);
+ parse_events_terms__init(&alias->terms);
alias->scale = 1.0;
alias->unit[0] = '\0';
alias->per_pkg = perpkg;
@@ -574,7 +575,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
return 0;
}
-static inline bool pmu_alias_info_file(char *name)
+static inline bool pmu_alias_info_file(const char *name)
{
size_t len;
@@ -656,17 +657,17 @@ static int pmu_aliases_parse(struct perf_pmu *pmu)
return 0;
}
-static int pmu_alias_terms(struct perf_pmu_alias *alias,
- struct list_head *terms)
+static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms)
{
struct parse_events_term *term, *cloned;
- LIST_HEAD(list);
- int ret;
+ struct parse_events_terms clone_terms;
+
+ parse_events_terms__init(&clone_terms);
+ list_for_each_entry(term, &alias->terms.terms, list) {
+ int ret = parse_events_term__clone(&cloned, term);
- list_for_each_entry(term, &alias->terms, list) {
- ret = parse_events_term__clone(&cloned, term);
if (ret) {
- parse_events_terms__purge(&list);
+ parse_events_terms__exit(&clone_terms);
return ret;
}
/*
@@ -674,9 +675,10 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
* which we don't want for implicit terms in aliases.
*/
cloned->weak = true;
- list_add_tail(&cloned->list, &list);
+ list_add_tail(&cloned->list, &clone_terms.terms);
}
- list_splice(&list, terms);
+ list_splice_init(&clone_terms.terms, terms);
+ parse_events_terms__exit(&clone_terms);
return 0;
}
@@ -775,11 +777,6 @@ char *perf_pmu__getcpuid(struct perf_pmu *pmu)
return cpuid;
}
-__weak const struct pmu_events_table *pmu_events_table__find(void)
-{
- return perf_pmu__find_events_table(NULL);
-}
-
__weak const struct pmu_metrics_table *pmu_metrics_table__find(void)
{
return perf_pmu__find_metrics_table(NULL);
@@ -874,6 +871,28 @@ out:
return res;
}
+bool pmu_uncore_identifier_match(const char *compat, const char *id)
+{
+ regex_t re;
+ regmatch_t pmatch[1];
+ int match;
+
+ if (regcomp(&re, compat, REG_EXTENDED) != 0) {
+ /* Warn that the expression is invalid and no match will be attempted. */
+ pr_info("Invalid regular expression %s\n", compat);
+ return false;
+ }
+
+ match = !regexec(&re, id, 1, pmatch, 0);
+ if (match) {
+ /* Ensure a full match. */
+ match = pmatch[0].rm_so == 0 && (size_t)pmatch[0].rm_eo == strlen(id);
+ }
+ regfree(&re);
+
+ return match;
+}
+
static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
const struct pmu_events_table *table __maybe_unused,
void *vdata)
@@ -914,8 +933,8 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
if (!pe->compat || !pe->pmu)
return 0;
- if (!strcmp(pmu->id, pe->compat) &&
- pmu_uncore_alias_match(pe->pmu, pmu->name)) {
+ if (pmu_uncore_alias_match(pe->pmu, pmu->name) &&
+ pmu_uncore_identifier_match(pe->compat, pmu->id)) {
perf_pmu__new_alias(pmu,
pe->name,
pe->desc,
@@ -935,22 +954,27 @@ void pmu_add_sys_aliases(struct perf_pmu *pmu)
pmu_for_each_sys_event(pmu_add_sys_aliases_iter_fn, pmu);
}
-struct perf_event_attr * __weak
-perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+static char *pmu_find_alias_name(struct perf_pmu *pmu, int dirfd)
{
- return NULL;
-}
+ FILE *file = perf_pmu__open_file_at(pmu, dirfd, "alias");
+ char *line = NULL;
+ size_t line_len = 0;
+ ssize_t ret;
-const char * __weak
-pmu_find_real_name(const char *name)
-{
- return name;
-}
+ if (!file)
+ return NULL;
-const char * __weak
-pmu_find_alias_name(const char *name __maybe_unused)
-{
- return NULL;
+ ret = getline(&line, &line_len, file);
+ if (ret < 0) {
+ fclose(file);
+ return NULL;
+ }
+ /* Remove trailing newline. */
+ if (ret > 0 && line[ret - 1] == '\n')
+ line[--ret] = '\0';
+
+ fclose(file);
+ return line;
}
static int pmu_max_precise(int dirfd, struct perf_pmu *pmu)
@@ -961,12 +985,15 @@ static int pmu_max_precise(int dirfd, struct perf_pmu *pmu)
return max_precise;
}
-struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name)
+void __weak
+perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused)
+{
+}
+
+struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *name)
{
struct perf_pmu *pmu;
__u32 type;
- const char *name = pmu_find_real_name(lookup_name);
- const char *alias_name;
pmu = zalloc(sizeof(*pmu));
if (!pmu)
@@ -999,23 +1026,17 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
pmu->is_core = is_pmu_core(name);
pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
- alias_name = pmu_find_alias_name(name);
- if (alias_name) {
- pmu->alias_name = strdup(alias_name);
- if (!pmu->alias_name)
- goto err;
- }
-
pmu->type = type;
pmu->is_uncore = pmu_is_uncore(dirfd, name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
pmu->max_precise = pmu_max_precise(dirfd, pmu);
+ pmu->alias_name = pmu_find_alias_name(pmu, dirfd);
pmu->events_table = perf_pmu__find_events_table(pmu);
pmu_add_sys_aliases(pmu);
list_add_tail(&pmu->list, pmus);
- pmu->default_config = perf_pmu__get_default_config(pmu);
+ perf_pmu__arch_init(pmu);
return pmu;
err:
@@ -1110,7 +1131,7 @@ void evsel__set_config_if_unset(struct perf_pmu *pmu, struct evsel *evsel,
}
static struct perf_pmu_format *
-pmu_find_format(struct list_head *formats, const char *name)
+pmu_find_format(const struct list_head *formats, const char *name)
{
struct perf_pmu_format *format;
@@ -1188,12 +1209,12 @@ static __u64 pmu_format_max_value(const unsigned long *format)
* in a config string) later on in the term list.
*/
static int pmu_resolve_param_term(struct parse_events_term *term,
- struct list_head *head_terms,
+ struct parse_events_terms *head_terms,
__u64 *value)
{
struct parse_events_term *t;
- list_for_each_entry(t, head_terms, list) {
+ list_for_each_entry(t, &head_terms->terms, list) {
if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM &&
t->config && !strcmp(t->config, term->config)) {
t->used = true;
@@ -1208,7 +1229,7 @@ static int pmu_resolve_param_term(struct parse_events_term *term,
return -1;
}
-static char *pmu_formats_string(struct list_head *formats)
+static char *pmu_formats_string(const struct list_head *formats)
{
struct perf_pmu_format *format;
char *str = NULL;
@@ -1234,10 +1255,10 @@ error:
* Setup one of config[12] attr members based on the
* user input data - term parameter.
*/
-static int pmu_config_term(struct perf_pmu *pmu,
+static int pmu_config_term(const struct perf_pmu *pmu,
struct perf_event_attr *attr,
struct parse_events_term *term,
- struct list_head *head_terms,
+ struct parse_events_terms *head_terms,
bool zero, struct parse_events_error *err)
{
struct perf_pmu_format *format;
@@ -1357,15 +1378,15 @@ static int pmu_config_term(struct perf_pmu *pmu,
return 0;
}
-int perf_pmu__config_terms(struct perf_pmu *pmu,
+int perf_pmu__config_terms(const struct perf_pmu *pmu,
struct perf_event_attr *attr,
- struct list_head *head_terms,
+ struct parse_events_terms *terms,
bool zero, struct parse_events_error *err)
{
struct parse_events_term *term;
- list_for_each_entry(term, head_terms, list) {
- if (pmu_config_term(pmu, attr, term, head_terms, zero, err))
+ list_for_each_entry(term, &terms->terms, list) {
+ if (pmu_config_term(pmu, attr, term, terms, zero, err))
return -EINVAL;
}
@@ -1378,10 +1399,10 @@ int perf_pmu__config_terms(struct perf_pmu *pmu,
* 2) pmu format definitions - specified by pmu parameter
*/
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
- struct list_head *head_terms,
+ struct parse_events_terms *head_terms,
struct parse_events_error *err)
{
- bool zero = !!pmu->default_config;
+ bool zero = !!pmu->perf_event_attr_init_default;
return perf_pmu__config_terms(pmu, attr, head_terms, zero, err);
}
@@ -1472,7 +1493,7 @@ static int check_info_data(struct perf_pmu *pmu,
* Find alias in the terms list and replace it with the terms
* defined for the alias
*/
-int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
struct perf_pmu_info *info, struct parse_events_error *err)
{
struct parse_events_term *term, *h;
@@ -1489,7 +1510,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
info->scale = 0.0;
info->snapshot = false;
- list_for_each_entry_safe(term, h, head_terms, list) {
+ list_for_each_entry_safe(term, h, &head_terms->terms, list) {
alias = pmu_find_alias(pmu, term);
if (!alias)
continue;
@@ -1634,7 +1655,7 @@ static char *format_alias(char *buf, int len, const struct perf_pmu *pmu,
: (int)strlen(pmu->name);
int used = snprintf(buf, len, "%.*s/%s", pmu_name_len, pmu->name, alias->name);
- list_for_each_entry(term, &alias->terms, list) {
+ list_for_each_entry(term, &alias->terms.terms, list) {
if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
used += snprintf(buf + used, sub_non_neg(len, used),
",%s=%s", term->config,
@@ -1693,7 +1714,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
info.desc = event->desc;
info.long_desc = event->long_desc;
info.encoding_desc = buf + buf_used;
- parse_events_term__to_strbuf(&event->terms, &sb);
+ parse_events_terms__to_strbuf(&event->terms, &sb);
buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
"%s/%s/", info.pmu_name, sb.buf) + 1;
info.topic = event->topic;
@@ -1749,7 +1770,7 @@ bool perf_pmu__is_software(const struct perf_pmu *pmu)
return !strcmp(pmu->name, "kprobe") || !strcmp(pmu->name, "uprobe");
}
-FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
+FILE *perf_pmu__open_file(const struct perf_pmu *pmu, const char *name)
{
char path[PATH_MAX];
@@ -1760,7 +1781,7 @@ FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name)
return fopen(path, "r");
}
-FILE *perf_pmu__open_file_at(struct perf_pmu *pmu, int dirfd, const char *name)
+FILE *perf_pmu__open_file_at(const struct perf_pmu *pmu, int dirfd, const char *name)
{
int fd;
@@ -1771,7 +1792,7 @@ FILE *perf_pmu__open_file_at(struct perf_pmu *pmu, int dirfd, const char *name)
return fdopen(fd, "r");
}
-int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
+int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt,
...)
{
va_list args;
@@ -1788,7 +1809,7 @@ int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
return ret;
}
-int perf_pmu__scan_file_at(struct perf_pmu *pmu, int dirfd, const char *name,
+int perf_pmu__scan_file_at(const struct perf_pmu *pmu, int dirfd, const char *name,
const char *fmt, ...)
{
va_list args;
@@ -1805,7 +1826,7 @@ int perf_pmu__scan_file_at(struct perf_pmu *pmu, int dirfd, const char *name,
return ret;
}
-bool perf_pmu__file_exists(struct perf_pmu *pmu, const char *name)
+bool perf_pmu__file_exists(const struct perf_pmu *pmu, const char *name)
{
char path[PATH_MAX];
@@ -2043,26 +2064,8 @@ void perf_pmu__delete(struct perf_pmu *pmu)
perf_cpu_map__put(pmu->cpus);
- zfree(&pmu->default_config);
zfree(&pmu->name);
zfree(&pmu->alias_name);
zfree(&pmu->id);
free(pmu);
}
-
-struct perf_pmu *pmu__find_core_pmu(void)
-{
- struct perf_pmu *pmu = NULL;
-
- while ((pmu = perf_pmus__scan_core(pmu))) {
- /*
- * The cpumap should cover all CPUs. Otherwise, some CPUs may
- * not support some events or have different event IDs.
- */
- if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
- return NULL;
-
- return pmu;
- }
- return NULL;
-}
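pmu_uncore_identifier_match() above compiles the JSON "Compat" string as an extended regular expression and only reports a match when it covers the whole PMU identifier, not just a substring. A standalone sketch of that anchoring check, using the same POSIX regex calls (the patterns and identifiers are made-up examples):

#include <regex.h>
#include <stdbool.h>
#include <string.h>

static bool full_match(const char *pattern, const char *id)
{
	regex_t re;
	regmatch_t m[1];
	bool ok = false;

	if (regcomp(&re, pattern, REG_EXTENDED) != 0)
		return false;	/* invalid pattern: treat as no match */
	if (!regexec(&re, id, 1, m, 0))
		/* Accept only if the match spans the whole identifier. */
		ok = m[0].rm_so == 0 && (size_t)m[0].rm_eo == strlen(id);
	regfree(&re);
	return ok;
}

/* full_match("uncore_cha_[0-9]+", "uncore_cha_12") -> true
 * full_match("uncore_cha", "uncore_cha_12")        -> false (prefix match only) */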
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 6a4e170c61d6..d2895d415f08 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -92,10 +92,11 @@ struct perf_pmu {
*/
int max_precise;
/**
- * @default_config: Optional default perf_event_attr determined in
- * architecture specific code.
+ * @perf_event_attr_init_default: Optional function to default
+ * initialize PMU specific parts of the perf_event_attr.
*/
- struct perf_event_attr *default_config;
+ void (*perf_event_attr_init_default)(const struct perf_pmu *pmu,
+ struct perf_event_attr *attr);
/**
* @cpus: Empty or the contents of either of:
* <sysfs>/bus/event_source/devices/<name>/cpumask.
@@ -191,15 +192,15 @@ typedef int (*pmu_event_callback)(void *state, struct pmu_event_info *info);
void pmu_add_sys_aliases(struct perf_pmu *pmu);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
- struct list_head *head_terms,
+ struct parse_events_terms *head_terms,
struct parse_events_error *error);
-int perf_pmu__config_terms(struct perf_pmu *pmu,
+int perf_pmu__config_terms(const struct perf_pmu *pmu,
struct perf_event_attr *attr,
- struct list_head *head_terms,
+ struct parse_events_terms *terms,
bool zero, struct parse_events_error *error);
__u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name);
int perf_pmu__format_type(struct perf_pmu *pmu, const char *name);
-int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
struct perf_pmu_info *info, struct parse_events_error *err);
int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb);
@@ -222,24 +223,25 @@ bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name);
*/
bool perf_pmu__is_software(const struct perf_pmu *pmu);
-FILE *perf_pmu__open_file(struct perf_pmu *pmu, const char *name);
-FILE *perf_pmu__open_file_at(struct perf_pmu *pmu, int dirfd, const char *name);
+FILE *perf_pmu__open_file(const struct perf_pmu *pmu, const char *name);
+FILE *perf_pmu__open_file_at(const struct perf_pmu *pmu, int dirfd, const char *name);
-int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...) __scanf(3, 4);
-int perf_pmu__scan_file_at(struct perf_pmu *pmu, int dirfd, const char *name,
+int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt, ...)
+ __scanf(3, 4);
+int perf_pmu__scan_file_at(const struct perf_pmu *pmu, int dirfd, const char *name,
const char *fmt, ...) __scanf(4, 5);
-bool perf_pmu__file_exists(struct perf_pmu *pmu, const char *name);
+bool perf_pmu__file_exists(const struct perf_pmu *pmu, const char *name);
int perf_pmu__test(void);
-struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu);
+void perf_pmu__arch_init(struct perf_pmu *pmu);
void pmu_add_cpu_aliases_table(struct perf_pmu *pmu,
const struct pmu_events_table *table);
char *perf_pmu__getcpuid(struct perf_pmu *pmu);
-const struct pmu_events_table *pmu_events_table__find(void);
const struct pmu_metrics_table *pmu_metrics_table__find(void);
+bool pmu_uncore_identifier_match(const char *compat, const char *id);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
@@ -252,8 +254,6 @@ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
int perf_pmu__match(const char *pattern, const char *name, const char *tok);
-const char *pmu_find_real_name(const char *name);
-const char *pmu_find_alias_name(const char *name);
double perf_pmu__cpu_slots_per_cycle(void);
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size);
int perf_pmu__pathname_scnprintf(char *buf, size_t size,
@@ -264,6 +264,6 @@ int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename,
struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name);
struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
void perf_pmu__delete(struct perf_pmu *pmu);
-struct perf_pmu *pmu__find_core_pmu(void);
+struct perf_pmu *perf_pmus__find_core_pmu(void);
#endif /* __PMU_H */
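With default_config replaced above by the perf_event_attr_init_default callback, architectures now register a function that seeds PMU-specific attr defaults, and perf_pmu__config() zeroes config fields only when such a callback is installed. A minimal sketch of how an architecture's perf_pmu__arch_init() override might wire this up; the PMU name and config value are purely illustrative:

static void example_attr_init_default(const struct perf_pmu *pmu __maybe_unused,
				      struct perf_event_attr *attr)
{
	/* Seed PMU-specific defaults before user terms are applied. */
	attr->config = 1ULL << 32;
}

void perf_pmu__arch_init(struct perf_pmu *pmu)
{
	if (!strcmp(pmu->name, "example_pmu"))
		pmu->perf_event_attr_init_default = example_attr_init_default;
}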
diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
index 600c8c158c8e..198907a8a48a 100644
--- a/tools/perf/util/pmu.y
+++ b/tools/perf/util/pmu.y
@@ -5,6 +5,10 @@
%{
+#ifndef NDEBUG
+#define YYDEBUG 1
+#endif
+
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/bitmap.h>
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index 6631367c756f..ce4931461741 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -10,6 +10,7 @@
#include <pthread.h>
#include <string.h>
#include <unistd.h>
+#include "cpumap.h"
#include "debug.h"
#include "evsel.h"
#include "pmus.h"
@@ -36,6 +37,8 @@ static LIST_HEAD(other_pmus);
static bool read_sysfs_core_pmus;
static bool read_sysfs_all_pmus;
+static void pmu_read_sysfs(bool core_only);
+
int pmu_name_len_no_suffix(const char *str, unsigned long *num)
{
int orig_len, len;
@@ -123,6 +126,14 @@ struct perf_pmu *perf_pmus__find(const char *name)
pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
close(dirfd);
+ if (!pmu) {
+ /*
+ * Looking up an individual PMU failed. This may mean the name is
+ * an alias, so read the PMUs from sysfs and try to find it again.
+ */
+ pmu_read_sysfs(core_pmu);
+ pmu = pmu_find(name);
+ }
return pmu;
}
@@ -268,7 +279,7 @@ struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
{
if (!pmu) {
pmu_read_sysfs(/*core_only=*/true);
- pmu = list_prepare_entry(pmu, &core_pmus, list);
+ return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
}
list_for_each_entry_continue(pmu, &core_pmus, list)
return pmu;
@@ -592,3 +603,8 @@ struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
}
return pmu;
}
+
+struct perf_pmu *perf_pmus__find_core_pmu(void)
+{
+ return perf_pmus__scan_core(NULL);
+}
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index a7566edc86a3..b0fc48be623f 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -395,6 +395,8 @@ void print_symbol_events(const struct print_callbacks *print_cb, void *print_sta
*/
void print_events(const struct print_callbacks *print_cb, void *print_state)
{
+ char *tmp;
+
print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX);
print_symbol_events(print_cb, print_state, PERF_TYPE_SOFTWARE,
@@ -418,17 +420,21 @@ void print_events(const struct print_callbacks *print_cb, void *print_state)
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
- print_cb->print_event(print_state,
- /*topic=*/NULL,
- /*pmu_name=*/NULL,
- "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
- /*event_alias=*/NULL,
- /*scale_unit=*/NULL,
- /*deprecated=*/false,
- event_type_descriptors[PERF_TYPE_RAW],
- "(see 'man perf-list' on how to encode it)",
- /*long_desc=*/NULL,
- /*encoding_desc=*/NULL);
+ if (asprintf(&tmp, "%s/t1=v1[,t2=v2,t3 ...]/modifier",
+ perf_pmus__scan_core(/*pmu=*/NULL)->name) > 0) {
+ print_cb->print_event(print_state,
+ /*topic=*/NULL,
+ /*pmu_name=*/NULL,
+ tmp,
+ /*event_alias=*/NULL,
+ /*scale_unit=*/NULL,
+ /*deprecated=*/false,
+ event_type_descriptors[PERF_TYPE_RAW],
+ "(see 'man perf-list' on how to encode it)",
+ /*long_desc=*/NULL,
+ /*encoding_desc=*/NULL);
+ free(tmp);
+ }
print_cb->print_event(print_state,
/*topic=*/NULL,
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 26e1c8d973ea..593b660ec75e 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -40,6 +40,7 @@ util/rwsem.c
util/hashmap.c
util/perf_regs.c
util/fncache.c
+util/rlimit.c
util/perf-regs-arch/perf_regs_aarch64.c
util/perf-regs-arch/perf_regs_arm.c
util/perf-regs-arch/perf_regs_csky.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index c29f5f0bb552..8761f51b5c7c 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -98,7 +98,7 @@ struct perf_pmu *evsel__find_pmu(const struct evsel *evsel __maybe_unused)
return NULL;
}
-int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...)
+int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char *fmt, ...)
{
return EOF;
}
diff --git a/tools/perf/util/rlimit.c b/tools/perf/util/rlimit.c
index 13521d392a22..f857405fe1aa 100644
--- a/tools/perf/util/rlimit.c
+++ b/tools/perf/util/rlimit.c
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1 */
+#include <errno.h>
#include "util/debug.h"
#include "util/rlimit.h"
#include <sys/time.h>
@@ -27,3 +28,30 @@ void rlimit__bump_memlock(void)
}
}
}
+
+bool rlimit__increase_nofile(enum rlimit_action *set_rlimit)
+{
+ int old_errno;
+ struct rlimit l;
+
+ if (*set_rlimit < INCREASED_MAX) {
+ old_errno = errno;
+
+ if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+ if (*set_rlimit == NO_CHANGE) {
+ l.rlim_cur = l.rlim_max;
+ } else {
+ l.rlim_cur = l.rlim_max + 1000;
+ l.rlim_max = l.rlim_cur;
+ }
+ if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
+ (*set_rlimit) += 1;
+ errno = old_errno;
+ return true;
+ }
+ }
+ errno = old_errno;
+ }
+
+ return false;
+}
diff --git a/tools/perf/util/rlimit.h b/tools/perf/util/rlimit.h
index 9f59d8e710a3..19050d7fb9d7 100644
--- a/tools/perf/util/rlimit.h
+++ b/tools/perf/util/rlimit.h
@@ -1,6 +1,15 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
#ifndef __PERF_RLIMIT_H_
#define __PERF_RLIMIT_H_
-/* SPDX-License-Identifier: LGPL-2.1 */
+
+enum rlimit_action {
+ NO_CHANGE,
+ SET_TO_MAX,
+ INCREASED_MAX
+};
void rlimit__bump_memlock(void);
+
+bool rlimit__increase_nofile(enum rlimit_action *set_rlimit);
+
#endif // __PERF_RLIMIT_H_
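rlimit__increase_nofile() above is intended for too-many-open-files error paths: the enum rlimit_action tracks how far RLIMIT_NOFILE has already been raised (first the soft limit up to the hard limit, then the hard limit plus a bump), and it returns false once no further increase is possible. A hedged sketch of the expected retry loop; open_counters() is a made-up helper standing in for whatever call hit the file-descriptor limit:

	enum rlimit_action set_rlimit = NO_CHANGE;
	int err;

	do {
		err = open_counters();	/* hypothetical: returns -errno on failure */
	} while (err == -EMFILE && rlimit__increase_nofile(&set_rlimit));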
diff --git a/tools/perf/util/rwsem.c b/tools/perf/util/rwsem.c
index f3d29d8ddc99..5109167f27f7 100644
--- a/tools/perf/util/rwsem.c
+++ b/tools/perf/util/rwsem.c
@@ -2,32 +2,66 @@
#include "util.h"
#include "rwsem.h"
+#if RWS_ERRORCHECK
+#include "mutex.h"
+#endif
+
int init_rwsem(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_init(&sem->mtx);
+ return 0;
+#else
return pthread_rwlock_init(&sem->lock, NULL);
+#endif
}
int exit_rwsem(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_destroy(&sem->mtx);
+ return 0;
+#else
return pthread_rwlock_destroy(&sem->lock);
+#endif
}
int down_read(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_lock(&sem->mtx);
+ return 0;
+#else
return perf_singlethreaded ? 0 : pthread_rwlock_rdlock(&sem->lock);
+#endif
}
int up_read(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_unlock(&sem->mtx);
+ return 0;
+#else
return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
+#endif
}
int down_write(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_lock(&sem->mtx);
+ return 0;
+#else
return perf_singlethreaded ? 0 : pthread_rwlock_wrlock(&sem->lock);
+#endif
}
int up_write(struct rw_semaphore *sem)
{
+#if RWS_ERRORCHECK
+ mutex_unlock(&sem->mtx);
+ return 0;
+#else
return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
+#endif
}
diff --git a/tools/perf/util/rwsem.h b/tools/perf/util/rwsem.h
index 94565ad4d494..ef5cbc31d967 100644
--- a/tools/perf/util/rwsem.h
+++ b/tools/perf/util/rwsem.h
@@ -2,9 +2,20 @@
#define _PERF_RWSEM_H
#include <pthread.h>
+#include "mutex.h"
+
+/*
+ * Mutexes have additional error checking. Enable to use a mutex rather than a
+ * rwlock for debugging.
+ */
+#define RWS_ERRORCHECK 0
struct rw_semaphore {
+#if RWS_ERRORCHECK
+ struct mutex mtx;
+#else
pthread_rwlock_t lock;
+#endif
};
int init_rwsem(struct rw_semaphore *sem);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 6aa1c7f2b444..80e4f6132740 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -128,7 +128,7 @@ static int hist_entry__thread_filter(struct hist_entry *he, int type, const void
if (type != HIST_FILTER__THREAD)
return -1;
- return th && RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(th);
+ return th && !RC_CHK_EQUAL(he->thread, th);
}
struct sort_entry sort_thread = {
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index cf05b0b56c57..116a642ad99d 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -301,3 +301,51 @@ unsigned int hex(char c)
return c - 'a' + 10;
return c - 'A' + 10;
}
+
+/*
+ * Replace all occurrences of character 'needle' in string 'haystack' with
+ * string 'replace'
+ *
+ * The new string could be longer so a new string is returned which must be
+ * freed.
+ */
+char *strreplace_chars(char needle, const char *haystack, const char *replace)
+{
+ int replace_len = strlen(replace);
+ char *new_s, *to;
+ const char *loc = strchr(haystack, needle);
+ const char *from = haystack;
+ int num = 0;
+
+ /* Count occurrences */
+ while (loc) {
+ loc = strchr(loc + 1, needle);
+ num++;
+ }
+
+ /* Allocate enough space for replacements and reset first location */
+ new_s = malloc(strlen(haystack) + (num * (replace_len - 1) + 1));
+ if (!new_s)
+ return NULL;
+ loc = strchr(haystack, needle);
+ to = new_s;
+
+ while (loc) {
+ /* Copy original string up to found char and update positions */
+ memcpy(to, from, 1 + loc - from);
+ to += loc - from;
+ from = loc + 1;
+
+ /* Copy replacement string and update positions */
+ memcpy(to, replace, replace_len);
+ to += replace_len;
+
+ /* Find the next occurrence of needle, or stop at end of string */
+ loc = strchr(from, needle);
+ }
+
+ /* Copy any remaining chars + null */
+ strcpy(to, from);
+
+ return new_s;
+}
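strreplace_chars() returns a newly allocated copy, so the result can legitimately be longer than the input; a typical use is escaping a character that would otherwise act as a separator. A small illustrative call (the event name is made up):

	char *escaped = strreplace_chars(',', "uncore_imc_free_running,read", "\\,");

	if (escaped) {
		printf("%s\n", escaped);	/* prints: uncore_imc_free_running\,read */
		free(escaped);
	}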
diff --git a/tools/perf/util/string2.h b/tools/perf/util/string2.h
index 56c30fef9682..52cb8ba057c7 100644
--- a/tools/perf/util/string2.h
+++ b/tools/perf/util/string2.h
@@ -39,5 +39,6 @@ char *strpbrk_esc(char *str, const char *stopset);
char *strdup_esc(const char *str);
unsigned int hex(char c);
+char *strreplace_chars(char needle, const char *haystack, const char *replace);
#endif /* PERF_STRING_H */
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 0e4dc31c6c9c..1892e9b6aa7f 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -754,6 +754,7 @@ int svg_build_topology_map(struct perf_env *env)
int i, nr_cpus;
struct topology t;
char *sib_core, *sib_thr;
+ int ret = -1;
nr_cpus = min(env->nr_cpus_online, MAX_NR_CPUS);
@@ -799,11 +800,11 @@ int svg_build_topology_map(struct perf_env *env)
scan_core_topology(topology_map, &t, nr_cpus);
- return 0;
+ ret = 0;
exit:
zfree(&t.sib_core);
zfree(&t.sib_thr);
- return -1;
+ return ret;
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 95e99c332d7e..9e7eeaf616b8 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1514,8 +1514,10 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
}
if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
- ".text", NULL))
+ ".text", NULL)) {
dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
+ dso->text_end = tshdr.sh_offset + tshdr.sh_size;
+ }
if (runtime_ss->opdsec)
opddata = elf_rawdata(runtime_ss->opdsec, NULL);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 3f36675b7c8f..82cc74b9358e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -202,11 +202,10 @@ void symbols__fixup_duplicate(struct rb_root_cached *symbols)
curr = rb_entry(nd, struct symbol, rb_node);
again:
nd = rb_next(&curr->rb_node);
- next = rb_entry(nd, struct symbol, rb_node);
-
if (!nd)
break;
+ next = rb_entry(nd, struct symbol, rb_node);
if (curr->start != next->start)
continue;
@@ -719,6 +718,7 @@ static bool symbol__is_idle(const char *name)
"cpu_startup_entry",
"idle_cpu",
"intel_idle",
+ "intel_idle_ibrs",
"default_idle",
"native_safe_halt",
"enter_idle",
@@ -877,7 +877,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
*module++ = '\0';
curr_map_dso = map__dso(curr_map);
if (strcmp(curr_map_dso->short_name, module)) {
- if (RC_CHK_ACCESS(curr_map) != RC_CHK_ACCESS(initial_map) &&
+ if (!RC_CHK_EQUAL(curr_map, initial_map) &&
dso->kernel == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(machine)) {
/*
@@ -1469,7 +1469,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
list_del_init(&new_node->node);
- if (RC_CHK_ACCESS(new_map) == RC_CHK_ACCESS(replacement_map)) {
+ if (RC_CHK_EQUAL(new_map, replacement_map)) {
struct map *map_ref;
map__set_start(map, map__start(new_map));
@@ -1733,8 +1733,10 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
/* PE symbols can only have 4 bytes, so use .text high bits */
dso->text_offset = section->vma - (u32)section->vma;
dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
+ dso->text_end = (section->vma - dso->text_offset) + section->size;
} else {
dso->text_offset = section->vma - section->filepos;
+ dso->text_end = section->filepos + section->size;
}
}
@@ -2789,8 +2791,11 @@ struct mem_info *mem_info__get(struct mem_info *mi)
void mem_info__put(struct mem_info *mi)
{
- if (mi && refcount_dec_and_test(&mi->refcnt))
+ if (mi && refcount_dec_and_test(&mi->refcnt)) {
+ addr_map_symbol__exit(&mi->iaddr);
+ addr_map_symbol__exit(&mi->daddr);
free(mi);
+ }
}
struct mem_info *mem_info__new(void)
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 319ccf09a435..c8755679281e 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -313,7 +313,8 @@ static int record_event_files(struct tracepoint_path *tps)
}
err = 0;
out:
- closedir(dir);
+ if (dir)
+ closedir(dir);
put_tracing_file(path);
return err;