Diffstat (limited to 'tools/perf/tests/shell')
-rwxr-xr-x  tools/perf/tests/shell/annotate.sh  88
-rwxr-xr-x  tools/perf/tests/shell/attr.sh  22
-rw-r--r--  tools/perf/tests/shell/attr/README  71
-rw-r--r--  tools/perf/tests/shell/attr/base-record  41
-rw-r--r--  tools/perf/tests/shell/attr/base-record-spe  40
-rw-r--r--  tools/perf/tests/shell/attr/base-stat  41
-rw-r--r--  tools/perf/tests/shell/attr/system-wide-dummy  52
-rw-r--r--  tools/perf/tests/shell/attr/test-record-C0  24
-rw-r--r--  tools/perf/tests/shell/attr/test-record-basic  6
-rw-r--r--  tools/perf/tests/shell/attr/test-record-branch-any  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-branch-filter-any  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-branch-filter-any_call  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-branch-filter-any_ret  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-branch-filter-hv  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-branch-filter-ind_call  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-branch-filter-k  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-branch-filter-u  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-count  9
-rw-r--r--  tools/perf/tests/shell/attr/test-record-data  10
-rw-r--r--  tools/perf/tests/shell/attr/test-record-dummy-C0  55
-rw-r--r--  tools/perf/tests/shell/attr/test-record-freq  7
-rw-r--r--  tools/perf/tests/shell/attr/test-record-graph-default  9
-rw-r--r--  tools/perf/tests/shell/attr/test-record-graph-default-aarch64  9
-rw-r--r--  tools/perf/tests/shell/attr/test-record-graph-dwarf  12
-rw-r--r--  tools/perf/tests/shell/attr/test-record-graph-fp  9
-rw-r--r--  tools/perf/tests/shell/attr/test-record-graph-fp-aarch64  9
-rw-r--r--  tools/perf/tests/shell/attr/test-record-group-sampling  40
-rw-r--r--  tools/perf/tests/shell/attr/test-record-group-sampling1  50
-rw-r--r--  tools/perf/tests/shell/attr/test-record-group-sampling2  61
-rw-r--r--  tools/perf/tests/shell/attr/test-record-group1  23
-rw-r--r--  tools/perf/tests/shell/attr/test-record-group2  30
-rw-r--r--  tools/perf/tests/shell/attr/test-record-group3  31
-rw-r--r--  tools/perf/tests/shell/attr/test-record-no-buffering  9
-rw-r--r--  tools/perf/tests/shell/attr/test-record-no-inherit  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-no-samples  7
-rw-r--r--  tools/perf/tests/shell/attr/test-record-period  8
-rw-r--r--  tools/perf/tests/shell/attr/test-record-pfm-period  9
-rw-r--r--  tools/perf/tests/shell/attr/test-record-raw  7
-rw-r--r--  tools/perf/tests/shell/attr/test-record-spe-period  12
-rw-r--r--  tools/perf/tests/shell/attr/test-record-spe-period-term  12
-rw-r--r--  tools/perf/tests/shell/attr/test-record-spe-physical-address  12
-rw-r--r--  tools/perf/tests/shell/attr/test-record-user-regs-no-sve-aarch64  9
-rw-r--r--  tools/perf/tests/shell/attr/test-record-user-regs-old-sve-aarch64  10
-rw-r--r--  tools/perf/tests/shell/attr/test-record-user-regs-sve-aarch64  14
-rw-r--r--  tools/perf/tests/shell/attr/test-stat-C0  10
-rw-r--r--  tools/perf/tests/shell/attr/test-stat-basic  7
-rw-r--r--  tools/perf/tests/shell/attr/test-stat-default  229
-rw-r--r--  tools/perf/tests/shell/attr/test-stat-detailed-1  271
-rw-r--r--  tools/perf/tests/shell/attr/test-stat-detailed-2  331
-rw-r--r--  tools/perf/tests/shell/attr/test-stat-detailed-3  351
-rw-r--r--  tools/perf/tests/shell/attr/test-stat-group1  17
-rw-r--r--  tools/perf/tests/shell/attr/test-stat-no-inherit  8
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh  106
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_adding_kernel.sh  301
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_basic.sh  80
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_invalid_options.sh  82
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_line_semantics.sh  58
-rwxr-xr-x  tools/perf/tests/shell/base_report/setup.sh  32
-rw-r--r--  tools/perf/tests/shell/base_report/stderr-whitelist.txt  5
-rwxr-xr-x  tools/perf/tests/shell/base_report/test_basic.sh  190
-rwxr-xr-x  tools/perf/tests/shell/common/check_all_lines_matched.pl  39
-rwxr-xr-x  tools/perf/tests/shell/common/check_all_patterns_found.pl  34
-rwxr-xr-x  tools/perf/tests/shell/common/check_errors_whitelisted.pl  51
-rwxr-xr-x  tools/perf/tests/shell/common/check_no_patterns_found.pl  34
-rw-r--r--  tools/perf/tests/shell/common/init.sh  143
-rw-r--r--  tools/perf/tests/shell/common/patterns.sh  268
-rw-r--r--  tools/perf/tests/shell/common/settings.sh  105
-rw-r--r--  tools/perf/tests/shell/coresight/Makefile  2
-rwxr-xr-x  tools/perf/tests/shell/coresight/asm_pure_loop.sh  2
-rwxr-xr-x  tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh  2
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh  2
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh  2
-rwxr-xr-x  tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh  2
-rwxr-xr-x  tools/perf/tests/shell/ftrace.sh  86
-rw-r--r--  tools/perf/tests/shell/lib/attr.py  476
-rw-r--r--  tools/perf/tests/shell/lib/coresight.sh  2
-rw-r--r--  tools/perf/tests/shell/lib/perf_has_symbol.sh  2
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py  19
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py  241
-rw-r--r--  tools/perf/tests/shell/lib/probe_vfs_getname.sh  11
-rw-r--r--  tools/perf/tests/shell/lib/stat_output.sh  14
-rwxr-xr-x  tools/perf/tests/shell/list.sh  5
-rwxr-xr-x  tools/perf/tests/shell/lock_contention.sh  2
-rwxr-xr-x  tools/perf/tests/shell/perftool-testsuite_probe.sh  23
-rwxr-xr-x  tools/perf/tests/shell/perftool-testsuite_report.sh  23
-rwxr-xr-x  tools/perf/tests/shell/pipe_test.sh  130
-rwxr-xr-x  tools/perf/tests/shell/probe_vfs_getname.sh  2
-rwxr-xr-x  tools/perf/tests/shell/record+probe_libc_inet_pton.sh  45
-rwxr-xr-x  tools/perf/tests/shell/record+script_probe_vfs_getname.sh  7
-rwxr-xr-x  tools/perf/tests/shell/record.sh  134
-rwxr-xr-x  tools/perf/tests/shell/record_bpf_filter.sh  86
-rwxr-xr-x  tools/perf/tests/shell/record_lbr.sh  161
-rwxr-xr-x  tools/perf/tests/shell/record_offcpu.sh  2
-rwxr-xr-x  tools/perf/tests/shell/script.sh  29
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh  2
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh  15
-rwxr-xr-x  tools/perf/tests/shell/stat+std_output.sh  4
-rwxr-xr-x  tools/perf/tests/shell/stat.sh  69
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metricgroups.sh  36
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh  87
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pmu.sh  54
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters.sh  83
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters_cgrp.sh  13
-rwxr-xr-x  tools/perf/tests/shell/stat_metrics_values.sh  4
-rwxr-xr-x  tools/perf/tests/shell/test_arm_callgraph_fp.sh  37
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight.sh  6
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight_disasm.sh  65
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe.sh  34
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe_fork.sh  2
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh  4
-rwxr-xr-x  tools/perf/tests/shell/test_data_symbol.sh  2
-rwxr-xr-x  tools/perf/tests/shell/test_intel_pt.sh  32
-rwxr-xr-x  tools/perf/tests/shell/test_stat_intel_tpebs.sh  22
-rwxr-xr-x  tools/perf/tests/shell/test_task_analyzer.sh  9
-rwxr-xr-x  tools/perf/tests/shell/test_uprobe_from_different_cu.sh  9
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh  4
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_enum.sh  66
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_general.sh  94
-rwxr-xr-x  tools/perf/tests/shell/trace_exit_race.sh  51
119 files changed, 5679 insertions(+), 319 deletions(-)
diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh
new file mode 100755
index 000000000000..1590a37363de
--- /dev/null
+++ b/tools/perf/tests/shell/annotate.sh
@@ -0,0 +1,88 @@
+#!/bin/bash
+# perf annotate basic tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+shelldir=$(dirname "$0")
+
+# shellcheck source=lib/perf_has_symbol.sh
+. "${shelldir}"/lib/perf_has_symbol.sh
+
+testsym="noploop"
+
+skip_test_missing_symbol ${testsym}
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+perfout=$(mktemp /tmp/__perf_test.perf.out.XXXXX)
+testprog="perf test -w noploop"
+# disassembly format: "percent : offset: instruction (operands ...)"
+disasm_regex="[0-9]*\.[0-9]* *: *\w*: *\w*"
+
+cleanup() {
+ rm -rf "${perfdata}" "${perfout}"
+ rm -rf "${perfdata}".old
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_basic() {
+ echo "Basic perf annotate test"
+ if ! perf record -o "${perfdata}" ${testprog} 2> /dev/null
+ then
+ echo "Basic annotate [Failed: perf record]"
+ err=1
+ return
+ fi
+
+ # Generate the annotated output file
+ perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null | head -250 > "${perfout}"
+
+ # check if it has the target symbol
+ if ! grep "${testsym}" "${perfout}"
+ then
+ echo "Basic annotate [Failed: missing target symbol]"
+ err=1
+ return
+ fi
+
+ # check if it has the disassembly lines
+ if ! grep "${disasm_regex}" "${perfout}"
+ then
+ echo "Basic annotate [Failed: missing disasm output from default disassembler]"
+ err=1
+ return
+ fi
+
+ # check again with a target symbol name
+ if ! perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null | \
+ head -250 | grep -m 3 "${disasm_regex}"
+ then
+ echo "Basic annotate [Failed: missing disasm output when specifying the target symbol]"
+ err=1
+ return
+ fi
+
+ # check one more with external objdump tool (forced by --objdump option)
+ if ! perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null | \
+ head -250 | grep -m 3 "${disasm_regex}"
+ then
+ echo "Basic annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
+ err=1
+ return
+ fi
+ echo "Basic annotate test [Success]"
+}
+
+test_basic
+
+cleanup
+exit $err
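
As a hedged aside (the percent, offset, and instruction below are made up),
the disasm_regex above is meant to match "percent : offset: instruction"
lines as emitted by perf annotate --stdio:

    $ echo '  0.00 :   4000a8:   nop' | grep '[0-9]*\.[0-9]* *: *\w*: *\w*'
      0.00 :   4000a8:   nop
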
diff --git a/tools/perf/tests/shell/attr.sh b/tools/perf/tests/shell/attr.sh
new file mode 100755
index 000000000000..5a4e43b2471d
--- /dev/null
+++ b/tools/perf/tests/shell/attr.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Perf attribute expectations test
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+
+cleanup() {
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+shelldir=$(dirname "$0")
+perf_path=$(which perf)
+python "${shelldir}"/lib/attr.py -d "${shelldir}"/attr -v -p "$perf_path"
+cleanup
+exit $err
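
For reference, the checker can also be run by hand from a perf source tree;
a minimal sketch (invocation taken from attr.sh above, paths assumed):

    $ cd tools/perf/tests/shell
    $ python lib/attr.py -d ./attr -v -p "$(which perf)"
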
diff --git a/tools/perf/tests/shell/attr/README b/tools/perf/tests/shell/attr/README
new file mode 100644
index 000000000000..67c4ca76b85d
--- /dev/null
+++ b/tools/perf/tests/shell/attr/README
@@ -0,0 +1,71 @@
+The struct perf_event_attr test (attr tests) support
+====================================================
+This testing support is embedded directly into perf and is governed
+by the PERF_TEST_ATTR environment variable and a hook inside the
+sys_perf_event_open function.
+
+The general idea is to store 'struct perf_event_attr' details for
+each event created within a single perf command. The details of each
+event are stored in a separate text file. Once the perf command
+finishes, these files are checked against the values we expect.
+
+The attr tests consist of the following parts:
+
+tests/attr.c
+------------
+This is the sys_perf_event_open hook implementation. The hook
+is triggered when the PERF_TEST_ATTR environment variable is
+defined. It must contain the name of an existing directory with
+access and write permissions.
+
+For each sys_perf_event_open call, the event details are stored in
+a separate file. Besides the 'struct perf_event_attr' values, we also
+store the 'fd' and 'group_fd' values to allow checking for groups.
+
+tests/attr.py
+-------------
+This is the Python script that does all the hard work. It reads
+each test definition, executes it, and checks the results.
+
+tests/attr/
+-----------
+Directory containing all attr test definitions.
+The following tests are defined (with their perf commands):
+
+ perf record kill (test-record-basic)
+ perf record -b kill (test-record-branch-any)
+ perf record -j any kill (test-record-branch-filter-any)
+ perf record -j any_call kill (test-record-branch-filter-any_call)
+ perf record -j any_ret kill (test-record-branch-filter-any_ret)
+ perf record -j hv kill (test-record-branch-filter-hv)
+ perf record -j ind_call kill (test-record-branch-filter-ind_call)
+ perf record -j k kill (test-record-branch-filter-k)
+ perf record -j u kill (test-record-branch-filter-u)
+ perf record -c 123 kill (test-record-count)
+ perf record -d kill (test-record-data)
+ perf record -F 100 kill (test-record-freq)
+ perf record -g kill (test-record-graph-default)
+ perf record -g kill (test-record-graph-default-aarch64)
+ perf record --call-graph dwarf kill (test-record-graph-dwarf)
+ perf record --call-graph fp kill (test-record-graph-fp)
+ perf record --call-graph fp kill (test-record-graph-fp-aarch64)
+ perf record -e '{cycles,instructions}' kill (test-record-group1)
+ perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill (test-record-group2)
+ perf record -e '{cycles,cache-misses}:S' kill (test-record-group-sampling1)
+ perf record -c 10000 -e '{cycles,cache-misses}:S' kill (test-record-group-sampling2)
+ perf record --no-buffering kill (test-record-no-buffering)
+ perf record -i kill (test-record-no-inherit)
+ perf record -n kill (test-record-no-samples)
+ perf record -c 100 -P kill (test-record-period)
+ perf record -c 1 --pfm-events=cycles:period=2 (test-record-pfm-period)
+ perf record -R kill (test-record-raw)
+ perf record -c 2 -e arm_spe_0// -- kill (test-record-spe-period)
+ perf record -e arm_spe_0/period=3/ -- kill (test-record-spe-period-term)
+ perf record -e arm_spe_0/pa_enable=1/ -- kill (test-record-spe-physical-address)
+ perf stat -e cycles kill (test-stat-basic)
+ perf stat kill (test-stat-default)
+ perf stat -d kill (test-stat-detailed-1)
+ perf stat -dd kill (test-stat-detailed-2)
+ perf stat -ddd kill (test-stat-detailed-3)
+ perf stat -e '{cycles,instructions}' kill (test-stat-group1)
+ perf stat -i -e cycles kill (test-stat-no-inherit)
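
To make the composition concrete, a minimal sketch of a test definition in
the format described above (field values illustrative, not a real test):

    [config]
    command = record
    args = --no-bpf-event kill >/dev/null 2>&1
    ret = 1

    [event:base-record]
    # any field given here overrides the base-record expectation
    sample_type=263
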
diff --git a/tools/perf/tests/shell/attr/base-record b/tools/perf/tests/shell/attr/base-record
new file mode 100644
index 000000000000..b44e4e6e4443
--- /dev/null
+++ b/tools/perf/tests/shell/attr/base-record
@@ -0,0 +1,41 @@
+[event]
+fd=1
+group_fd=-1
+# 0 or PERF_FLAG_FD_CLOEXEC flag
+flags=0|8
+cpu=*
+type=0|1
+size=136
+config=0|1
+sample_period=*
+sample_type=263
+read_format=0|4|20
+disabled=1
+inherit=1
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=0|1
+exclude_hv=0|1
+exclude_idle=0
+mmap=1
+comm=1
+freq=1
+inherit_stat=0
+enable_on_exec=1
+task=1
+watermark=0
+precise_ip=0|1|2|3
+mmap_data=0
+sample_id_all=1
+exclude_host=0|1
+exclude_guest=0|1
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
diff --git a/tools/perf/tests/shell/attr/base-record-spe b/tools/perf/tests/shell/attr/base-record-spe
new file mode 100644
index 000000000000..08fa96b59240
--- /dev/null
+++ b/tools/perf/tests/shell/attr/base-record-spe
@@ -0,0 +1,40 @@
+[event]
+fd=*
+group_fd=-1
+flags=*
+cpu=*
+type=*
+size=*
+config=*
+sample_period=*
+sample_type=*
+read_format=*
+disabled=*
+inherit=*
+pinned=*
+exclusive=*
+exclude_user=*
+exclude_kernel=*
+exclude_hv=*
+exclude_idle=*
+mmap=*
+comm=*
+freq=*
+inherit_stat=*
+enable_on_exec=*
+task=*
+watermark=*
+precise_ip=*
+mmap_data=*
+sample_id_all=*
+exclude_host=*
+exclude_guest=*
+exclude_callchain_kernel=*
+exclude_callchain_user=*
+wakeup_events=*
+bp_type=*
+config1=*
+config2=*
+branch_sample_type=*
+sample_regs_user=*
+sample_stack_user=*
diff --git a/tools/perf/tests/shell/attr/base-stat b/tools/perf/tests/shell/attr/base-stat
new file mode 100644
index 000000000000..fccd8ec4d1b0
--- /dev/null
+++ b/tools/perf/tests/shell/attr/base-stat
@@ -0,0 +1,41 @@
+[event]
+fd=1
+group_fd=-1
+# 0 or PERF_FLAG_FD_CLOEXEC flag
+flags=0|8
+cpu=*
+type=0
+size=136
+config=0
+sample_period=0
+sample_type=65536
+read_format=3
+disabled=1
+inherit=1
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=0|1
+exclude_hv=0|1
+exclude_idle=0
+mmap=0
+comm=0
+freq=0
+inherit_stat=0
+enable_on_exec=1
+task=0
+watermark=0
+precise_ip=0
+mmap_data=0
+sample_id_all=0
+exclude_host=0|1
+exclude_guest=0|1
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
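
A hedged decoding of two base-stat constants, using the PERF_SAMPLE_* and
PERF_FORMAT_* bit values from include/uapi/linux/perf_event.h:

    # sample_type=65536 is PERF_SAMPLE_IDENTIFIER (1 << 16);
    # read_format=3 is PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
    printf '%d %d\n' $((1 << 16)) $((1 | 2))   # -> 65536 3
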
diff --git a/tools/perf/tests/shell/attr/system-wide-dummy b/tools/perf/tests/shell/attr/system-wide-dummy
new file mode 100644
index 000000000000..a1e1d6a263bf
--- /dev/null
+++ b/tools/perf/tests/shell/attr/system-wide-dummy
@@ -0,0 +1,52 @@
+# Event added by system-wide or CPU perf-record to handle the race of
+# processes starting while /proc is processed.
+[event]
+fd=1
+group_fd=-1
+cpu=*
+pid=-1
+flags=8
+type=1
+size=136
+config=9
+sample_period=1
+# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER
+sample_type=65671
+read_format=4|20
+# Event will be enabled right away.
+disabled=0
+inherit=1
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=1
+exclude_hv=1
+exclude_idle=0
+mmap=1
+comm=1
+freq=0
+inherit_stat=0
+enable_on_exec=0
+task=1
+watermark=0
+precise_ip=0
+mmap_data=0
+sample_id_all=1
+exclude_host=0
+exclude_guest=1
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+mmap2=1
+comm_exec=1
+context_switch=0
+write_backward=0
+namespaces=0
+use_clockid=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
diff --git a/tools/perf/tests/shell/attr/test-record-C0 b/tools/perf/tests/shell/attr/test-record-C0
new file mode 100644
index 000000000000..1049ac8b52f2
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-C0
@@ -0,0 +1,24 @@
+[config]
+command = record
+args = --no-bpf-event -C 0 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+cpu=0
+
+# enable_on_exec is not set for CPU-attached events
+enable_on_exec=0
+
+# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_PERIOD | PERF_SAMPLE_IDENTIFIER
+# + PERF_SAMPLE_CPU added by -C 0
+sample_type=65927
+
+# Dummy event handles mmaps, comm and task.
+mmap=0
+comm=0
+task=0
+inherit=0
+
+[event:system-wide-dummy]
+inherit=0
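
The expected mask can be reproduced from the PERF_SAMPLE_* bits named in the
comment above; a quick shell check:

    # IP(1) | TID(2) | TIME(4) | PERIOD(256) | IDENTIFIER(65536) | CPU(128)
    printf '%d\n' $(( 1 | 2 | 4 | 256 | 65536 | 128 ))   # -> 65927
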
diff --git a/tools/perf/tests/shell/attr/test-record-basic b/tools/perf/tests/shell/attr/test-record-basic
new file mode 100644
index 000000000000..b0ca42a5ecc9
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-basic
@@ -0,0 +1,6 @@
+[config]
+command = record
+args = --no-bpf-event kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
diff --git a/tools/perf/tests/shell/attr/test-record-branch-any b/tools/perf/tests/shell/attr/test-record-branch-any
new file mode 100644
index 000000000000..1a99b3ce6b89
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-branch-any
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -b kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/shell/attr/test-record-branch-filter-any b/tools/perf/tests/shell/attr/test-record-branch-filter-any
new file mode 100644
index 000000000000..709768b508c6
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-branch-filter-any
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j any kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/shell/attr/test-record-branch-filter-any_call b/tools/perf/tests/shell/attr/test-record-branch-filter-any_call
new file mode 100644
index 000000000000..f943221f7825
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-branch-filter-any_call
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j any_call kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=16
diff --git a/tools/perf/tests/shell/attr/test-record-branch-filter-any_ret b/tools/perf/tests/shell/attr/test-record-branch-filter-any_ret
new file mode 100644
index 000000000000..fd4f5b4154a9
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-branch-filter-any_ret
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j any_ret kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=32
diff --git a/tools/perf/tests/shell/attr/test-record-branch-filter-hv b/tools/perf/tests/shell/attr/test-record-branch-filter-hv
new file mode 100644
index 000000000000..4e52d685ebe1
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-branch-filter-hv
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j hv kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/shell/attr/test-record-branch-filter-ind_call b/tools/perf/tests/shell/attr/test-record-branch-filter-ind_call
new file mode 100644
index 000000000000..e08c6ab3796e
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-branch-filter-ind_call
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j ind_call kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=64
diff --git a/tools/perf/tests/shell/attr/test-record-branch-filter-k b/tools/perf/tests/shell/attr/test-record-branch-filter-k
new file mode 100644
index 000000000000..b4b98f84fc2f
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-branch-filter-k
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j k kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/shell/attr/test-record-branch-filter-u b/tools/perf/tests/shell/attr/test-record-branch-filter-u
new file mode 100644
index 000000000000..fb9610edbb0d
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-branch-filter-u
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j u kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/shell/attr/test-record-count b/tools/perf/tests/shell/attr/test-record-count
new file mode 100644
index 000000000000..5e9b9019d786
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-count
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -c 123 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=123
+sample_type=7
+freq=0
diff --git a/tools/perf/tests/shell/attr/test-record-data b/tools/perf/tests/shell/attr/test-record-data
new file mode 100644
index 000000000000..a99bb13149c2
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-data
@@ -0,0 +1,10 @@
+[config]
+command = record
+args = --no-bpf-event -d kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+# sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_ADDR | PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC
+sample_type=33039
+mmap_data=1
diff --git a/tools/perf/tests/shell/attr/test-record-dummy-C0 b/tools/perf/tests/shell/attr/test-record-dummy-C0
new file mode 100644
index 000000000000..91499405fff4
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-dummy-C0
@@ -0,0 +1,55 @@
+[config]
+command = record
+args = --no-bpf-event -e dummy -C 0 kill >/dev/null 2>&1
+ret = 1
+
+[event]
+fd=1
+group_fd=-1
+cpu=0
+pid=-1
+flags=8
+type=1
+size=136
+config=9
+sample_period=4000
+# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_PERIOD
+# + PERF_SAMPLE_CPU added by -C 0
+sample_type=391
+read_format=4|20
+disabled=0
+inherit=0
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=0
+exclude_hv=0
+exclude_idle=0
+mmap=1
+comm=1
+freq=1
+inherit_stat=0
+enable_on_exec=0
+task=1
+watermark=0
+precise_ip=0
+mmap_data=0
+sample_id_all=1
+exclude_host=0
+exclude_guest=0
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+mmap2=1
+comm_exec=1
+context_switch=0
+write_backward=0
+namespaces=0
+use_clockid=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
diff --git a/tools/perf/tests/shell/attr/test-record-freq b/tools/perf/tests/shell/attr/test-record-freq
new file mode 100644
index 000000000000..89e29f6b2ae0
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-freq
@@ -0,0 +1,7 @@
+[config]
+command = record
+args = --no-bpf-event -F 100 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=100
diff --git a/tools/perf/tests/shell/attr/test-record-graph-default b/tools/perf/tests/shell/attr/test-record-graph-default
new file mode 100644
index 000000000000..f0a18b4ea4f5
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-graph-default
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -g kill >/dev/null 2>&1
+ret = 1
+# arm64 enables registers in the default mode (fp)
+arch = !aarch64
+
+[event:base-record]
+sample_type=295
diff --git a/tools/perf/tests/shell/attr/test-record-graph-default-aarch64 b/tools/perf/tests/shell/attr/test-record-graph-default-aarch64
new file mode 100644
index 000000000000..e98d62efb6f7
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-graph-default-aarch64
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -g kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
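
sample_regs_user here is a single register bit; assuming PERF_REG_ARM64_LR
is register index 30 (arch/arm64 perf_regs.h), the constant checks out:

    printf '%d\n' $(( 1 << 30 ))   # -> 1073741824, the LR register bit
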
diff --git a/tools/perf/tests/shell/attr/test-record-graph-dwarf b/tools/perf/tests/shell/attr/test-record-graph-dwarf
new file mode 100644
index 000000000000..ae92061d611d
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-graph-dwarf
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=45359
+exclude_callchain_user=1
+sample_stack_user=8192
+# TODO different for each arch, no support for that now
+sample_regs_user=*
+mmap_data=1
diff --git a/tools/perf/tests/shell/attr/test-record-graph-fp b/tools/perf/tests/shell/attr/test-record-graph-fp
new file mode 100644
index 000000000000..a6e60e839205
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-graph-fp
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
+ret = 1
+# arm64 enables registers in fp mode
+arch = !aarch64
+
+[event:base-record]
+sample_type=295
diff --git a/tools/perf/tests/shell/attr/test-record-graph-fp-aarch64 b/tools/perf/tests/shell/attr/test-record-graph-fp-aarch64
new file mode 100644
index 000000000000..cbeea9971285
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-graph-fp-aarch64
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
diff --git a/tools/perf/tests/shell/attr/test-record-group-sampling b/tools/perf/tests/shell/attr/test-record-group-sampling
new file mode 100644
index 000000000000..86a940d7895d
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-group-sampling
@@ -0,0 +1,40 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+ret = 1
+kernel_until = 6.12
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+sample_type=343
+read_format=12|28
+inherit=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+
+# cache-misses
+type=0
+config=3
+
+# default | PERF_SAMPLE_READ | PERF_SAMPLE_PERIOD
+sample_type=343
+
+# PERF_FORMAT_ID | PERF_FORMAT_GROUP | PERF_FORMAT_LOST
+read_format=12|28
+task=0
+mmap=0
+comm=0
+enable_on_exec=0
+disabled=0
+
+# inherit is disabled for group sampling
+inherit=0
+
+# sampling disabled
+sample_freq=0
+sample_period=0
+freq=0
+write_backward=0
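
The alternated read_format=12|28 follows from the PERF_FORMAT_* bits named
in the comments above; a quick check:

    # ID(4) | GROUP(8) = 12; adding LOST(16) gives 28
    printf '%d %d\n' $(( 4 | 8 )) $(( 4 | 8 | 16 ))   # -> 12 28
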
diff --git a/tools/perf/tests/shell/attr/test-record-group-sampling1 b/tools/perf/tests/shell/attr/test-record-group-sampling1
new file mode 100644
index 000000000000..4748ab7bf684
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-group-sampling1
@@ -0,0 +1,50 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+ret = 1
+kernel_since = 6.12
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+
+# cycles
+type=0
+config=0
+
+# default | PERF_SAMPLE_READ | PERF_SAMPLE_PERIOD
+sample_type=343
+
+# PERF_FORMAT_ID | PERF_FORMAT_GROUP | PERF_FORMAT_LOST | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
+read_format=28|31
+task=1
+mmap=1
+comm=1
+enable_on_exec=1
+disabled=1
+
+# inherit is enabled for group sampling
+inherit=1
+
+[event-2:base-record]
+fd=2
+group_fd=1
+
+# cache-misses
+type=0
+config=3
+
+# default | PERF_SAMPLE_READ | PERF_SAMPLE_PERIOD
+sample_type=343
+
+# PERF_FORMAT_ID | PERF_FORMAT_GROUP | PERF_FORMAT_LOST | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
+read_format=28|31
+task=0
+mmap=0
+comm=0
+enable_on_exec=0
+disabled=0
+freq=0
+
+# inherit is enabled for group sampling
+inherit=1
diff --git a/tools/perf/tests/shell/attr/test-record-group-sampling2 b/tools/perf/tests/shell/attr/test-record-group-sampling2
new file mode 100644
index 000000000000..e0432244a0eb
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-group-sampling2
@@ -0,0 +1,61 @@
+[config]
+command = record
+args = --no-bpf-event -c 10000 -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+ret = 1
+kernel_since = 6.12
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+
+# cycles
+type=0
+config=0
+
+# default | PERF_SAMPLE_READ
+sample_type=87
+
+# PERF_FORMAT_ID | PERF_FORMAT_GROUP | PERF_FORMAT_LOST | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
+read_format=28|31
+task=1
+mmap=1
+comm=1
+enable_on_exec=1
+disabled=1
+
+# inherit is enabled for group sampling
+inherit=1
+
+# period sampling from -c 10000; frequency mode disabled
+sample_freq=0
+sample_period=10000
+freq=0
+write_backward=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+
+# cache-misses
+type=0
+config=3
+
+# default | PERF_SAMPLE_READ
+sample_type=87
+
+# PERF_FORMAT_ID | PERF_FORMAT_GROUP | PERF_FORMAT_LOST | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
+read_format=28|31
+task=0
+mmap=0
+comm=0
+enable_on_exec=0
+disabled=0
+
+# inherit is enabled for group sampling
+inherit=1
+
+# sampling disabled
+sample_freq=0
+sample_period=0
+freq=0
+write_backward=0
diff --git a/tools/perf/tests/shell/attr/test-record-group1 b/tools/perf/tests/shell/attr/test-record-group1
new file mode 100644
index 000000000000..eeb1db392bc9
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-group1
@@ -0,0 +1,23 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+sample_type=327
+read_format=4|20
+
+[event-2:base-record]
+fd=2
+group_fd=1
+type=0
+config=1
+sample_type=327
+read_format=4|20
+mmap=0
+comm=0
+task=0
+enable_on_exec=0
+disabled=0
diff --git a/tools/perf/tests/shell/attr/test-record-group2 b/tools/perf/tests/shell/attr/test-record-group2
new file mode 100644
index 000000000000..891d41a7bddf
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-group2
@@ -0,0 +1,30 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles/period=1234000/,instructions/period=6789000/}:S' kill >/dev/null 2>&1
+ret = 1
+kernel_until = 6.12
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+config=0|1
+sample_period=1234000
+sample_type=87
+read_format=12|28
+inherit=0
+freq=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+config=0|1
+sample_period=6789000
+sample_type=87
+read_format=12|28
+disabled=0
+inherit=0
+mmap=0
+comm=0
+freq=0
+enable_on_exec=0
+task=0
diff --git a/tools/perf/tests/shell/attr/test-record-group3 b/tools/perf/tests/shell/attr/test-record-group3
new file mode 100644
index 000000000000..249be884959e
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-group3
@@ -0,0 +1,31 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles/period=1234000/,instructions/period=6789000/}:S' kill >/dev/null 2>&1
+ret = 1
+kernel_since = 6.12
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+config=0|1
+sample_period=1234000
+sample_type=87
+read_format=28|31
+disabled=1
+inherit=1
+freq=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+config=0|1
+sample_period=6789000
+sample_type=87
+read_format=28|31
+disabled=0
+inherit=1
+mmap=0
+comm=0
+freq=0
+enable_on_exec=0
+task=0
diff --git a/tools/perf/tests/shell/attr/test-record-no-buffering b/tools/perf/tests/shell/attr/test-record-no-buffering
new file mode 100644
index 000000000000..583dcbb078ba
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-no-buffering
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event --no-buffering kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=263
+watermark=0
+wakeup_events=1
diff --git a/tools/perf/tests/shell/attr/test-record-no-inherit b/tools/perf/tests/shell/attr/test-record-no-inherit
new file mode 100644
index 000000000000..15d1dc162e1c
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-no-inherit
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -i kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=263
+inherit=0
diff --git a/tools/perf/tests/shell/attr/test-record-no-samples b/tools/perf/tests/shell/attr/test-record-no-samples
new file mode 100644
index 000000000000..596fbd6d5a2c
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-no-samples
@@ -0,0 +1,7 @@
+[config]
+command = record
+args = --no-bpf-event -n kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=0
diff --git a/tools/perf/tests/shell/attr/test-record-period b/tools/perf/tests/shell/attr/test-record-period
new file mode 100644
index 000000000000..119101154c5e
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-period
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=100
+freq=0
diff --git a/tools/perf/tests/shell/attr/test-record-pfm-period b/tools/perf/tests/shell/attr/test-record-pfm-period
new file mode 100644
index 000000000000..368f5b814094
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-pfm-period
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -c 10000 --pfm-events=cycles:period=77777 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=77777
+sample_type=7
+freq=0
diff --git a/tools/perf/tests/shell/attr/test-record-raw b/tools/perf/tests/shell/attr/test-record-raw
new file mode 100644
index 000000000000..13a5f7860c78
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-raw
@@ -0,0 +1,7 @@
+[config]
+command = record
+args = --no-bpf-event -R kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=1415
diff --git a/tools/perf/tests/shell/attr/test-record-spe-period b/tools/perf/tests/shell/attr/test-record-spe-period
new file mode 100644
index 000000000000..75f8c9cd8e3f
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-spe-period
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event -c 2 -e arm_spe_0// -- kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event-10:base-record-spe]
+sample_period=2
+freq=0
+
+# dummy event
+[event-1:base-record-spe]
diff --git a/tools/perf/tests/shell/attr/test-record-spe-period-term b/tools/perf/tests/shell/attr/test-record-spe-period-term
new file mode 100644
index 000000000000..8f60a4fec657
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-spe-period-term
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event -e arm_spe_0/period=3/ -- kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event-10:base-record-spe]
+sample_period=3
+freq=0
+
+# dummy event
+[event-1:base-record-spe]
diff --git a/tools/perf/tests/shell/attr/test-record-spe-physical-address b/tools/perf/tests/shell/attr/test-record-spe-physical-address
new file mode 100644
index 000000000000..7ebcf5012ce3
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-spe-physical-address
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event -e arm_spe_0/pa_enable=1/ -- kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event-10:base-record-spe]
+# 622727 is the decimal of IP|TID|TIME|CPU|IDENTIFIER|DATA_SRC|PHYS_ADDR
+sample_type=622727
+
+# dummy event
+[event-1:base-record-spe]
\ No newline at end of file
diff --git a/tools/perf/tests/shell/attr/test-record-user-regs-no-sve-aarch64 b/tools/perf/tests/shell/attr/test-record-user-regs-no-sve-aarch64
new file mode 100644
index 000000000000..bed765450ca9
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-user-regs-no-sve-aarch64
@@ -0,0 +1,9 @@
+# Test that asking for VG fails if the system doesn't support SVE. This
+# applies both before and after the feature was added in 6.1
+[config]
+command = record
+args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
+ret = 129
+test_ret = true
+arch = aarch64
+auxv = auxv["AT_HWCAP"] & 0x400000 == 0
diff --git a/tools/perf/tests/shell/attr/test-record-user-regs-old-sve-aarch64 b/tools/perf/tests/shell/attr/test-record-user-regs-old-sve-aarch64
new file mode 100644
index 000000000000..15ebfc3418e3
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-user-regs-old-sve-aarch64
@@ -0,0 +1,10 @@
+# Test that asking for VG always fails on old kernels because it was
+# added in 6.1. This applies to systems that either support or don't
+# support SVE.
+[config]
+command = record
+args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
+ret = 129
+test_ret = true
+arch = aarch64
+kernel_until = 6.1
diff --git a/tools/perf/tests/shell/attr/test-record-user-regs-sve-aarch64 b/tools/perf/tests/shell/attr/test-record-user-regs-sve-aarch64
new file mode 100644
index 000000000000..a65113cd7311
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-record-user-regs-sve-aarch64
@@ -0,0 +1,14 @@
+# Test that asking for VG works if the system has SVE and after the
+# feature was added in 6.1
+[config]
+command = record
+args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1
+ret = 1
+test_ret = true
+arch = aarch64
+auxv = auxv["AT_HWCAP"] & 0x400000 == 0x400000
+kernel_since = 6.1
+
+[event:base-record]
+sample_type=4359
+sample_regs_user=70368744177664
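
Both magic numbers in the SVE tests decode cleanly, assuming HWCAP_SVE is
bit 22 and PERF_REG_ARM64_VG is register index 46 (arm64 kernel headers):

    printf '0x%x %d\n' $(( 1 << 22 )) $(( 1 << 46 ))   # -> 0x400000 70368744177664
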
diff --git a/tools/perf/tests/shell/attr/test-stat-C0 b/tools/perf/tests/shell/attr/test-stat-C0
new file mode 100644
index 000000000000..a2c76d10b2bb
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-stat-C0
@@ -0,0 +1,10 @@
+[config]
+command = stat
+args = -e cycles -C 0 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-stat]
+# events are disabled by default when attached to cpu
+disabled=1
+enable_on_exec=0
+optional=1
diff --git a/tools/perf/tests/shell/attr/test-stat-basic b/tools/perf/tests/shell/attr/test-stat-basic
new file mode 100644
index 000000000000..69867d049fda
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-stat-basic
@@ -0,0 +1,7 @@
+[config]
+command = stat
+args = -e cycles kill >/dev/null 2>&1
+ret = 1
+
+[event:base-stat]
+optional=1
diff --git a/tools/perf/tests/shell/attr/test-stat-default b/tools/perf/tests/shell/attr/test-stat-default
new file mode 100644
index 000000000000..e47fb4944679
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-stat-default
@@ -0,0 +1,229 @@
+[config]
+command = stat
+args = kill >/dev/null 2>&1
+ret = 1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK
+[event1:base-stat]
+fd=1
+type=1
+config=1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES
+[event2:base-stat]
+fd=2
+type=1
+config=3
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS
+[event3:base-stat]
+fd=3
+type=1
+config=4
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS
+[event4:base-stat]
+fd=4
+type=1
+config=2
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
+[event5:base-stat]
+fd=5
+type=0
+config=0
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+[event6:base-stat]
+fd=6
+type=0
+config=7
+optional=1
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+[event7:base-stat]
+fd=7
+type=0
+config=8
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS
+[event8:base-stat]
+fd=8
+type=0
+config=1
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+[event9:base-stat]
+fd=9
+type=0
+config=4
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
+[event10:base-stat]
+fd=10
+type=0
+config=5
+optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+[event20:base-stat]
+fd=20
+type=4
+config=4109
+optional=1
+
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
+[event21:base-stat]
+fd=21
+type=4
+config=17039629
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
+[event22:base-stat]
+fd=22
+type=4
+config=60
+optional=1
+
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
+[event23:base-stat]
+fd=23
+type=4
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event24:base-stat]
+fd=24
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event25:base-stat]
+fd=25
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event26:base-stat]
+fd=26
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event27:base-stat]
+fd=27
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event28:base-stat]
+fd=28
+type=4
+config=270
+optional=1
diff --git a/tools/perf/tests/shell/attr/test-stat-detailed-1 b/tools/perf/tests/shell/attr/test-stat-detailed-1
new file mode 100644
index 000000000000..3d500d3e0c5c
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-stat-detailed-1
@@ -0,0 +1,271 @@
+[config]
+command = stat
+args = -d kill >/dev/null 2>&1
+ret = 1
+
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK
+[event1:base-stat]
+fd=1
+type=1
+config=1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES
+[event2:base-stat]
+fd=2
+type=1
+config=3
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS
+[event3:base-stat]
+fd=3
+type=1
+config=4
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS
+[event4:base-stat]
+fd=4
+type=1
+config=2
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
+[event5:base-stat]
+fd=5
+type=0
+config=0
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+[event6:base-stat]
+fd=6
+type=0
+config=7
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+[event7:base-stat]
+fd=7
+type=0
+config=8
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS
+[event8:base-stat]
+fd=8
+type=0
+config=1
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+[event9:base-stat]
+fd=9
+type=0
+config=4
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
+[event10:base-stat]
+fd=10
+type=0
+config=5
+optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+[event20:base-stat]
+fd=20
+type=4
+config=4109
+optional=1
+
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
+[event21:base-stat]
+fd=21
+type=4
+config=17039629
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
+[event22:base-stat]
+fd=22
+type=4
+config=60
+optional=1
+
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
+[event23:base-stat]
+fd=23
+type=4
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event24:base-stat]
+fd=24
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event25:base-stat]
+fd=25
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event26:base-stat]
+fd=26
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event27:base-stat]
+fd=27
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event28:base-stat]
+fd=28
+type=4
+config=270
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event29:base-stat]
+fd=29
+type=3
+config=0
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event30:base-stat]
+fd=30
+type=3
+config=65536
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event31:base-stat]
+fd=31
+type=3
+config=2
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event32:base-stat]
+fd=32
+type=3
+config=65538
+optional=1
diff --git a/tools/perf/tests/shell/attr/test-stat-detailed-2 b/tools/perf/tests/shell/attr/test-stat-detailed-2
new file mode 100644
index 000000000000..01777a63752f
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-stat-detailed-2
@@ -0,0 +1,331 @@
+[config]
+command = stat
+args = -dd kill >/dev/null 2>&1
+ret = 1
+
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK
+[event1:base-stat]
+fd=1
+type=1
+config=1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES
+[event2:base-stat]
+fd=2
+type=1
+config=3
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS
+[event3:base-stat]
+fd=3
+type=1
+config=4
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS
+[event4:base-stat]
+fd=4
+type=1
+config=2
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
+[event5:base-stat]
+fd=5
+type=0
+config=0
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+[event6:base-stat]
+fd=6
+type=0
+config=7
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+[event7:base-stat]
+fd=7
+type=0
+config=8
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS
+[event8:base-stat]
+fd=8
+type=0
+config=1
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+[event9:base-stat]
+fd=9
+type=0
+config=4
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
+[event10:base-stat]
+fd=10
+type=0
+config=5
+optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+[event20:base-stat]
+fd=20
+type=4
+config=4109
+optional=1
+
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
+[event21:base-stat]
+fd=21
+type=4
+config=17039629
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
+[event22:base-stat]
+fd=22
+type=4
+config=60
+optional=1
+
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
+[event23:base-stat]
+fd=23
+type=4
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event24:base-stat]
+fd=24
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event25:base-stat]
+fd=25
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event26:base-stat]
+fd=26
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event27:base-stat]
+fd=27
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event28:base-stat]
+fd=28
+type=4
+config=270
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event29:base-stat]
+fd=29
+type=3
+config=0
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event30:base-stat]
+fd=30
+type=3
+config=65536
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event31:base-stat]
+fd=31
+type=3
+config=2
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event32:base-stat]
+fd=32
+type=3
+config=65538
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_L1I << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event33:base-stat]
+fd=33
+type=3
+config=1
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_L1I << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event34:base-stat]
+fd=34
+type=3
+config=65537
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_DTLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event35:base-stat]
+fd=35
+type=3
+config=3
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_DTLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event36:base-stat]
+fd=36
+type=3
+config=65539
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_ITLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event37:base-stat]
+fd=37
+type=3
+config=4
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_ITLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event38:base-stat]
+fd=38
+type=3
+config=65540
+optional=1
diff --git a/tools/perf/tests/shell/attr/test-stat-detailed-3 b/tools/perf/tests/shell/attr/test-stat-detailed-3
new file mode 100644
index 000000000000..8400abd7e1e4
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-stat-detailed-3
@@ -0,0 +1,351 @@
+[config]
+command = stat
+args = -ddd kill >/dev/null 2>&1
+ret = 1
+
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK
+[event1:base-stat]
+fd=1
+type=1
+config=1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES
+[event2:base-stat]
+fd=2
+type=1
+config=3
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS
+[event3:base-stat]
+fd=3
+type=1
+config=4
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS
+[event4:base-stat]
+fd=4
+type=1
+config=2
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
+[event5:base-stat]
+fd=5
+type=0
+config=0
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+[event6:base-stat]
+fd=6
+type=0
+config=7
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+[event7:base-stat]
+fd=7
+type=0
+config=8
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS
+[event8:base-stat]
+fd=8
+type=0
+config=1
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+[event9:base-stat]
+fd=9
+type=0
+config=4
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
+[event10:base-stat]
+fd=10
+type=0
+config=5
+optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
+[event20:base-stat]
+fd=20
+type=4
+config=4109
+optional=1
+
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
+[event21:base-stat]
+fd=21
+type=4
+config=17039629
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
+[event22:base-stat]
+fd=22
+type=4
+config=60
+optional=1
+
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
+[event23:base-stat]
+fd=23
+type=4
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event24:base-stat]
+fd=24
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event25:base-stat]
+fd=25
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event26:base-stat]
+fd=26
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event27:base-stat]
+fd=27
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event28:base-stat]
+fd=28
+type=4
+config=270
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
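+# i.e. config = cache_id | (op_id << 8) | (result_id << 16); for example,
+# the LL read miss below is 2 | (0 << 8) | (1 << 16) = 65538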
+[event29:base-stat]
+fd=29
+type=3
+config=0
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event30:base-stat]
+fd=30
+type=3
+config=65536
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event31:base-stat]
+fd=31
+type=3
+config=2
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event32:base-stat]
+fd=32
+type=3
+config=65538
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1I << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event33:base-stat]
+fd=33
+type=3
+config=1
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1I << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event34:base-stat]
+fd=34
+type=3
+config=65537
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_DTLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event35:base-stat]
+fd=35
+type=3
+config=3
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_DTLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event36:base-stat]
+fd=36
+type=3
+config=65539
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_ITLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event37:base-stat]
+fd=37
+type=3
+config=4
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_ITLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event38:base-stat]
+fd=38
+type=3
+config=65540
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event39:base-stat]
+fd=39
+type=3
+config=512
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event40:base-stat]
+fd=40
+type=3
+config=66048
+optional=1
diff --git a/tools/perf/tests/shell/attr/test-stat-group1 b/tools/perf/tests/shell/attr/test-stat-group1
new file mode 100644
index 000000000000..1746751123dc
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-stat-group1
@@ -0,0 +1,17 @@
+[config]
+command = stat
+args = -e '{cycles,instructions}' kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-stat]
+fd=1
+group_fd=-1
+read_format=3|15
+
+[event-2:base-stat]
+fd=2
+group_fd=1
+config=1
+disabled=0
+enable_on_exec=0
+read_format=3|15
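+
+# Note: read_format is a bitmask (TOTAL_TIME_ENABLED=1, TOTAL_TIME_RUNNING=2,
+# ID=4, GROUP=8); the '3|15' notation lets the checker accept either value.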
diff --git a/tools/perf/tests/shell/attr/test-stat-no-inherit b/tools/perf/tests/shell/attr/test-stat-no-inherit
new file mode 100644
index 000000000000..924fbb9300d1
--- /dev/null
+++ b/tools/perf/tests/shell/attr/test-stat-no-inherit
@@ -0,0 +1,8 @@
+[config]
+command = stat
+args = -i -e cycles kill >/dev/null 2>&1
+ret = 1
+
+[event:base-stat]
+inherit=0
+optional=1
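+
+# ('-i' is --no-inherit: events are not inherited by child processes,
+# hence the expected inherit=0)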
diff --git a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
new file mode 100755
index 000000000000..8226449ac5c3
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+# perf_probe :: Reject blacklisted probes (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_adding_blacklisted of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# Blacklisted functions must not be successfully added as probes;
+# they have to be skipped.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+# skip if not supported
+BLACKFUNC_LIST=`head -n 5 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
+if [ -z "$BLACKFUNC_LIST" ]; then
+ print_overall_skipped
+ exit 2
+fi
+
+# try to find vmlinux with DWARF debug info
+VMLINUX_FILE=$($CMD_PERF probe -v random_probe |& grep "Using.*for symbols" | sed -r 's/^Using (.*) for symbols$/\1/')
+
+# remove all previously added probes
+clear_all_probes
+
+
+### adding blacklisted function
+REGEX_SCOPE_FAIL="Failed to find scope of probe point"
+REGEX_SKIP_MESSAGE=" is blacklisted function, skip it\."
+REGEX_NOT_FOUND_MESSAGE="Probe point \'$RE_EVENT\' not found."
+REGEX_ERROR_MESSAGE="Error: Failed to add events."
+REGEX_INVALID_ARGUMENT="Failed to write event: Invalid argument"
+REGEX_SYMBOL_FAIL="Failed to find symbol at $RE_ADDRESS"
+REGEX_OUT_SECTION="$RE_EVENT is out of \.\w+, skip it"
+REGEX_MISSING_DECL_LINE="A function DIE doesn't have decl_line. Maybe broken DWARF?"
+
+BLACKFUNC=""
+SKIP_DWARF=0
+
+for BLACKFUNC in $BLACKFUNC_LIST; do
+ echo "Probing $BLACKFUNC"
+
+ # functions from blacklist should be skipped by perf probe
+ ! $CMD_PERF probe $BLACKFUNC > $LOGS_DIR/adding_blacklisted.log 2> $LOGS_DIR/adding_blacklisted.err
+ PERF_EXIT_CODE=$?
+
+ # check for bad DWARF polluting the result
+ ../common/check_all_patterns_found.pl "$REGEX_MISSING_DECL_LINE" >/dev/null < $LOGS_DIR/adding_blacklisted.err
+
+ if [ $? -eq 0 ]; then
+ SKIP_DWARF=1
+ echo "Result polluted by broken DWARF, trying another probe"
+
+ # confirm that the broken DWARF comes from assembler
+ if [ -n "$VMLINUX_FILE" ]; then
+ readelf -wi "$VMLINUX_FILE" |
+ awk -v probe="$BLACKFUNC" '/DW_AT_language/ { comp_lang = $0 }
+ $0 ~ probe { if (comp_lang) { print comp_lang }; exit }' |
+ grep -q "MIPS assembler"
+
+ CHECK_EXIT_CODE=$?
+ if [ $CHECK_EXIT_CODE -ne 0 ]; then
+				SKIP_DWARF=0 # DWARF is broken even though vmlinux is available
+ break
+ fi
+ fi
+ else
+ ../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
+ CHECK_EXIT_CODE=$?
+
+ SKIP_DWARF=0
+ break
+ fi
+done
+
+if [ $SKIP_DWARF -eq 1 ]; then
+ print_testcase_skipped "adding blacklisted function $BLACKFUNC"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding blacklisted function $BLACKFUNC"
+ (( TEST_RESULT += $? ))
+fi
+
+### listing not-added probe
+
+# blacklisted probes should NOT appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_blacklisted_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" < $LOGS_DIR/adding_blacklisted_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing blacklisted probe (should NOT be listed)"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
new file mode 100755
index 000000000000..df288cf90cd6
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -0,0 +1,301 @@
+#!/bin/bash
+# perf_probe :: Add probes, list and remove them (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_adding_kernel of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test covers adding probes, listing them correctly,
+# and removing them.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+# shellcheck source=lib/probe_vfs_getname.sh
+. "$(dirname "$0")/../lib/probe_vfs_getname.sh"
+
+TEST_PROBE=${TEST_PROBE:-"inode_permission"}
+
+# set NO_DEBUGINFO to skip testcase if debuginfo is not present
+# skip_if_no_debuginfo returns 2 if debuginfo is not present
+skip_if_no_debuginfo
+if [ $? -eq 2 ]; then
+ NO_DEBUGINFO=1
+fi
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 2
+fi
+
+
+### basic probe adding
+
+for opt in "" "-a" "--add"; do
+ clear_all_probes
+ $CMD_PERF probe $opt $TEST_PROBE 2> $LOGS_DIR/adding_kernel_add$opt.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_add$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding probe $TEST_PROBE :: $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### listing added probe :: perf list
+
+# any added probes should appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "probe:${TEST_PROBE}(?:_\d+)?\s+\[Tracepoint event\]" "Metric Groups:" < $LOGS_DIR/adding_kernel_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf list"
+(( TEST_RESULT += $? ))
+
+
+### listing added probe :: perf probe -l
+
+# '-l' should list all the added probes as well
+$CMD_PERF probe -l > $LOGS_DIR/adding_kernel_list-l.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "\s*probe:${TEST_PROBE}(?:_\d+)?\s+\(on ${TEST_PROBE}(?:[:\+]$RE_NUMBER_HEX)?@.+\)" < $LOGS_DIR/adding_kernel_list-l.log
+CHECK_EXIT_CODE=$?
+
+if [ $NO_DEBUGINFO ] ; then
+	print_testcase_skipped "Skipped due to missing debuginfo"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf probe -l"
+fi
+
+(( TEST_RESULT += $? ))
+
+
+### using added probe
+
+$CMD_PERF stat -e probe:$TEST_PROBE\* -o $LOGS_DIR/adding_kernel_using_probe.log -- cat /proc/uptime > /dev/null
+PERF_EXIT_CODE=$?
+
+REGEX_STAT_HEADER="\s*Performance counter stats for \'cat /proc/uptime\':"
+REGEX_STAT_VALUES="\s*\d+\s+probe:$TEST_PROBE"
+# the value should be nonzero (at least 1)
+REGEX_STAT_VALUE_NONZERO="\s*[1-9][0-9]*\s+probe:$TEST_PROBE"
+REGEX_STAT_TIME="\s*$RE_NUMBER\s+seconds (?:time elapsed|user|sys)"
+../common/check_all_lines_matched.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUES" "$REGEX_STAT_TIME" "$RE_LINE_COMMENT" "$RE_LINE_EMPTY" < $LOGS_DIR/adding_kernel_using_probe.log
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUE_NONZERO" "$REGEX_STAT_TIME" < $LOGS_DIR/adding_kernel_using_probe.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using added probe"
+(( TEST_RESULT += $? ))
+
+
+### removing added probe
+
+# '-d' should remove the probe
+$CMD_PERF probe -d $TEST_PROBE\* 2> $LOGS_DIR/adding_kernel_removing.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" < $LOGS_DIR/adding_kernel_removing.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "deleting added probe"
+(( TEST_RESULT += $? ))
+
+
+### listing removed probe
+
+# removed probes should NOT appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list_removed.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" < $LOGS_DIR/adding_kernel_list_removed.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing removed probe (should NOT be listed)"
+(( TEST_RESULT += $? ))
+
+
+### dry run
+
+# the '-n' switch should run it in dry mode
+$CMD_PERF probe -n --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_dryrun.err
+PERF_EXIT_CODE=$?
+
+# check for the output (should be the same as usual)
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_dryrun.err
+CHECK_EXIT_CODE=$?
+
+# check that no probe was actually added
+! ( $CMD_PERF probe -l | grep "probe:$TEST_PROBE" )
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "dry run :: adding probe"
+(( TEST_RESULT += $? ))
+
+
+### force-adding probes
+
+# when using '--force' a probe should be added even if it is already there
+$CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_01.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_01.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: first probe adding"
+(( TEST_RESULT += $? ))
+
+# adding existing probe without '--force' should fail
+! $CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_02.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Error: event \"$TEST_PROBE\" already exists." "Error: Failed to add events." < $LOGS_DIR/adding_kernel_forceadd_02.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (without force)"
+(( TEST_RESULT += $? ))
+
+# adding existing probe with '--force' should pass
+NO_OF_PROBES=`$CMD_PERF probe -l $TEST_PROBE | wc -l`
+$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:${TEST_PROBE}_${NO_OF_PROBES}" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_03.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (with force)"
+(( TEST_RESULT += $? ))
+
+
+### using doubled probe
+
+# since they are the same, they should produce the same results
+$CMD_PERF stat -e probe:$TEST_PROBE -e probe:${TEST_PROBE}_${NO_OF_PROBES} -x';' -o $LOGS_DIR/adding_kernel_using_two.log -- bash -c 'cat /proc/cpuinfo > /dev/null'
+PERF_EXIT_CODE=$?
+
+REGEX_LINE="$RE_NUMBER;+probe:${TEST_PROBE}_?(?:$NO_OF_PROBES)?;$RE_NUMBER;$RE_NUMBER"
+../common/check_all_lines_matched.pl "$REGEX_LINE" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/adding_kernel_using_two.log
+CHECK_EXIT_CODE=$?
+
+VALUE_1=`grep "$TEST_PROBE;" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
+VALUE_2=`grep "${TEST_PROBE}_${NO_OF_PROBES};" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
+
+test $VALUE_1 -eq $VALUE_2
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe"
+(( TEST_RESULT += $? ))
+
+
+### removing multiple probes
+
+# using wildcards should remove all matching probes
+$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
+(( TEST_RESULT += $? ))
+
+
+### wildcard adding support
+
+$CMD_PERF probe -nf --max-probes=512 -a 'vfs_* $params' 2> $LOGS_DIR/adding_kernel_adding_wildcard.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "probe:vfs_mknod" "probe:vfs_create" "probe:vfs_rmdir" "probe:vfs_link" "probe:vfs_write" < $LOGS_DIR/adding_kernel_adding_wildcard.err
+CHECK_EXIT_CODE=$?
+
+if [ $NO_DEBUGINFO ] ; then
+	print_testcase_skipped "Skipped due to missing debuginfo"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "wildcard adding support"
+fi
+
+(( TEST_RESULT += $? ))
+
+
+### non-existing variable
+
+# perf probe should survive a non-existing variable probing attempt
+{ $CMD_PERF probe 'vfs_read somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64' ; } 2> $LOGS_DIR/adding_kernel_nonexisting.err
+PERF_EXIT_CODE=$?
+
+# the exit code must be neither 0 (success) nor 139 (segfault)
+test $PERF_EXIT_CODE -ne 139 -a $PERF_EXIT_CODE -ne 0
+PERF_EXIT_CODE=$?
+
+# check that the error message is reasonable
+../common/check_all_patterns_found.pl "Failed to find" "somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64" < $LOGS_DIR/adding_kernel_nonexisting.err
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "in this function|at this address" "Error" "Failed to add events" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+../common/check_all_lines_matched.pl "Failed to find" "Error" "Probe point .+ not found" "optimized out" "Use.+\-\-range option to show.+location range" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+../common/check_no_patterns_found.pl "$RE_SEGFAULT" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+
+if [ $NO_DEBUGINFO ]; then
+	print_testcase_skipped "Skipped due to missing debuginfo"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing variable"
+fi
+
+(( TEST_RESULT += $? ))
+
+
+### function with return value
+
+# adding probe with return value
+$CMD_PERF probe --add "$TEST_PROBE%return \$retval" 2> $LOGS_DIR/adding_kernel_func_retval_add.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE%return with \\\$retval" < $LOGS_DIR/adding_kernel_func_retval_add.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: add"
+(( TEST_RESULT += $? ))
+
+# recording some data
+$CMD_PERF record -e probe:$TEST_PROBE\* -o $CURRENT_TEST_DIR/perf.data -- cat /proc/cpuinfo > /dev/null 2> $LOGS_DIR/adding_kernel_func_retval_record.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/adding_kernel_func_retval_record.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: record"
+(( TEST_RESULT += $? ))
+
+# perf script should report the function calls with the correct arg values
+$CMD_PERF script -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/adding_kernel_func_retval_script.log
+PERF_EXIT_CODE=$?
+
+REGEX_SCRIPT_LINE="\s*cat\s+$RE_NUMBER\s+\[$RE_NUMBER\]\s+$RE_NUMBER:\s+probe:$TEST_PROBE\w*:\s+\($RE_NUMBER_HEX\s+<\-\s+$RE_NUMBER_HEX\)\s+arg1=$RE_NUMBER_HEX"
+../common/check_all_lines_matched.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: script"
+(( TEST_RESULT += $? ))
+
+
+clear_all_probes
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/tests/shell/base_probe/test_basic.sh b/tools/perf/tests/shell/base_probe/test_basic.sh
new file mode 100755
index 000000000000..9d8b5afbeddd
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_basic.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+# perf_probe :: Basic perf probe functionality (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_basic of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+# Description:
+#
+# This test checks the basic functionality of the perf probe command.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+if ! check_kprobes_available; then
+ print_overall_skipped
+ exit 2
+fi
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF probe --help > $LOGS_DIR/basic_helpmsg.log 2> $LOGS_DIR/basic_helpmsg.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-PROBE" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "PROBE\s+SYNTAX" "PROBE\s+ARGUMENT" "LINE\s+SYNTAX" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "LAZY\s+MATCHING" "FILTER\s+PATTERN" "EXAMPLES" "SEE\s+ALSO" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "vmlinux" "module=" "source=" "verbose" "quiet" "add=" "del=" "list.*EVENT" "line=" "vars=" "externs" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "no-inlines" "funcs.*FILTER" "filter=FILTER" "force" "dry-run" "max-probes" "exec=" "demangle-kernel" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_no_patterns_found.pl "No manual entry for" < $LOGS_DIR/basic_helpmsg.err
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### usage message
+
+# without any args perf-probe should print usage
+$CMD_PERF probe 2> $LOGS_DIR/basic_usage.log > /dev/null
+
+../common/check_all_patterns_found.pl "[Uu]sage" "perf probe" "verbose" "quiet" "add" "del" "force" "line" "vars" "externs" "range" < $LOGS_DIR/basic_usage.log
+CHECK_EXIT_CODE=$?
+
+print_results 0 $CHECK_EXIT_CODE "usage message"
+(( TEST_RESULT += $? ))
+
+
+### quiet switch
+
+# '--quiet' should mute all output
+$CMD_PERF probe --quiet --add vfs_read > $LOGS_DIR/basic_quiet01.log 2> $LOGS_DIR/basic_quiet01.err
+PERF_EXIT_CODE=$?
+$CMD_PERF probe --quiet --del vfs_read > $LOGS_DIR/basic_quiet02.log 2> $LOGS_DIR/basic_quiet02.err
+(( PERF_EXIT_CODE += $? ))
+
+test "`cat $LOGS_DIR/basic_quiet*log $LOGS_DIR/basic_quiet*err | wc -l`" -eq 0
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "quiet switch"
+(( TEST_RESULT += $? ))
+
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/tests/shell/base_probe/test_invalid_options.sh b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
new file mode 100755
index 000000000000..92f7254eb32a
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_invalid_options.sh
@@ -0,0 +1,82 @@
+#!/bin/bash
+# perf_probe :: Reject invalid options (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_invalid_options of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks whether invalid and mutually incompatible options are reported.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+if ! check_kprobes_available; then
+ print_overall_skipped
+ exit 2
+fi
+
+# Check for presence of DWARF
+$CMD_PERF check feature -q dwarf
+[ $? -ne 0 ] && HINT_FAIL="Some of the tests need DWARF to run"
+
+### missing argument
+
+# some options require an argument
+for opt in '-a' '-d' '-L' '-V'; do
+ ! $CMD_PERF probe $opt 2> $LOGS_DIR/invalid_options_missing_argument$opt.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Error: switch .* requires a value" < $LOGS_DIR/invalid_options_missing_argument$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "missing argument for $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### unnecessary argument
+
+# some options can be used without an argument
+for opt in '-F' '-l'; do
+	$CMD_PERF probe $opt > /dev/null 2> $LOGS_DIR/invalid_options_unnecessary_argument$opt.err
+ PERF_EXIT_CODE=$?
+
+ test ! -s $LOGS_DIR/invalid_options_unnecessary_argument$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "unnecessary argument for $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### mutually exclusive options
+
+# some options are mutually exclusive
+test -e $LOGS_DIR/invalid_options_mutually_exclusive.log && rm -f $LOGS_DIR/invalid_options_mutually_exclusive.log
+for opt in '-a xxx -d xxx' '-a xxx -L foo' '-a xxx -V foo' '-a xxx -l' '-a xxx -F' \
+ '-d xxx -L foo' '-d xxx -V foo' '-d xxx -l' '-d xxx -F' \
+ '-L foo -V bar' '-L foo -l' '-L foo -F' '-V foo -l' '-V foo -F' '-l -F'; do
+ ! $CMD_PERF probe $opt > /dev/null 2> $LOGS_DIR/aux.log
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Error: switch .+ cannot be used with switch .+" < $LOGS_DIR/aux.log
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "mutually exclusive options :: $opt"
+ (( TEST_RESULT += $? ))
+
+ # gather the logs
+	grep "Error" $LOGS_DIR/aux.log >> $LOGS_DIR/invalid_options_mutually_exclusive.log
+done
+
+
+# print overall results
+print_overall_results "$TEST_RESULT" $HINT_FAIL
+exit $?
diff --git a/tools/perf/tests/shell/base_probe/test_line_semantics.sh b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
new file mode 100755
index 000000000000..20435b6bf6bc
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_line_semantics.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+# perf_probe :: Check patterns for line semantics (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_line_semantics of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks whether semantic errors in the '--line' option's
+# arguments are properly reported.
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+if ! check_kprobes_available; then
+ print_overall_skipped
+ exit 2
+fi
+
+# Check for presence of DWARF
+$CMD_PERF check feature -q dwarf
+[ $? -ne 0 ] && HINT_FAIL="Some of the tests need DWARF to run"
+
+### acceptable --line descriptions
+
+# testing acceptance of valid patterns for the '--line' option
+VALID_PATTERNS="func func:10 func:0-10 func:2+10 func@source.c func@source.c:1 source.c:1 source.c:1+1 source.c:1-10"
+for desc in $VALID_PATTERNS; do
+ ! ( $CMD_PERF probe --line $desc 2>&1 | grep -q "Semantic error" )
+ CHECK_EXIT_CODE=$?
+
+ print_results 0 $CHECK_EXIT_CODE "acceptable descriptions :: $desc"
+ (( TEST_RESULT += $? ))
+done
+
+
+### unacceptable --line descriptions
+
+# testing handling of invalid patterns for the '--line' option
+INVALID_PATTERNS="func:foo func:1-foo func:1+foo func;lazy\*pattern"
+for desc in $INVALID_PATTERNS; do
+ $CMD_PERF probe --line $desc 2>&1 | grep -q "Semantic error"
+ CHECK_EXIT_CODE=$?
+
+ print_results 0 $CHECK_EXIT_CODE "unacceptable descriptions :: $desc"
+ (( TEST_RESULT += $? ))
+done
+
+
+# print overall results
+print_overall_results "$TEST_RESULT" $HINT_FAIL
+exit $?
diff --git a/tools/perf/tests/shell/base_report/setup.sh b/tools/perf/tests/shell/base_report/setup.sh
new file mode 100755
index 000000000000..b03501b2e8fc
--- /dev/null
+++ b/tools/perf/tests/shell/base_report/setup.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# perftool-testsuite :: perf_report
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# setup.sh of perf report test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# We need some sample data for perf-report testing
+#
+#
+
+# include working environment
+. ../common/init.sh
+
+test -d "$HEADER_TAR_DIR" || mkdir -p "$HEADER_TAR_DIR"
+
+SW_EVENT="cpu-clock"
+
+$CMD_PERF record -asdg -e $SW_EVENT -o $CURRENT_TEST_DIR/perf.data -- $CMD_LONGER_SLEEP 2> $LOGS_DIR/setup.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data file"
+TEST_RESULT=$?
+
+print_overall_results $TEST_RESULT
+exit $?
diff --git a/tools/perf/tests/shell/base_report/stderr-whitelist.txt b/tools/perf/tests/shell/base_report/stderr-whitelist.txt
new file mode 100644
index 000000000000..e3341401b47c
--- /dev/null
+++ b/tools/perf/tests/shell/base_report/stderr-whitelist.txt
@@ -0,0 +1,5 @@
+no symbols found in .*, maybe install a debug package
+was updated .*is prelink enabled.+ Restart the long running apps that use it
+Warning:
+\d+ out of order events recorded.
+detected invalid bpf_prog_info
diff --git a/tools/perf/tests/shell/base_report/test_basic.sh b/tools/perf/tests/shell/base_report/test_basic.sh
new file mode 100755
index 000000000000..2398eba4d3fd
--- /dev/null
+++ b/tools/perf/tests/shell/base_report/test_basic.sh
@@ -0,0 +1,190 @@
+#!/bin/bash
+# perf_report :: Basic perf report options (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_basic of perf_report test
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test checks the basic functionality of the perf report command.
+#
+#
+
+# include working environment
+. ../common/init.sh
+
+TEST_RESULT=0
+
+
+### help message
+
+if [ "$PARAM_GENERAL_HELP_TEXT_CHECK" = "y" ]; then
+ # test that a help message is shown and looks reasonable
+ $CMD_PERF report --help > $LOGS_DIR/basic_helpmsg.log 2> $LOGS_DIR/basic_helpmsg.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "PERF-REPORT" "NAME" "SYNOPSIS" "DESCRIPTION" "OPTIONS" "OVERHEAD\s+CALCULATION" "SEE ALSO" < $LOGS_DIR/basic_helpmsg.log
+ CHECK_EXIT_CODE=$?
+ ../common/check_all_patterns_found.pl "input" "verbose" "show-nr-samples" "show-cpu-utilization" "threads" "comms" "pid" "tid" "dsos" "symbols" "symbol-filter" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "hide-unresolved" "sort" "fields" "parent" "exclude-other" "column-widths" "field-separator" "dump-raw-trace" "children" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "call-graph" "max-stack" "inverted" "ignore-callees" "pretty" "stdio" "tui" "gtk" "vmlinux" "kallsyms" "modules" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "force" "symfs" "cpu" "disassembler-style" "source" "asm-raw" "show-total-period" "show-info" "branch-stack" "group" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_all_patterns_found.pl "branch-history" "objdump" "demangle" "percent-limit" "percentage" "header" "itrace" "full-source-path" "show-ref-call-graph" < $LOGS_DIR/basic_helpmsg.log
+ (( CHECK_EXIT_CODE += $? ))
+ ../common/check_no_patterns_found.pl "No manual entry for" < $LOGS_DIR/basic_helpmsg.err
+ (( CHECK_EXIT_CODE += $? ))
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "help message"
+ (( TEST_RESULT += $? ))
+else
+ print_testcase_skipped "help message"
+fi
+
+
+### basic execution
+
+# test that perf report is even working
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data --stdio > $LOGS_DIR/basic_basic.log 2> $LOGS_DIR/basic_basic.err
+PERF_EXIT_CODE=$?
+
+REGEX_LOST_SAMPLES_INFO="#\s*Total Lost Samples:\s+$RE_NUMBER"
+REGEX_SAMPLES_INFO="#\s*Samples:\s+(?:$RE_NUMBER)\w?\s+of\s+event\s+'$RE_EVENT_ANY'"
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+\S+\s+\[kernel\.(?:vmlinux|kallsyms)\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LOST_SAMPLES_INFO" "$REGEX_SAMPLES_INFO" "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_basic.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_basic.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "basic execution"
+(( TEST_RESULT += $? ))
+
+
+### number of samples
+
+# '--show-nr-samples' should show number of samples for each symbol
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --show-nr-samples > $LOGS_DIR/basic_nrsamples.log 2> $LOGS_DIR/basic_nrsamples.err
+PERF_EXIT_CODE=$?
+
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+Samples\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER\s+\S+\s+\[kernel\.(?:vmlinux|kallsyms)\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_nrsamples.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_nrsamples.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "number of samples"
+(( TEST_RESULT += $? ))
+
+
+### header
+
+# '--header' and '--header-only' should show perf report header
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data --stdio --header-only > $LOGS_DIR/basic_header.log
+PERF_EXIT_CODE=$?
+
+REGEX_LINE_TIMESTAMP="#\s+captured on\s*:\s*$RE_DATE_TIME"
+REGEX_LINE_HOSTNAME="#\s+hostname\s*:\s*$MY_HOSTNAME"
+REGEX_LINE_KERNEL="#\s+os release\s*:\s*${MY_KERNEL_VERSION//+/\\+}"
+REGEX_LINE_PERF="#\s+perf version\s*:\s*"
+REGEX_LINE_ARCH="#\s+arch\s*:\s*$MY_ARCH"
+REGEX_LINE_CPUS_ONLINE="#\s+nrcpus online\s*:\s*$MY_CPUS_ONLINE"
+REGEX_LINE_CPUS_AVAIL="#\s+nrcpus avail\s*:\s*$MY_CPUS_AVAILABLE"
+# disable precise check for "nrcpus avail" in BASIC runmode
+test $PERFTOOL_TESTSUITE_RUNMODE -lt $RUNMODE_STANDARD && REGEX_LINE_CPUS_AVAIL="#\s+nrcpus avail\s*:\s*$RE_NUMBER"
+../common/check_all_patterns_found.pl "$REGEX_LINE_TIMESTAMP" "$REGEX_LINE_HOSTNAME" "$REGEX_LINE_KERNEL" "$REGEX_LINE_PERF" "$REGEX_LINE_ARCH" "$REGEX_LINE_CPUS_ONLINE" "$REGEX_LINE_CPUS_AVAIL" < $LOGS_DIR/basic_header.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "header"
+(( TEST_RESULT += $? ))
+
+# '--header' and '--header-only' should use creation time
+OLD_TIMESTAMP=`$CMD_PERF report --stdio --header-only -i $CURRENT_TEST_DIR/perf.data | grep "captured on"`
+PERF_EXIT_CODE=$?
+
+( tar -C $CURRENT_TEST_DIR -c perf.data | xz > $CURRENT_TEST_DIR/perf.data.tar.xz ; xzcat $CURRENT_TEST_DIR/perf.data.tar.xz | tar x -C $HEADER_TAR_DIR )
+(( PERF_EXIT_CODE += $? ))
+
+NEW_TIMESTAMP=`$CMD_PERF report --stdio --header-only -i $HEADER_TAR_DIR/perf.data | grep "captured on"`
+(( PERF_EXIT_CODE += $? ))
+
+test "$OLD_TIMESTAMP" = "$NEW_TIMESTAMP"
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "header timestamp"
+(( TEST_RESULT += $? ))
+
+
+### show CPU utilization
+
+# '--showcpuutilization' should show percentages for both system and userspace modes
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data --stdio --showcpuutilization > $LOGS_DIR/basic_cpuut.log 2> $LOGS_DIR/basic_cpuut.err
+PERF_EXIT_CODE=$?
+
+REGEX_LINES_HEADER="#\s*Children\s+Self\s+sys\s+usr\s+Command\s+Shared Object\s+Symbol"
+REGEX_LINES="\s*$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER%\s+$RE_NUMBER%\s+\S+\s+\[kernel\.(?:vmlinux|kallsyms)\]\s+\[[k\.]\]\s+\w+"
+../common/check_all_patterns_found.pl "$REGEX_LINES_HEADER" "$REGEX_LINES" < $LOGS_DIR/basic_cpuut.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_cpuut.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "show CPU utilization"
+(( TEST_RESULT += $? ))
+
+
+### pid
+
+# '--pid=' should limit the output to the process with the given PID only
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --pid=1 > $LOGS_DIR/basic_pid.log 2> $LOGS_DIR/basic_pid.err
+PERF_EXIT_CODE=$?
+
+grep -P -v '^#' $LOGS_DIR/basic_pid.log | grep -P '\s+[\d\.]+%' | ../common/check_all_lines_matched.pl "systemd|init"
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_pid.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "pid"
+(( TEST_RESULT += $? ))
+
+
+### non-existing symbol
+
+# '--symbols' should show only the given symbols
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --symbols=dummynonexistingsymbol > $LOGS_DIR/basic_symbols.log 2> $LOGS_DIR/basic_symbols.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/basic_symbols.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_symbols.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing symbol"
+(( TEST_RESULT += $? ))
+
+
+### symbol filter
+
+# '--symbol-filter' should filter symbols based on substrings
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data --symbol-filter=map > $LOGS_DIR/basic_symbolfilter.log 2> $LOGS_DIR/basic_symbolfilter.err
+PERF_EXIT_CODE=$?
+
+grep -P -v '^#' $LOGS_DIR/basic_symbolfilter.log | grep -P '\s+[\d\.]+%' | ../common/check_all_lines_matched.pl "\[[k\.]\]\s+.*map"
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/basic_symbolfilter.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
+(( TEST_RESULT += $? ))
+
+
+# TODO: $CMD_PERF report -n --showcpuutilization -TUxDg 2> 01.log
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/tests/shell/common/check_all_lines_matched.pl b/tools/perf/tests/shell/common/check_all_lines_matched.pl
new file mode 100755
index 000000000000..fded48959a3f
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_all_lines_matched.pl
@@ -0,0 +1,39 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
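+#
+# Typical usage: check_all_lines_matched.pl "regex1" "regex2" < logfile
+# Exits 0 when every input line matches at least one of the given regexps.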
+
+@regexps = @ARGV;
+
+$max_printed_lines = 20;
+$max_printed_lines = $ENV{TESTLOG_ERR_MSG_MAX_LINES} if (defined $ENV{TESTLOG_ERR_MSG_MAX_LINES});
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} >= 2);
+
+$passed = 1;
+$lines_printed = 0;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ $line_matched = 0;
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $line_matched = 1;
+ last;
+ }
+ }
+
+ unless ($line_matched)
+ {
+ if ($lines_printed++ < $max_printed_lines)
+ {
+ print "Line did not match any pattern: \"$_\"\n" unless $quiet;
+ }
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/tests/shell/common/check_all_patterns_found.pl b/tools/perf/tests/shell/common/check_all_patterns_found.pl
new file mode 100755
index 000000000000..11bdf1d3460a
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_all_patterns_found.pl
@@ -0,0 +1,34 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
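+#
+# Typical usage: check_all_patterns_found.pl "regex1" "regex2" < logfile
+# Exits 0 when each given regexp matches at least one input line.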
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} >= 2);
+
+%found = ();
+$passed = 1;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $found{$r} = 1; # FIXME: maybe add counters -- how many times was the regexp matched
+ }
+ }
+}
+
+for $r (@regexps)
+{
+ unless (exists $found{$r})
+ {
+ print "Regexp not found: \"$r\"\n" unless $quiet;
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/tests/shell/common/check_errors_whitelisted.pl b/tools/perf/tests/shell/common/check_errors_whitelisted.pl
new file mode 100755
index 000000000000..c57d355dd76e
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_errors_whitelisted.pl
@@ -0,0 +1,51 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
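+#
+# Typical usage: check_errors_whitelisted.pl whitelist_file < stderr_log
+# Exits 0 when every line of the input matches some whitelisted pattern.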
+
+$whitelist_file = shift;
+
+if (defined $whitelist_file)
+{
+ open (INFILE, $whitelist_file) or die "Checker error: Unable to open the whitelist file: $whitelist_file\n";
+ @regexps = <INFILE>;
+ close INFILE or die "Checker error: Unable to close the whitelist file: $whitelist_file\n";
+}
+else
+{
+ @regexps = ();
+}
+
+$max_printed_lines = 20;
+$max_printed_lines = $ENV{TESTLOG_ERR_MSG_MAX_LINES} if (defined $ENV{TESTLOG_ERR_MSG_MAX_LINES});
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} >= 2);
+
+$passed = 1;
+$lines_printed = 0;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ $line_matched = 0;
+ for $r (@regexps)
+ {
+ chomp $r;
+ if (/$r/)
+ {
+ $line_matched = 1;
+ last;
+ }
+ }
+
+ unless ($line_matched)
+ {
+ if ($lines_printed++ < $max_printed_lines)
+ {
+ print "Line did not match any pattern: \"$_\"\n" unless $quiet;
+ }
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/tests/shell/common/check_no_patterns_found.pl b/tools/perf/tests/shell/common/check_no_patterns_found.pl
new file mode 100755
index 000000000000..770999e87a5f
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_no_patterns_found.pl
@@ -0,0 +1,34 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
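+#
+# Typical usage: check_no_patterns_found.pl "regex1" "regex2" < logfile
+# Exits 0 when none of the given regexps match any input line.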
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} >= 2);
+
+%found = ();
+$passed = 1;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $found{$r} = 1;
+ }
+ }
+}
+
+for $r (@regexps)
+{
+ if (exists $found{$r})
+ {
+ print "Regexp found: \"$r\"\n" unless $quiet;
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh
new file mode 100644
index 000000000000..26c7525651e0
--- /dev/null
+++ b/tools/perf/tests/shell/common/init.sh
@@ -0,0 +1,143 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# init.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file provides the basic functions for checking and
+# reporting results etc., and is sourced by the individual tests.
+#
+#
+
+
+. ../common/settings.sh
+. ../common/patterns.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+_echo()
+{
+ test "$TESTLOG_VERBOSITY" -ne 0 && echo -e "$@"
+}
+
+print_results()
+{
+ PERF_RETVAL="$1"; shift
+ CHECK_RETVAL="$1"; shift
+ FAILURE_REASON=""
+ TASK_COMMENT="$*"
+ if [ $PERF_RETVAL -eq 0 ] && [ $CHECK_RETVAL -eq 0 ]; then
+ _echo "$MPASS-- [ PASS ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT"
+ return 0
+ else
+ if [ $PERF_RETVAL -ne 0 ]; then
+ FAILURE_REASON="command exitcode"
+ fi
+ if [ $CHECK_RETVAL -ne 0 ]; then
+ test -n "$FAILURE_REASON" && FAILURE_REASON="$FAILURE_REASON + "
+ FAILURE_REASON="$FAILURE_REASON""output regexp parsing"
+ fi
+ _echo "$MFAIL-- [ FAIL ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT ($FAILURE_REASON)"
+ return 1
+ fi
+}
+
+print_overall_results()
+{
+ RETVAL="$1"; shift
+ TASK_COMMENT="$*"
+ test -n "$TASK_COMMENT" && TASK_COMMENT=":: $TASK_COMMENT"
+
+ if [ $RETVAL -eq 0 ]; then
+ _echo "$MALLPASS## [ PASS ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY"
+ else
+ _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found $TASK_COMMENT"
+ fi
+ return $RETVAL
+}
+
+print_testcase_skipped()
+{
+ TASK_COMMENT="$*"
+ _echo "$MSKIP-- [ SKIP ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT :: testcase skipped"
+ return 0
+}
+
+print_overall_skipped()
+{
+ _echo "$MSKIP## [ SKIP ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME :: testcase skipped"
+ return 0
+}
+
+print_warning()
+{
+ WARN_COMMENT="$*"
+ _echo "$MWARN-- [ WARN ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $WARN_COMMENT"
+ return 0
+}
+
+# this function should skip a testcase if the testsuite is not run in
+# a runmode that fits the testcase --> if the suite runs in BASIC mode
+# all STANDARD and EXPERIMENTAL testcases will be skipped; if the suite
+# runs in STANDARD mode, all EXPERIMENTAL testcases will be skipped and
+# if the suite runs in EXPERIMENTAL mode, nothing is skipped
+consider_skipping()
+{
+ TESTCASE_RUNMODE="$1"
+ # the runmode of a testcase needs to be at least the current suite's runmode
+ if [ $PERFTOOL_TESTSUITE_RUNMODE -lt $TESTCASE_RUNMODE ]; then
+ print_overall_skipped
+ exit 2
+ fi
+}
+
+detect_baremetal()
+{
+ # return values:
+ # 0 = bare metal
+ # 1 = virtualization detected
+ # 2 = unknown state
+ VIRT=`systemd-detect-virt 2>/dev/null`
+ test $? -eq 127 && return 2
+ test "$VIRT" = "none"
+}
+
+detect_intel()
+{
+ # return values:
+ # 0 = is Intel
+ # 1 = is not Intel or unknown
+ grep "vendor_id" < /proc/cpuinfo | grep -q "GenuineIntel"
+}
+
+detect_amd()
+{
+ # return values:
+ # 0 = is AMD
+ # 1 = is not AMD or unknown
+ grep "vendor_id" < /proc/cpuinfo | grep -q "AMD"
+}
+
+# base probe utility
+check_kprobes_available()
+{
+ test -e /sys/kernel/debug/tracing/kprobe_events
+}
+
+check_uprobes_available()
+{
+ test -e /sys/kernel/debug/tracing/uprobe_events
+}
+
+clear_all_probes()
+{
+ echo 0 > /sys/kernel/debug/tracing/events/enable
+ check_kprobes_available && echo > /sys/kernel/debug/tracing/kprobe_events
+ check_uprobes_available && echo > /sys/kernel/debug/tracing/uprobe_events
+}
+
+check_sdt_support()
+{
+ $CMD_PERF list sdt | grep sdt > /dev/null 2> /dev/null
+}
diff --git a/tools/perf/tests/shell/common/patterns.sh b/tools/perf/tests/shell/common/patterns.sh
new file mode 100644
index 000000000000..21dab25c7b7f
--- /dev/null
+++ b/tools/perf/tests/shell/common/patterns.sh
@@ -0,0 +1,268 @@
+# SPDX-License-Identifier: GPL-2.0
+
+export RE_NUMBER="[0-9\.]+"
+# Number
+# Examples:
+# 123.456
+
+
+export RE_NUMBER_HEX="[0-9A-Fa-f]+"
+# Hexadecimal number
+# Examples:
+# 1234
+# a58d
+# aBcD
+# deadbeef
+
+
+export RE_DATE_YYYYMMDD="[0-9]{4}-(?:(?:01|03|05|07|08|10|12)-(?:[0-2][0-9]|3[0-1])|02-[0-2][0-9]|(?:(?:04|06|09|11)-(?:[0-2][0-9]|30)))"
+# Date in YYYY-MM-DD form
+# Examples:
+# 1990-02-29
+# 0015-07-31
+# 2456-12-31
+#! 2012-13-01
+#! 1963-09-31
+
+
+export RE_TIME="(?:[0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]"
+# Time
+# Examples:
+# 15:12:27
+# 23:59:59
+#! 24:00:00
+#! 11:25:60
+#! 17:60:15
+
+
+export RE_DATE_TIME="\w+\s+\w+\s+$RE_NUMBER\s+$RE_TIME\s+$RE_NUMBER"
+# Time and date
+# Examples:
+# Wed Feb 12 10:46:26 2020
+# Mon Mar 2 13:27:06 2020
+#! St úno 12 10:57:21 CET 2020
+#! Po úno 14 15:17:32 2010
+
+
+export RE_ADDRESS="0x$RE_NUMBER_HEX"
+# Memory address
+# Examples:
+# 0x123abc
+# 0xffffffff9abe8ae8
+# 0x0
+
+
+export RE_ADDRESS_NOT_NULL="0x[0-9A-Fa-f]*[1-9A-Fa-f]+[0-9A-Fa-f]*"
+# Memory address (not NULL)
+# Examples:
+# 0xffffffff9abe8ae8
+#! 0x0
+#! 0x0000000000000000
+
+export RE_PROCESS_PID="[^\/]+\/\d+"
+# A process with PID
+# Example:
+# sleep/4102
+# test_overhead./866185
+# in:imjournal/1096
+# random#$& test/866607
+
+export RE_EVENT_ANY="[\w\-\:\/_=,]+"
+# Name of any event (universal)
+# Examples:
+# cpu-cycles
+# cpu/event=12,umask=34/
+# r41e1
+# nfs:nfs_getattr_enter
+
+
+export RE_EVENT="[\w\-:_]+"
+# Name of an ordinary event
+# Examples:
+# cpu-cycles
+
+
+export RE_EVENT_RAW="r$RE_NUMBER_HEX"
+# Specification of a raw event
+# Examples:
+# r41e1
+# r1a
+
+
+export RE_EVENT_CPU="cpu/(\w+=$RE_NUMBER_HEX,?)+/p*"
+# Specification of a CPU event
+# Examples:
+# cpu/event=12,umask=34/pp
+
+
+export RE_EVENT_UNCORE="uncore/[\w_]+/"
+# Specification of an uncore event
+# Examples:
+# uncore/qhl_request_local_reads/
+
+
+export RE_EVENT_SUBSYSTEM="[\w\-]+:[\w\-]+"
+# Name of an event from subsystem
+# Examples:
+# ext4:ext4_ordered_write_end
+# sched:sched_switch
+
+
+export RE_FILE_NAME="[\w\+\.-]+"
+# A filename
+# Examples:
+# libstdc++.so.6
+#! some/path
+
+
+export RE_PATH_ABSOLUTE="(?:\/$RE_FILE_NAME)+"
+# A full filepath
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# /usr/bin/mv
+#! some/relative/path
+#! ./some/relative/path
+
+
+export RE_PATH="(?:$RE_FILE_NAME)?$RE_PATH_ABSOLUTE"
+# A filepath
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# ./.emacs
+# src/fs/file.c
+
+
+export RE_DSO="(?:$RE_PATH_ABSOLUTE(?: \(deleted\))?|\[kernel\.kallsyms\]|\[unknown\]|\[vdso\]|\[kernel\.vmlinux\][\.\w]*)"
+# A DSO name in various result tables
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /usr/bin/somebinary (deleted)
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# [kernel.kallsyms]
+# [kernel.vmlinux]
+# [vdso]
+# [unknown]
+
+
+export RE_LINE_COMMENT="^#.*"
+# A comment line
+# Examples:
+# # Started on Thu Sep 10 11:43:00 2015
+
+
+export RE_LINE_EMPTY="^\s*$"
+# An empty line with possible whitespaces
+# Examples:
+#
+
+
+export RE_LINE_RECORD1="^\[\s+perf\s+record:\s+Woken up $RE_NUMBER times? to write data\s+\].*$"
+# The first line of perf-record "OK" output
+# Examples:
+# [ perf record: Woken up 1 times to write data ]
+
+
+export RE_LINE_RECORD2="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data(?:\.\d+)?\s*\(~?$RE_NUMBER samples\)\s+\].*$"
+# The second line of perf-record "OK" output
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data.3 (109 samples) ]
+
+
+export RE_LINE_RECORD2_TOLERANT="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data(?:\.\d+)?\s*(?:\(~?$RE_NUMBER samples\))?\s+\].*$"
+# The second line of perf-record "OK" output; a missing sample count is also OK here
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data.3 (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data ]
+
+
+export RE_LINE_RECORD2_TOLERANT_FILENAME="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\w*\.data(?:\.\d+)?\s*\(~?$RE_NUMBER samples\)\s+\].*$"
+# The second line of perf-record "OK" output
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf_ls.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf_aNyCaSe.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perfdata.data.3 (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB /some/temp/dir/my_own.data (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB ./UPPERCASE.data (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB ./aNyKiNDoF.data.3 (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB perf.data ]
+
+
+export RE_LINE_TRACE_FULL="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): sleep/4102 mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+#! 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) =
+
+export RE_LINE_TRACE_ONE_PROC="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*\w+\(.*\)\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+#! 0.115 ( 0.005 ms): open(filename: 0xd09e2ab2, flags: CLOEXEC ) =
+
+export RE_LINE_TRACE_CONTINUED="^\s*(?:$RE_NUMBER|\?)\s*\(\s*($RE_NUMBER\s*ms\s*)?\):\s*($RE_PROCESS_PID\s*)?\.\.\.\s*\[continued\]:\s+\w+\(\).*\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.000 ( 0.000 ms): ... [continued]: nanosleep()) = 0
+# 0.000 ( 0.000 ms): ... [continued]: nanosleep()) = 0x00000000
+# ? ( ): packagekitd/94838 ... [continued]: poll()) = 0 (Timeout)
+#! 0.000 ( 0.000 ms): ... [continued]: nanosleep()) =
+
+export RE_LINE_TRACE_UNFINISHED="^\s*$RE_NUMBER\s*\(\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+\.\.\.\s*$"
+# A line of perf-trace output
+# Examples:
+# 901.040 ( ): in:imjournal/1096 ppoll(ufds: 0x7f701a5adb70, nfds: 1, tsp: 0x7f701a5adaf0, sigsetsize: 8) ...
+# 613.727 ( ): gmain/1099 poll(ufds: 0x56248f6b64b0, nfds: 2, timeout_msecs: 3996) ...
+
+export RE_LINE_TRACE_SUMMARY_HEADER="\s*syscall\s+calls\s+(?:errors\s+)?total\s+min\s+avg\s+max\s+stddev"
+# A header of a perf-trace summary table
+# Example:
+# syscall calls total min avg max stddev
+# syscall calls errors total min avg max stddev
+
+
+export RE_LINE_TRACE_SUMMARY_CONTENT="^\s*\w+\s+(?:$RE_NUMBER\s+){5,6}$RE_NUMBER%"
+# A line of a perf-trace summary table
+# Example:
+# open 3 0.017 0.005 0.006 0.007 10.90%
+# openat 2 0 0.017 0.008 0.009 0.010 12.29%
+
+
+export RE_LINE_REPORT_CONTENT="^\s+$RE_NUMBER%\s+\w+\s+\S+\s+\S+\s+\S+" # FIXME
+# A line from typical perf report --stdio output
+# Example:
+# 100.00% sleep [kernel.vmlinux] [k] syscall_return_slowpath
+
+
+export RE_TASK="\s+[\w~\/ \.\+:#-]+(?:\[-1(?:\/\d+)?\]|\[\d+(?:\/\d+)?\])"
+# A name of a task used for perf sched timehist -s
+# Example:
+# sleep[62755]
+# runtest.sh[62762]
+# gmain[705/682]
+# xfsaild/dm-0[495]
+# kworker/u8:1-ev[62714]
+# :-1[-1/62756]
+# :-1[-1]
+# :-1[62756]
+
+
+export RE_SEGFAULT=".*(?:Segmentation\sfault|SIGSEGV|\score\s|dumped|segfault).*"
+# Possible variations of the segfault message
+# Example:
+# /bin/bash: line 1: 32 Segmentation fault timeout 15s
+# Segmentation fault (core dumped)
+# Program terminated with signal SIGSEGV
+#! WARNING: 12323431 isn't a 'cpu_core', please use a CPU list in the 'cpu_core' range (0-15)
diff --git a/tools/perf/tests/shell/common/settings.sh b/tools/perf/tests/shell/common/settings.sh
new file mode 100644
index 000000000000..cba1b338f96f
--- /dev/null
+++ b/tools/perf/tests/shell/common/settings.sh
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# settings.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file contains global settings for the whole testsuite.
+# Its purpose is to make it easy to change settings that are used
+# across many tests at once, e.g. the usual sample command.
+#
+# This file is intended to be sourced in the tests.
+#
+
+#### which perf to use in the testing
+export CMD_PERF=${CMD_PERF:-`which perf`}
+
+#### basic programs examined by perf
+export CMD_BASIC_SLEEP="sleep 0.1"
+export CMD_QUICK_SLEEP="sleep 0.01"
+export CMD_LONGER_SLEEP="sleep 2"
+export CMD_DOUBLE_LONGER_SLEEP="sleep 4"
+export CMD_VERY_LONG_SLEEP="sleep 30"
+export CMD_SIMPLE="true"
+
+#### testsuite run mode
+# define constants:
+export RUNMODE_BASIC=0
+export RUNMODE_STANDARD=1
+export RUNMODE_EXPERIMENTAL=2
+# default runmode is STANDARD
+export PERFTOOL_TESTSUITE_RUNMODE=${PERFTOOL_TESTSUITE_RUNMODE:-$RUNMODE_STANDARD}
+
+#### common settings
+export TESTLOG_VERBOSITY=${TESTLOG_VERBOSITY:-2}
+export TESTLOG_FORCE_COLOR=${TESTLOG_FORCE_COLOR:-n}
+export TESTLOG_ERR_MSG_MAX_LINES=${TESTLOG_ERR_MSG_MAX_LINES:-20}
+export TESTLOG_CLEAN=${TESTLOG_CLEAN:-y}
+
+#### other environment-related settings
+export TEST_IGNORE_MISSING_PMU=${TEST_IGNORE_MISSING_PMU:-n}
+
+#### clear locale
+export LC_ALL=C
+
+#### colors
+if [ -t 1 ] || [ "$TESTLOG_FORCE_COLOR" = "y" ] || [ "$TESTLOG_FORCE_COLOR" = "yes" ]; then
+ export MPASS="\e[32m"
+ export MALLPASS="\e[1;32m"
+ export MFAIL="\e[31m"
+ export MALLFAIL="\e[1;31m"
+ export MWARN="\e[1;35m"
+ export MSKIP="\e[33m"
+ export MHIGH="\e[1;33m"
+ export MEND="\e[m"
+else
+ export MPASS=""
+ export MALLPASS=""
+ export MFAIL=""
+ export MALLFAIL=""
+ export MWARN=""
+ export MSKIP=""
+ export MHIGH=""
+ export MEND=""
+fi
+
+### general info
+DIR_PATH=`dirname "$(readlink -e "$0")"`
+
+TEST_NAME=`basename $DIR_PATH | sed 's/base/perf/'`; export TEST_NAME
+MY_ARCH=`arch`; export MY_ARCH
+
+# storing logs and temporary files variables
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`; export PERFSUITE_RUN_DIR
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ export MAKE_TARGET_DIR="$CURRENT_TEST_DIR/examples"
+ export LOGS_DIR="$CURRENT_TEST_DIR/logs"
+ export HEADER_TAR_DIR="$CURRENT_TEST_DIR/header_tar"
+ test -d "$CURRENT_TEST_DIR" || mkdir -p "$CURRENT_TEST_DIR"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+ export HEADER_TAR_DIR="./header_tar"
+fi
+
+
+#### test parametrization
+# (applies only when running from a testcase directory, not from the suite root)
+if [ ! -d ./common ]; then
+ # set parameters based on runmode
+ if [ -f ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh ]; then
+ # shellcheck source=/dev/null
+ . ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh
+ fi
+ # if some parameters haven't been set until now, set them to default
+ if [ -f ../common/parametrization.sh ]; then
+ . ../common/parametrization.sh
+ fi
+fi
diff --git a/tools/perf/tests/shell/coresight/Makefile b/tools/perf/tests/shell/coresight/Makefile
index b070e779703e..fa08fd9a5991 100644
--- a/tools/perf/tests/shell/coresight/Makefile
+++ b/tools/perf/tests/shell/coresight/Makefile
@@ -24,6 +24,6 @@ CLEANDIRS = $(SUBDIRS:%=clean-%)
clean: $(CLEANDIRS)
$(CLEANDIRS):
- $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(Q)$(MAKE) -C $(@:clean-%=%) clean >/dev/null
+ $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(MAKE) -C $(@:clean-%=%) clean >/dev/null
.PHONY: all clean $(SUBDIRS) $(CLEANDIRS) $(INSTALLDIRS)
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop.sh b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
index 2d65defb7e0f..c63bc8c73e26 100755
--- a/tools/perf/tests/shell/coresight/asm_pure_loop.sh
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
@@ -1,5 +1,5 @@
#!/bin/sh -e
-# CoreSight / ASM Pure Loop
+# CoreSight / ASM Pure Loop (exclusive)
# SPDX-License-Identifier: GPL-2.0
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
index ddcc9bb850f5..8e29630957c8 100755
--- a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
+++ b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
@@ -1,5 +1,5 @@
#!/bin/sh -e
-# CoreSight / Memcpy 16k 10 Threads
+# CoreSight / Memcpy 16k 10 Threads (exclusive)
# SPDX-License-Identifier: GPL-2.0
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
index 2ce5e139b2fd..0c4c82a1c8e1 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
@@ -1,5 +1,5 @@
#!/bin/sh -e
-# CoreSight / Thread Loop 10 Threads - Check TID
+# CoreSight / Thread Loop 10 Threads - Check TID (exclusive)
# SPDX-License-Identifier: GPL-2.0
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
index 3ad9498753d7..d3aea9fc6ced 100755
--- a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
@@ -1,5 +1,5 @@
#!/bin/sh -e
-# CoreSight / Thread Loop 2 Threads - Check TID
+# CoreSight / Thread Loop 2 Threads - Check TID (exclusive)
# SPDX-License-Identifier: GPL-2.0
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
index 4fbb4a29aad3..7429d3a2ae43 100755
--- a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
@@ -1,5 +1,5 @@
#!/bin/sh -e
-# CoreSight / Unroll Loop Thread 10
+# CoreSight / Unroll Loop Thread 10 (exclusive)
# SPDX-License-Identifier: GPL-2.0
# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
diff --git a/tools/perf/tests/shell/ftrace.sh b/tools/perf/tests/shell/ftrace.sh
new file mode 100755
index 000000000000..c243731d2fbf
--- /dev/null
+++ b/tools/perf/tests/shell/ftrace.sh
@@ -0,0 +1,86 @@
+#!/bin/sh
+# perf ftrace tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# perf ftrace commands work only for root
+if [ "$(id -u)" != 0 ]; then
+ echo "perf ftrace test [Skipped: no permission]"
+ exit 2
+fi
+
+output=$(mktemp /tmp/__perf_test.ftrace.XXXXXX)
+
+cleanup() {
+ rm -f "${output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# this will be set in test_ftrace_trace()
+target_function=
+
+test_ftrace_list() {
+ echo "perf ftrace list test"
+ perf ftrace -F > "${output}"
+ # this will be used in test_ftrace_trace()
+ sleep_functions=$(grep 'sys_.*sleep$' "${output}")
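+ # (on x86_64 these look like __x64_sys_nanosleep or __x64_sys_clock_nanosleep)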
+ echo "syscalls for sleep:"
+ echo "${sleep_functions}"
+ echo "perf ftrace list test [Success]"
+}
+
+test_ftrace_trace() {
+ echo "perf ftrace trace test"
+ perf ftrace trace --graph-opts depth=5 sleep 0.1 > "${output}"
+ # it should have some function name that contains 'sleep'
+ grep "^#" "${output}"
+ grep -F 'sleep()' "${output}"
+ # find actual syscall function name
+ for FN in ${sleep_functions}; do
+ if grep -q "${FN}" "${output}"; then
+ target_function="${FN}"
+ echo "perf ftrace trace test [Success]"
+ return
+ fi
+ done
+
+ echo "perf ftrace trace test [Failure: sleep syscall not found]"
+ exit 1
+}
+
+test_ftrace_latency() {
+ echo "perf ftrace latency test"
+ echo "target function: ${target_function}"
+ perf ftrace latency -T "${target_function}" sleep 0.1 > "${output}"
+ grep "^#" "${output}"
+ grep "###" "${output}"
+ echo "perf ftrace latency test [Success]"
+}
+
+test_ftrace_profile() {
+ echo "perf ftrace profile test"
+ perf ftrace profile --graph-opts depth=5 sleep 0.1 > "${output}"
+ grep ^# "${output}"
+ time_re="[[:space:]]+1[[:digit:]]{5}\.[[:digit:]]{3}"
+ # 100283.000 100283.000 100283.000 1 __x64_sys_clock_nanosleep
+ # Check for one *clock_nanosleep line with a Count of just 1 that takes a bit more than 0.1 seconds
+ # Strip the __x64_sys part to work with other architectures
+ grep -E "^${time_re}${time_re}${time_re}[[:space:]]+1[[:space:]]+.*clock_nanosleep" "${output}"
+ echo "perf ftrace profile test [Success]"
+}
+
+test_ftrace_list
+test_ftrace_trace
+test_ftrace_latency
+test_ftrace_profile
+
+cleanup
+exit 0
diff --git a/tools/perf/tests/shell/lib/attr.py b/tools/perf/tests/shell/lib/attr.py
new file mode 100644
index 000000000000..3db9a7d78715
--- /dev/null
+++ b/tools/perf/tests/shell/lib/attr.py
@@ -0,0 +1,476 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from __future__ import print_function
+
+import os
+import sys
+import glob
+import optparse
+import platform
+import tempfile
+import logging
+import re
+import shutil
+import subprocess
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
+def data_equal(a, b):
+ # Allow multiple values in assignment separated by '|'
+ a_list = a.split('|')
+ b_list = b.split('|')
+
+ for a_item in a_list:
+ for b_item in b_list:
+ if (a_item == b_item):
+ return True
+ elif (a_item == '*') or (b_item == '*'):
+ return True
+
+ return False
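+# e.g. (illustrative) data_equal('0|1', '1') and data_equal('*', '5') are
+# True, while data_equal('0', '1') is False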
+
+class Fail(Exception):
+ def __init__(self, test, msg):
+ self.msg = msg
+ self.test = test
+ def getMsg(self):
+ return '\'%s\' - %s' % (self.test.path, self.msg)
+
+class Notest(Exception):
+ def __init__(self, test, arch):
+ self.arch = arch
+ self.test = test
+ def getMsg(self):
+ return '[%s] \'%s\'' % (self.arch, self.test.path)
+
+class Unsup(Exception):
+ def __init__(self, test):
+ self.test = test
+ def getMsg(self):
+ return '\'%s\'' % self.test.path
+
+class Event(dict):
+ terms = [
+ 'cpu',
+ 'flags',
+ 'type',
+ 'size',
+ 'config',
+ 'sample_period',
+ 'sample_type',
+ 'read_format',
+ 'disabled',
+ 'inherit',
+ 'pinned',
+ 'exclusive',
+ 'exclude_user',
+ 'exclude_kernel',
+ 'exclude_hv',
+ 'exclude_idle',
+ 'mmap',
+ 'comm',
+ 'freq',
+ 'inherit_stat',
+ 'enable_on_exec',
+ 'task',
+ 'watermark',
+ 'precise_ip',
+ 'mmap_data',
+ 'sample_id_all',
+ 'exclude_host',
+ 'exclude_guest',
+ 'exclude_callchain_kernel',
+ 'exclude_callchain_user',
+ 'wakeup_events',
+ 'bp_type',
+ 'config1',
+ 'config2',
+ 'branch_sample_type',
+ 'sample_regs_user',
+ 'sample_stack_user',
+ ]
+
+ def add(self, data):
+ for key, val in data:
+ log.debug(" %s = %s" % (key, val))
+ self[key] = val
+
+ def __init__(self, name, data, base):
+ log.debug(" Event %s" % name);
+ self.name = name;
+ self.group = ''
+ self.add(base)
+ self.add(data)
+
+ def equal(self, other):
+ for t in Event.terms:
+ # check key presence first to avoid a KeyError on a missing term
+ if t not in self or t not in other:
+ return False
+ log.debug(" [%s] %s %s" % (t, self[t], other[t]))
+ if not data_equal(self[t], other[t]):
+ return False
+ return True
+
+ def optional(self):
+ if 'optional' in self and self['optional'] == '1':
+ return True
+ return False
+
+ def diff(self, other):
+ for t in Event.terms:
+ if t not in self or t not in other:
+ continue
+ if not data_equal(self[t], other[t]):
+ log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
+
+def parse_version(version):
+ if not version:
+ return None
+ return [int(v) for v in version.split(".")[0:2]]
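+# e.g. parse_version("6.1.8-200.fc39") == [6, 1]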
+
+# Test file description needs to have following sections:
+# [config]
+# - just single instance in file
+# - needs to specify:
+# 'command' - perf command name
+# 'args' - special command arguments
+# 'ret' - Skip test if Perf doesn't exit with this value (0 by default)
+# 'test_ret' - If set to 'true', fail the test instead of skipping it when 'ret' does not match
+# 'arch' - architecture specific test (optional)
+# comma separated list, ! at the beginning
+# negates it.
+# 'auxv' - Truthy statement that is evaled in the scope of the auxv map. When false,
+# the test is skipped. For example 'auxv["AT_HWCAP"] == 10'. (optional)
+# 'kernel_since' - Inclusive kernel version from which the test will start running. Only the
+# first two values are supported, for example "6.1" (optional)
+# 'kernel_until' - Exclusive kernel version from which the test will stop running. (optional)
+# [eventX:base]
+# - one or multiple instances in file
+# - expected values assignments
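+#
+# An illustrative (hypothetical) test file:
+#
+#   [config]
+#   command = record
+#   args    = --no-bpf-event sleep 1
+#
+#   [event:base-record]
+#   sample_period = 4000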
+class Test(object):
+ def __init__(self, path, options):
+ parser = configparser.ConfigParser()
+ parser.read(path)
+
+ log.warning("running '%s'" % path)
+
+ self.path = path
+ self.test_dir = options.test_dir
+ self.perf = options.perf
+ self.command = parser.get('config', 'command')
+ self.args = parser.get('config', 'args')
+
+ try:
+ self.ret = parser.get('config', 'ret')
+ except:
+ self.ret = 0
+
+ self.test_ret = parser.getboolean('config', 'test_ret', fallback=False)
+
+ try:
+ self.arch = parser.get('config', 'arch')
+ log.warning("test limitation '%s'" % self.arch)
+ except:
+ self.arch = ''
+
+ self.auxv = parser.get('config', 'auxv', fallback=None)
+ self.kernel_since = parse_version(parser.get('config', 'kernel_since', fallback=None))
+ self.kernel_until = parse_version(parser.get('config', 'kernel_until', fallback=None))
+ self.expect = {}
+ self.result = {}
+ log.debug(" loading expected events");
+ self.load_events(path, self.expect)
+
+ def is_event(self, name):
+ return name.find("event") != -1
+
+ def skip_test_kernel_since(self):
+ if not self.kernel_since:
+ return False
+ return not self.kernel_since <= parse_version(platform.release())
+
+ def skip_test_kernel_until(self):
+ if not self.kernel_until:
+ return False
+ return not parse_version(platform.release()) < self.kernel_until
+
+ def skip_test_auxv(self):
+ def new_auxv(a, pattern):
+ items = list(filter(None, pattern.split(a)))
+ # AT_HWCAP is hex but doesn't have a prefix, so special case it
+ if items[0] == "AT_HWCAP":
+ value = int(items[-1], 16)
+ else:
+ try:
+ value = int(items[-1], 0)
+ except:
+ value = items[-1]
+ return (items[0], value)
+
+ if not self.auxv:
+ return False
+ auxv = subprocess.check_output("LD_SHOW_AUXV=1 sleep 0", shell=True) \
+ .decode(sys.stdout.encoding)
+ pattern = re.compile(r"[: ]+")
+ auxv = dict([new_auxv(a, pattern) for a in auxv.splitlines()])
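+ # self.auxv is evaluated with this local auxv dict in scope,
+ # e.g. 'auxv["AT_HWCAP"] == 10'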
+ return not eval(self.auxv)
+
+ def skip_test_arch(self, myarch):
+ # If the architecture is not set, always run the test
+ if self.arch == '':
+ # log.warning("test for arch %s is ok" % myarch)
+ return False
+
+ # Allow multiple values in assignment separated by ','
+ arch_list = self.arch.split(',')
+
+ # Handle negated list such as !s390x,ppc
+ if arch_list[0][0] == '!':
+ arch_list[0] = arch_list[0][1:]
+ log.warning("excluded architecture list %s" % arch_list)
+ for arch_item in arch_list:
+ # log.warning("test for %s arch is %s" % (arch_item, myarch))
+ if arch_item == myarch:
+ return True
+ return False
+
+ for arch_item in arch_list:
+ # log.warning("test for architecture '%s' current '%s'" % (arch_item, myarch))
+ if arch_item == myarch:
+ return False
+ return True
+
+ def restore_sample_rate(self, value=10000):
+ try:
+ # Check value of sample_rate
+ with open("/proc/sys/kernel/perf_event_max_sample_rate", "r") as fIn:
+ curr_value = fIn.readline()
+ # If too low restore to reasonable value
+ if not curr_value or int(curr_value) < int(value):
+ with open("/proc/sys/kernel/perf_event_max_sample_rate", "w") as fOut:
+ fOut.write(str(value))
+
+ except IOError as e:
+ log.warning("couldn't restore sample_rate value: I/O error %s" % e)
+ except ValueError as e:
+ log.warning("couldn't restore sample_rate value: Value error %s" % e)
+ except TypeError as e:
+ log.warning("couldn't restore sample_rate value: Type error %s" % e)
+
+ def load_events(self, path, events):
+ parser_event = configparser.ConfigParser()
+ parser_event.read(path)
+
+ # The event record section header contains the word 'event',
+ # optionally followed by ':' which allows a 'parent event' to be
+ # loaded first as a base
+ for section in filter(self.is_event, parser_event.sections()):
+
+ parser_items = parser_event.items(section)
+ base_items = {}
+
+ # Read parent event if there's any
+ if (':' in section):
+ base = section[section.index(':') + 1:]
+ parser_base = configparser.ConfigParser()
+ parser_base.read(self.test_dir + '/' + base)
+ base_items = parser_base.items('event')
+
+ e = Event(section, parser_items, base_items)
+ events[section] = e
+
+ def run_cmd(self, tempdir):
+ myarch = os.uname()[4]
+
+ if self.skip_test_arch(myarch):
+ raise Notest(self, myarch)
+
+ if self.skip_test_auxv():
+ raise Notest(self, "auxv skip")
+
+ if self.skip_test_kernel_since():
+ raise Notest(self, "old kernel skip")
+
+ if self.skip_test_kernel_until():
+ raise Notest(self, "new kernel skip")
+
+ self.restore_sample_rate()
+ cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
+ self.perf, self.command, tempdir, self.args)
+ ret = os.WEXITSTATUS(os.system(cmd))
+
+ log.info(" '%s' ret '%s', expected '%s'" % (cmd, str(ret), str(self.ret)))
+
+ if not data_equal(str(ret), str(self.ret)):
+ if self.test_ret:
+ raise Fail(self, "Perf exit code failure")
+ else:
+ raise Unsup(self)
+
+ def compare(self, expect, result):
+ match = {}
+
+ log.debug(" compare");
+
+ # For each expected event find all matching
+ # events in result. Fail if there's not any.
+ for exp_name, exp_event in expect.items():
+ exp_list = []
+ res_event = {}
+ log.debug(" matching [%s]" % exp_name)
+ for res_name, res_event in result.items():
+ log.debug(" to [%s]" % res_name)
+ if (exp_event.equal(res_event)):
+ exp_list.append(res_name)
+ log.debug(" ->OK")
+ else:
+ log.debug(" ->FAIL");
+
+ log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
+
+ # we did not find any matching event - fail
+ if not exp_list:
+ if exp_event.optional():
+ log.debug(" %s does not match, but is optional" % exp_name)
+ else:
+ if not res_event:
+ log.debug(" res_event is empty");
+ else:
+ exp_event.diff(res_event)
+ raise Fail(self, 'match failure');
+
+ match[exp_name] = exp_list
+
+ # For each defined group in the expected events
+ # check we match the same group in the result.
+ for exp_name, exp_event in expect.items():
+ group = exp_event.group
+
+ if (group == ''):
+ continue
+
+ for res_name in match[exp_name]:
+ res_group = result[res_name].group
+ if res_group not in match[group]:
+ raise Fail(self, 'group failure')
+
+ log.debug(" group: [%s] matches group leader %s" %
+ (exp_name, str(match[group])))
+
+ log.debug(" matched")
+
+ def resolve_groups(self, events):
+ for name, event in events.items():
+ group_fd = event['group_fd']
+ if group_fd == '-1':
+ continue
+
+ for iname, ievent in events.items():
+ if (ievent['fd'] == group_fd):
+ event.group = iname
+ log.debug('[%s] has group leader [%s]' % (name, iname))
+ break
+
+ def run(self):
+ tempdir = tempfile.mkdtemp()
+
+ try:
+ # run the test script
+ self.run_cmd(tempdir)
+
+ # load events expectation for the test
+ log.debug(" loading result events");
+ for f in glob.glob(tempdir + '/event*'):
+ self.load_events(f, self.result);
+
+ # resolve group_fd to event names
+ self.resolve_groups(self.expect)
+ self.resolve_groups(self.result)
+
+ # do the expectation - results matching - both ways
+ self.compare(self.expect, self.result)
+ self.compare(self.result, self.expect)
+
+ finally:
+ # cleanup
+ shutil.rmtree(tempdir)
+
+
+def run_tests(options):
+ for f in glob.glob(options.test_dir + '/' + options.test):
+ try:
+ Test(f, options).run()
+ except Unsup as obj:
+ log.warning("unsupp %s" % obj.getMsg())
+ except Notest as obj:
+ log.warning("skipped %s" % obj.getMsg())
+
+def setup_log(verbose):
+ global log
+ level = logging.CRITICAL
+
+ if verbose == 1:
+ level = logging.WARNING
+ if verbose == 2:
+ level = logging.INFO
+ if verbose >= 3:
+ level = logging.DEBUG
+
+ log = logging.getLogger('test')
+ log.setLevel(level)
+ ch = logging.StreamHandler()
+ ch.setLevel(level)
+ formatter = logging.Formatter('%(message)s')
+ ch.setFormatter(formatter)
+ log.addHandler(ch)
+
+USAGE = '''%s [OPTIONS]
+ -d dir # tests dir
+ -p path # perf binary
+ -t test # single test
+ -v # verbose level
+''' % sys.argv[0]
+
+def main():
+ parser = optparse.OptionParser(usage=USAGE)
+
+ parser.add_option("-t", "--test",
+ action="store", type="string", dest="test")
+ parser.add_option("-d", "--test-dir",
+ action="store", type="string", dest="test_dir")
+ parser.add_option("-p", "--perf",
+ action="store", type="string", dest="perf")
+ parser.add_option("-v", "--verbose",
+ default=0, action="count", dest="verbose")
+
+ options, args = parser.parse_args()
+ if args:
+ parser.error('FAILED wrong arguments %s' % ' '.join(args))
+ return -1
+
+ setup_log(options.verbose)
+
+ if not options.test_dir:
+ print('FAILED no -d option specified')
+ sys.exit(-1)
+
+ if not options.test:
+ options.test = 'test*'
+
+ try:
+ run_tests(options)
+
+ except Fail as obj:
+ print("FAILED %s" % obj.getMsg())
+ sys.exit(-1)
+
+ sys.exit(0)
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/perf/tests/shell/lib/coresight.sh b/tools/perf/tests/shell/lib/coresight.sh
index 11ed2c25ed91..184d62e7e5bd 100644
--- a/tools/perf/tests/shell/lib/coresight.sh
+++ b/tools/perf/tests/shell/lib/coresight.sh
@@ -18,7 +18,7 @@ BIN="$DIR/$TEST"
# If the test tool/binary does not exist and is executable then skip the test
if ! test -x "$BIN"; then exit 2; fi
# If CoreSight is not available, skip the test
-perf list cs_etm | grep -q cs_etm || exit 2
+perf list pmu | grep -q cs_etm || exit 2
DATD="."
# If the data dir env is set then make the data dir use that instead of ./
if test -n "$PERF_TEST_CORESIGHT_DATADIR"; then
diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
index 5d59c32ae3e7..561c93b75d77 100644
--- a/tools/perf/tests/shell/lib/perf_has_symbol.sh
+++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
@@ -3,7 +3,7 @@
perf_has_symbol()
{
- if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
+ if perf test -vv -F "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
echo "perf does have symbol '$1'"
return 0
fi
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index ea55d5ea1ced..b066d721f897 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -15,6 +15,7 @@ ap.add_argument('--event', action='store_true')
ap.add_argument('--per-core', action='store_true')
ap.add_argument('--per-thread', action='store_true')
ap.add_argument('--per-cache', action='store_true')
+ap.add_argument('--per-cluster', action='store_true')
ap.add_argument('--per-die', action='store_true')
ap.add_argument('--per-node', action='store_true')
ap.add_argument('--per-socket', action='store_true')
@@ -49,12 +50,14 @@ def check_json_output(expected_items):
'cgroup': lambda x: True,
'cpu': lambda x: isint(x),
'cache': lambda x: True,
+ 'cluster': lambda x: True,
'die': lambda x: True,
'event': lambda x: True,
'event-runtime': lambda x: isfloat(x),
'interval': lambda x: isfloat(x),
'metric-unit': lambda x: True,
'metric-value': lambda x: isfloat(x),
+ 'metric-threshold': lambda x: x in ['unknown', 'good', 'less good', 'nearly bad', 'bad'],
'metricgroup': lambda x: True,
'node': lambda x: True,
'pcnt-running': lambda x: isfloat(x),
@@ -66,14 +69,16 @@ def check_json_output(expected_items):
for item in json.loads(input):
if expected_items != -1:
count = len(item)
- if count != expected_items and count >= 1 and count <= 6 and 'metric-value' in item:
+ if count not in expected_items and count >= 1 and count <= 7 and 'metric-value' in item:
# Events that generate >1 metric may have isolated metric
# values and possibly other prefixes like interval, core,
# aggregate-number, or event-runtime/pcnt-running from multiplexing.
pass
- elif count != expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
+ elif count not in expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
pass
- elif count != expected_items:
+ elif count - 1 in expected_items and 'metric-threshold' in item:
+ pass
+ elif count not in expected_items:
raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
f' in \'{item}\'')
for key, value in item.items():
@@ -85,11 +90,11 @@ def check_json_output(expected_items):
try:
if args.no_args or args.system_wide or args.event:
- expected_items = 7
+ expected_items = [5, 7]
elif args.interval or args.per_thread or args.system_wide_no_aggr:
- expected_items = 8
- elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cache:
- expected_items = 9
+ expected_items = [6, 8]
+ elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
+ expected_items = [7, 9]
else:
# If no option is specified, don't check the number of items.
expected_items = -1
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 50a34a9cc040..0b94216c9c46 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0
import re
import csv
import json
@@ -6,36 +6,61 @@ import argparse
from pathlib import Path
import subprocess
+
+class TestError:
+ def __init__(self, metric: list[str], wl: str, value: list[float], low: float, up=float('nan'), description=str()):
+ self.metric: list = metric # multiple metrics in relationship type tests
+ self.workloads = [wl] # multiple workloads possible
+ self.collectedValue: list = value
+ self.valueLowBound = low
+ self.valueUpBound = up
+ self.description = description
+
+ def __repr__(self) -> str:
+ if len(self.metric) > 1:
+ return "\nMetric Relationship Error: \tThe collected value of metric {0}\n\
+ \tis {1} in workload(s): {2} \n\
+ \tbut expected value range is [{3}, {4}]\n\
+ \tRelationship rule description: \'{5}\'".format(self.metric, self.collectedValue, self.workloads,
+ self.valueLowBound, self.valueUpBound, self.description)
+ elif len(self.collectedValue) == 0:
+ return "\nNo Metric Value Error: \tMetric {0} returns with no value \n\
+ \tworkload(s): {1}".format(self.metric, self.workloads)
+ else:
+ return "\nWrong Metric Value Error: \tThe collected value of metric {0}\n\
+ \tis {1} in workload(s): {2}\n\
+ \tbut expected value range is [{3}, {4}]"\
+ .format(self.metric, self.collectedValue, self.workloads,
+ self.valueLowBound, self.valueUpBound)
+
+
class Validator:
def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
self.rulefname = rulefname
self.reportfname = reportfname
self.rules = None
- self.collectlist:str = metrics
+ self.collectlist: str = metrics
self.metrics = self.__set_metrics(metrics)
self.skiplist = set()
self.tolerance = t
self.workloads = [x for x in workload.split(",") if x]
- self.wlidx = 0 # idx of current workloads
- self.allresults = dict() # metric results of all workload
- self.allignoremetrics = dict() # metrics with no results or negative results
- self.allfailtests = dict()
+ self.wlidx = 0 # idx of current workloads
+ self.allresults = dict() # metric results of all workload
self.alltotalcnt = dict()
self.allpassedcnt = dict()
- self.allerrlist = dict()
- self.results = dict() # metric results of current workload
+ self.results = dict() # metric results of current workload
# vars for test pass/failure statistics
- self.ignoremetrics= set() # metrics with no results or negative results, neg result counts as a failed test
- self.failtests = dict()
+ # metrics with no results or negative results, neg result counts failed tests
+ self.ignoremetrics = set()
self.totalcnt = 0
self.passedcnt = 0
# vars for errors
self.errlist = list()
# vars for Rule Generator
- self.pctgmetrics = set() # Percentage rule
+ self.pctgmetrics = set() # Percentage rule
# vars for debug
self.datafname = datafname
@@ -69,10 +94,10 @@ class Validator:
ensure_ascii=True,
indent=4)
- def get_results(self, idx:int = 0):
- return self.results[idx]
+ def get_results(self, idx: int = 0):
+ return self.results.get(idx)
- def get_bounds(self, lb, ub, error, alias={}, ridx:int = 0) -> list:
+ def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
"""
Get bounds and tolerance from lb, ub, and error.
If missing lb, use 0.0; missing ub, use float('inf); missing error, use self.tolerance.
@@ -85,7 +110,7 @@ class Validator:
tolerance, denormalized base on upper bound value
"""
# init ubv and lbv to invalid values
- def get_bound_value (bound, initval, ridx):
+ def get_bound_value(bound, initval, ridx):
val = initval
if isinstance(bound, int) or isinstance(bound, float):
val = bound
@@ -113,10 +138,10 @@ class Validator:
return lbv, ubv, denormerr
- def get_value(self, name:str, ridx:int = 0) -> list:
+ def get_value(self, name: str, ridx: int = 0) -> list:
"""
Get value of the metric from self.results.
- If result of this metric is not provided, the metric name will be added into self.ignoremetics and self.errlist.
+ If the result of this metric is not provided, the metric name will be added into self.ignoremetrics.
All future test(s) on this metric will fail.
@param name: name of the metric
@@ -142,38 +167,43 @@ class Validator:
Check if metrics value are non-negative.
One metric is counted as one test.
Failure: when metric value is negative or not provided.
- Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics.
+ Metrics with negative value will be added into self.ignoremetrics.
"""
negmetric = dict()
pcnt = 0
tcnt = 0
rerun = list()
- for name, val in self.get_results().items():
+ results = self.get_results()
+ if not results:
+ return
+ for name, val in results.items():
if val < 0:
negmetric[name] = val
rerun.append(name)
else:
pcnt += 1
tcnt += 1
+ # The first collect_perf() round runs these metrics with the simple
+ # workload "true". We give metrics a second chance with a longer
+ # workload if fewer than 20 metrics failed the positive test.
if len(rerun) > 0 and len(rerun) < 20:
second_results = dict()
self.second_test(rerun, second_results)
for name, val in second_results.items():
- if name not in negmetric: continue
+ if name not in negmetric:
+ continue
if val >= 0:
del negmetric[name]
pcnt += 1
- self.failtests['PositiveValueTest']['Total Tests'] = tcnt
- self.failtests['PositiveValueTest']['Passed Tests'] = pcnt
if len(negmetric.keys()):
self.ignoremetrics.update(negmetric.keys())
- negmessage = ["{0}(={1:.4f})".format(name, val) for name, val in negmetric.items()]
- self.failtests['PositiveValueTest']['Failed Tests'].append({'NegativeValue': negmessage})
+ self.errlist.extend(
+ [TestError([m], self.workloads[self.wlidx], negmetric[m], 0) for m in negmetric.keys()])
return
- def evaluate_formula(self, formula:str, alias:dict, ridx:int = 0):
+ def evaluate_formula(self, formula: str, alias: dict, ridx: int = 0):
"""
Evaluate the value of formula.
@@ -187,10 +217,11 @@ class Validator:
sign = "+"
f = str()
- #TODO: support parenthesis?
+ # TODO: support parentheses?
for i in range(len(formula)):
if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'):
- s = alias[formula[b:i]] if i+1 < len(formula) else alias[formula[b:]]
+ s = alias[formula[b:i]] if i + 1 < len(formula) \
+ else alias[formula[b:]]
v = self.get_value(s, ridx)
if not v:
errs.append(s)
@@ -228,49 +259,49 @@ class Validator:
alias = dict()
for m in rule['Metrics']:
alias[m['Alias']] = m['Name']
- lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
- val, f = self.evaluate_formula(rule['Formula'], alias, ridx=rule['RuleIndex'])
+ lbv, ubv, t = self.get_bounds(
+ rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
+ val, f = self.evaluate_formula(
+ rule['Formula'], alias, ridx=rule['RuleIndex'])
+
+ lb = rule['RangeLower']
+ ub = rule['RangeUpper']
+ if isinstance(lb, str):
+ if lb in alias:
+ lb = alias[lb]
+ if isinstance(ub, str):
+ if ub in alias:
+ ub = alias[ub]
+
if val == -1:
- self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Description':f})
+ self.errlist.append(TestError([m['Name'] for m in rule['Metrics']], self.workloads[self.wlidx], [],
+ lb, ub, rule['Description']))
elif not self.check_bound(val, lbv, ubv, t):
- lb = rule['RangeLower']
- ub = rule['RangeUpper']
- if isinstance(lb, str):
- if lb in alias:
- lb = alias[lb]
- if isinstance(ub, str):
- if ub in alias:
- ub = alias[ub]
- self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Formula':f,
- 'RangeLower': lb, 'LowerBoundValue': self.get_value(lb),
- 'RangeUpper': ub, 'UpperBoundValue':self.get_value(ub),
- 'ErrorThreshold': t, 'CollectedValue': val})
+ self.errlist.append(TestError([m['Name'] for m in rule['Metrics']], self.workloads[self.wlidx], [val],
+ lb, ub, rule['Description']))
else:
self.passedcnt += 1
- self.failtests['RelationshipTest']['Passed Tests'] += 1
self.totalcnt += 1
- self.failtests['RelationshipTest']['Total Tests'] += 1
return
-
# Single Metric Test
- def single_test(self, rule:dict):
+ def single_test(self, rule: dict):
"""
Validate if the metrics are in the required value range.
eg. lower_bound <= metrics_value <= upper_bound
One metric is counted as one test in this type of test.
One rule may include one or more metrics.
Failure: when the metric value not provided or the value is outside the bounds.
- This test updates self.total_cnt and records failed tests in self.failtest['SingleMetricTest'].
+ This test updates self.total_cnt.
@param rule: dict with metrics to validate and the value range requirement
"""
- lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
+ lbv, ubv, t = self.get_bounds(
+ rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
metrics = rule['Metrics']
passcnt = 0
totalcnt = 0
- faillist = list()
failures = dict()
rerun = list()
for m in metrics:
@@ -286,25 +317,20 @@ class Validator:
second_results = dict()
self.second_test(rerun, second_results)
for name, val in second_results.items():
- if name not in failures: continue
+ if name not in failures:
+ continue
if self.check_bound(val, lbv, ubv, t):
passcnt += 1
del failures[name]
else:
- failures[name] = val
+ failures[name] = [val]
self.results[0][name] = val
self.totalcnt += totalcnt
self.passedcnt += passcnt
- self.failtests['SingleMetricTest']['Total Tests'] += totalcnt
- self.failtests['SingleMetricTest']['Passed Tests'] += passcnt
if len(failures.keys()) != 0:
- faillist = [{'MetricName':name, 'CollectedValue':val} for name, val in failures.items()]
- self.failtests['SingleMetricTest']['Failed Tests'].append({'RuleIndex':rule['RuleIndex'],
- 'RangeLower': rule['RangeLower'],
- 'RangeUpper': rule['RangeUpper'],
- 'ErrorThreshold':rule['ErrorThreshold'],
- 'Failure':faillist})
+ self.errlist.extend([TestError([name], self.workloads[self.wlidx], val,
+ rule['RangeLower'], rule['RangeUpper']) for name, val in failures.items()])
return
@@ -312,19 +338,11 @@ class Validator:
"""
Create final report and write into a JSON file.
"""
- alldata = list()
- for i in range(0, len(self.workloads)):
- reportstas = {"Total Rule Count": self.alltotalcnt[i], "Passed Rule Count": self.allpassedcnt[i]}
- data = {"Metric Validation Statistics": reportstas, "Tests in Category": self.allfailtests[i],
- "Errors":self.allerrlist[i]}
- alldata.append({"Workload": self.workloads[i], "Report": data})
-
- json_str = json.dumps(alldata, indent=4)
- print("Test validation finished. Final report: ")
- print(json_str)
+ print(self.errlist)
if self.debug:
- allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} for i in range(0, len(self.workloads))]
+ allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]}
+ for i in range(0, len(self.workloads))]
self.json_dump(allres, self.datafname)
def check_rule(self, testtype, metric_list):
@@ -342,13 +360,13 @@ class Validator:
return True
# Start of Collector and Converter
- def convert(self, data: list, metricvalues:dict):
+ def convert(self, data: list, metricvalues: dict):
"""
Convert collected metric data from the -j output to dict of {metric_name:value}.
"""
for json_string in data:
try:
- result =json.loads(json_string)
+ result = json.loads(json_string)
if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "":
name = result["metric-unit"].split(" ")[1] if len(result["metric-unit"].split(" ")) > 1 \
else result["metric-unit"]
@@ -365,9 +383,10 @@ class Validator:
print(" ".join(command))
cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
data = [x+'}' for x in cmd.stderr.split('}\n') if x]
+ if data[0][0] != '{':
+ data[0] = data[0][data[0].find('{'):]
return data
-
def collect_perf(self, workload: str):
"""
Collect metric data with "perf stat -M" on given workload with -a and -j.
@@ -385,14 +404,18 @@ class Validator:
if rule["TestType"] == "RelationshipTest":
metrics = [m["Name"] for m in rule["Metrics"]]
if not any(m not in collectlist[0] for m in metrics):
- collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]
+ collectlist[rule["RuleIndex"]] = [
+ ",".join(list(set(metrics)))]
for idx, metrics in collectlist.items():
- if idx == 0: wl = "true"
- else: wl = workload
+ if idx == 0:
+ wl = "true"
+ else:
+ wl = workload
for metric in metrics:
data = self._run_perf(metric, wl)
- if idx not in self.results: self.results[idx] = dict()
+ if idx not in self.results:
+ self.results[idx] = dict()
self.convert(data, self.results[idx])
return
@@ -412,7 +435,8 @@ class Validator:
2) create metric name list
"""
command = ['perf', 'list', '-j', '--details', 'metrics']
- cmd = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
+ cmd = subprocess.run(command, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, encoding='utf-8')
try:
data = json.loads(cmd.stdout)
for m in data:
@@ -453,12 +477,12 @@ class Validator:
rules = data['RelationshipRules']
self.skiplist = set([name.lower() for name in data['SkipList']])
self.rules = self.remove_unsupported_rules(rules)
- pctgrule = {'RuleIndex':0,
- 'TestType':'SingleMetricTest',
- 'RangeLower':'0',
+ pctgrule = {'RuleIndex': 0,
+ 'TestType': 'SingleMetricTest',
+ 'RangeLower': '0',
'RangeUpper': '100',
'ErrorThreshold': self.tolerance,
- 'Description':'Metrics in percent unit have value with in [0, 100]',
+ 'Description': 'Metrics in percent unit have a value within [0, 100]',
'Metrics': [{'Name': m.lower()} for m in self.pctgmetrics]}
self.rules.append(pctgrule)
@@ -469,8 +493,9 @@ class Validator:
idx += 1
if self.debug:
- #TODO: need to test and generate file name correctly
- data = {'RelationshipRules':self.rules, 'SupportedMetrics': [{"MetricName": name} for name in self.metrics]}
+ # TODO: need to test and generate file name correctly
+ data = {'RelationshipRules': self.rules, 'SupportedMetrics': [
+ {"MetricName": name} for name in self.metrics]}
self.json_dump(data, self.fullrulefname)
return
@@ -482,20 +507,17 @@ class Validator:
@param key: key to the dictionaries (index of self.workloads).
'''
self.allresults[key] = self.results
- self.allignoremetrics[key] = self.ignoremetrics
- self.allfailtests[key] = self.failtests
self.alltotalcnt[key] = self.totalcnt
self.allpassedcnt[key] = self.passedcnt
- self.allerrlist[key] = self.errlist
- #Initialize data structures before data validation of each workload
+ # Initialize data structures before data validation of each workload
def _init_data(self):
- testtypes = ['PositiveValueTest', 'RelationshipTest', 'SingleMetricTest']
+ testtypes = ['PositiveValueTest',
+ 'RelationshipTest', 'SingleMetricTest']
self.results = dict()
- self.ignoremetrics= set()
+ self.ignoremetrics = set()
self.errlist = list()
- self.failtests = {k:{'Total Tests':0, 'Passed Tests':0, 'Failed Tests':[]} for k in testtypes}
self.totalcnt = 0
self.passedcnt = 0
@@ -513,6 +535,9 @@ class Validator:
'''
if not self.collectlist:
self.parse_perf_metrics()
+ if not self.metrics:
+ print("No metric found for testing")
+ return 0
self.create_rules()
for i in range(0, len(self.workloads)):
self.wlidx = i
@@ -525,32 +550,33 @@ class Validator:
testtype = r['TestType']
if not self.check_rule(testtype, r['Metrics']):
continue
- if testtype == 'RelationshipTest':
+ if testtype == 'RelationshipTest':
self.relationship_test(r)
elif testtype == 'SingleMetricTest':
self.single_test(r)
else:
print("Unsupported Test Type: ", testtype)
- self.errlist.append("Unsupported Test Type from rule: " + r['RuleIndex'])
- self._storewldata(i)
print("Workload: ", self.workloads[i])
- print("Total metrics collected: ", self.failtests['PositiveValueTest']['Total Tests'])
- print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests'])
print("Total Test Count: ", self.totalcnt)
print("Passed Test Count: ", self.passedcnt)
-
+ self._storewldata(i)
self.create_report()
- return sum(self.alltotalcnt.values()) != sum(self.allpassedcnt.values())
+ return len(self.errlist) > 0
# End of Class Validator
def main() -> None:
- parser = argparse.ArgumentParser(description="Launch metric value validation")
-
- parser.add_argument("-rule", help="Base validation rule file", required=True)
- parser.add_argument("-output_dir", help="Path for validator output file, report file", required=True)
- parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_true", default=False)
- parser.add_argument("-wl", help="Workload to run while data collection", default="true")
+ parser = argparse.ArgumentParser(
+ description="Launch metric value validation")
+
+ parser.add_argument(
+ "-rule", help="Base validation rule file", required=True)
+ parser.add_argument(
+ "-output_dir", help="Path for validator output file, report file", required=True)
+ parser.add_argument("-debug", help="Debug run, save intermediate data to files",
+ action="store_true", default=False)
+ parser.add_argument(
+ "-wl", help="Workload to run while data collection", default="true")
parser.add_argument("-m", help="Metric list to validate", default="")
args = parser.parse_args()
outpath = Path(args.output_dir)
@@ -559,8 +585,8 @@ def main() -> None:
datafile = Path.joinpath(outpath, 'perf_data.json')
validator = Validator(args.rule, reportf, debug=args.debug,
- datafname=datafile, fullrulefname=fullrule, workload=args.wl,
- metrics=args.m)
+ datafname=datafile, fullrulefname=fullrule, workload=args.wl,
+ metrics=args.m)
ret = validator.test()
return ret
@@ -569,6 +595,3 @@ def main() -> None:
if __name__ == "__main__":
import sys
sys.exit(main())
-
-
-
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index bf4c1fb71c4b..5c33ec7a5a63 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -13,7 +13,12 @@ cleanup_probe_vfs_getname() {
add_probe_vfs_getname() {
add_probe_verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
- line=$(perf probe -L getname_flags 2>&1 | grep -E 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+ result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+ if [ -z "$line" ] ; then
+ result_aname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->aname = NULL;"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_aname_re" | sed -r "s/$result_aname_re/\1/")
+ fi
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
fi
@@ -27,7 +32,7 @@ skip_if_no_debuginfo() {
# check if perf is compiled with libtraceevent support
skip_no_probe_record_support() {
if [ $had_vfs_getname -eq 1 ] ; then
- perf record --dry-run -e $1 2>&1 | grep "libtraceevent is necessary for tracepoint support" && return 2
- return 1
+ perf check feature -q libtraceevent && return 1
+ return 2
fi
}
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 3cc158a64326..9a176ceae4a3 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -79,7 +79,7 @@ check_per_thread()
echo "[Skip] paranoid and not root"
return
fi
- perf stat --per-thread -a $2 true
+ perf stat --per-thread -p $$ $2 true
commachecker --per-thread
echo "[Success]"
}
@@ -97,6 +97,18 @@ check_per_cache_instance()
echo "[Success]"
}
+check_per_cluster()
+{
+ echo -n "Checking $1 output: per cluster "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-cluster -a $2 true
+ echo "[Success]"
+}
+
check_per_die()
{
echo -n "Checking $1 output: per die "
diff --git a/tools/perf/tests/shell/list.sh b/tools/perf/tests/shell/list.sh
index 8a868ae64560..76a9846cff22 100755
--- a/tools/perf/tests/shell/list.sh
+++ b/tools/perf/tests/shell/list.sh
@@ -24,8 +24,11 @@ trap trap_cleanup EXIT TERM INT
test_list_json() {
echo "Json output test"
+ # Generate perf list json output into list_output file.
perf list -j -o "${list_output}"
- $PYTHON -m json.tool "${list_output}"
+ # Validate the json using python, redirect the json copy to /dev/null as
+ # otherwise the test may block writing to stdout.
+ $PYTHON -m json.tool "${list_output}" /dev/null
echo "Json output test [Success]"
}
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
index c1ec5762215b..30d195d4c62f 100755
--- a/tools/perf/tests/shell/lock_contention.sh
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -27,7 +27,7 @@ check() {
exit
fi
- if ! perf list | grep -q lock:contention_begin; then
+ if ! perf list tracepoint | grep -q lock:contention_begin; then
echo "[Skip] No lock contention tracepoints"
err=2
exit
diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh
new file mode 100755
index 000000000000..7b1bfd0f888f
--- /dev/null
+++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# perftool-testsuite_probe (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+test -d "$(dirname "$0")/base_probe" || exit 2
+cd "$(dirname "$0")/base_probe" || exit 2
+status=0
+
+PERFSUITE_RUN_DIR=$(mktemp -d /tmp/"$(basename "$0" .sh)".XXX)
+export PERFSUITE_RUN_DIR
+
+for testcase in setup.sh test_*; do # skip setup.sh if not present or not executable
+ test -x "$testcase" || continue
+ ./"$testcase"
+ (( status += $? ))
+done
+
+if ! [ "$PERFTEST_KEEP_LOGS" = "y" ]; then
+ rm -rf "$PERFSUITE_RUN_DIR"
+fi
+
+test $status -ne 0 && exit 1
+exit 0
diff --git a/tools/perf/tests/shell/perftool-testsuite_report.sh b/tools/perf/tests/shell/perftool-testsuite_report.sh
new file mode 100755
index 000000000000..a8cf75b4e77e
--- /dev/null
+++ b/tools/perf/tests/shell/perftool-testsuite_report.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# perftool-testsuite_report (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+test -d "$(dirname "$0")/base_report" || exit 2
+cd "$(dirname "$0")/base_report" || exit 2
+status=0
+
+PERFSUITE_RUN_DIR=$(mktemp -d /tmp/"$(basename "$0" .sh)".XXX)
+export PERFSUITE_RUN_DIR
+
+for testcase in setup.sh test_*; do # skip setup.sh if not present or not executable
+ test -x "$testcase" || continue
+ ./"$testcase"
+ (( status += $? ))
+done
+
+if ! [ "$PERFTEST_KEEP_LOGS" = "y" ]; then
+ rm -rf "$PERFSUITE_RUN_DIR"
+fi
+
+test $status -ne 0 && exit 1
+exit 0
diff --git a/tools/perf/tests/shell/pipe_test.sh b/tools/perf/tests/shell/pipe_test.sh
index a78d35d2cff0..e459aa99a951 100755
--- a/tools/perf/tests/shell/pipe_test.sh
+++ b/tools/perf/tests/shell/pipe_test.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf pipe recording and injection test
# SPDX-License-Identifier: GPL-2.0
@@ -11,31 +11,117 @@ sym="noploop"
skip_test_missing_symbol ${sym}
data=$(mktemp /tmp/perf.data.XXXXXX)
+data2=$(mktemp /tmp/perf.data2.XXXXXX)
prog="perf test -w noploop"
-task="perf"
+[ "$(uname -m)" = "s390x" ] && prog="$prog 3"
+err=0
-if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep ${task}; then
- echo "cannot find the test file in the perf report"
- exit 1
-fi
+set -e
-if ! perf record -e task-clock:u -o - ${prog} | perf inject -b | perf report -i - | grep ${sym}; then
- echo "cannot find noploop function in pipe #1"
- exit 1
-fi
+cleanup() {
+ rm -rf "${data}"
+ rm -rf "${data}".old
+ rm -rf "${data2}"
+ rm -rf "${data2}".old
-perf record -e task-clock:u -o - ${prog} | perf inject -b -o ${data}
-if ! perf report -i ${data} | grep ${sym}; then
- echo "cannot find noploop function in pipe #2"
- exit 1
-fi
+ trap - EXIT TERM INT
+}
-perf record -e task-clock:u -o ${data} ${prog}
-if ! perf inject -b -i ${data} | perf report -i - | grep ${sym}; then
- echo "cannot find noploop function in pipe #3"
- exit 1
-fi
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+test_record_report() {
+ echo
+ echo "Record+report pipe test"
+
+ task="perf"
+ if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep -q ${task}
+ then
+ echo "Record+report pipe test [Failed - cannot find the test file in the perf report #1]"
+ err=1
+ return
+ fi
+
+ if ! perf record -g -e task-clock:u -o - ${prog} | perf report -i - --task | grep -q ${task}
+ then
+ echo "Record+report pipe test [Failed - cannot find the test file in the perf report #2]"
+ err=1
+ return
+ fi
+
+ perf record -g -e task-clock:u -o - ${prog} > ${data}
+ if ! perf report -i ${data} --task | grep -q ${task}
+ then
+ echo "Record+report pipe test [Failed - cannot find the test file in the perf report #3]"
+ err=1
+ return
+ fi
+
+ echo "Record+report pipe test [Success]"
+}
+
+test_inject_bids() {
+ inject_opt=$1
+
+ echo
+ echo "Inject ${inject_opt} build-ids test"
+
+ if ! perf record -e task-clock:u -o - ${prog} | perf inject ${inject_opt} | perf report -i - | grep -q ${sym}
+ then
+ echo "Inject build-ids test [Failed - cannot find noploop function in pipe #1]"
+ err=1
+ return
+ fi
+
+ if ! perf record -g -e task-clock:u -o - ${prog} | perf inject ${inject_opt} | perf report -i - | grep -q ${sym}
+ then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #2]"
+ err=1
+ return
+ fi
+
+ perf record -e task-clock:u -o - ${prog} | perf inject ${inject_opt} -o ${data}
+ if ! perf report -i ${data} | grep -q ${sym}; then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #3]"
+ err=1
+ return
+ fi
+
+ perf record -e task-clock:u -o ${data} ${prog}
+ if ! perf inject ${inject_opt} -i ${data} | perf report -i - | grep -q ${sym}; then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #4]"
+ err=1
+ return
+ fi
+
+ perf record -e task-clock:u -o - ${prog} > ${data}
+ if ! perf inject ${inject_opt} -i ${data} | perf report -i - | grep -q ${sym}; then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #5]"
+ err=1
+ return
+ fi
+
+ perf record -e task-clock:u -o - ${prog} > ${data}
+ perf inject ${inject_opt} -i ${data} -o ${data2}
+ if ! perf report -i ${data2} | grep -q ${sym}; then
+ echo "Inject ${inject_opt} build-ids test [Failed - cannot find noploop function in pipe #6]"
+ err=1
+ return
+ fi
+
+ echo "Inject ${inject_opt} build-ids test [Success]"
+}
+
+test_record_report
+test_inject_bids -B
+test_inject_bids -b
+test_inject_bids --buildid-all
+test_inject_bids --mmap2-buildid-all
+
+cleanup
+exit $err
-rm -f ${data} ${data}.old
-exit 0
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index 554e12e83c55..0c5aacc446b3 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# Add vfs_getname probe to get syscall args filenames
+# Add vfs_getname probe to get syscall args filenames (exclusive)
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 72c65570db37..d5e5193cceb6 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# probe libc's inet_pton & backtrace it with ping
+# probe libc's inet_pton & backtrace it with ping (exclusive)
# Installs a probe on libc's inet_pton function, that will use uprobes,
# then use 'perf trace' on a ping to localhost asking for just one packet
@@ -40,20 +40,11 @@ trace_libc_inet_pton_backtrace() {
case "$(uname -m)" in
s390x)
eventattr='call-graph=dwarf,max-stack=4'
- echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
- echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
- ;;
- ppc64|ppc64le)
- eventattr='max-stack=4'
- # Add gaih_inet to expected backtrace only if it is part of libc.
- if nm $libc | grep -F -q gaih_inet.; then
- echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- fi
- echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
- echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ echo "((__GI_)?getaddrinfo|text_to_binary_address)\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+ echo "(gaih_inet|main)\+0x[[:xdigit:]]+[[:space:]]\(inlined|.*/bin/ping.*\)$" >> $expected
;;
*)
- eventattr='max-stack=3'
+ eventattr='max-stack=4'
echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
esac
@@ -63,7 +54,10 @@ trace_libc_inet_pton_backtrace() {
# Check presence of libtraceevent support to run perf record
skip_no_probe_record_support "$event_name/$eventattr/"
- [ $? -eq 2 ] && return 2
+ if [ $? -eq 2 ]; then
+ echo "WARN: Skipping test trace_libc_inet_pton_backtrace. No libtraceevent support."
+ return 2
+ fi
perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
# check if perf data file got created in above step.
@@ -73,14 +67,25 @@ trace_libc_inet_pton_backtrace() {
fi
perf script -i $perf_data | tac | grep -m1 ^ping -B9 | tac > $perf_script
- exec 3<$perf_script
exec 4<$expected
- while read line <&3 && read -r pattern <&4; do
+ while read -r pattern <&4; do
+ echo "Pattern: $pattern"
[ -z "$pattern" ] && break
- echo $line
- echo "$line" | grep -E -q "$pattern"
- if [ $? -ne 0 ] ; then
- printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
+
+ found=0
+
+ # Search lines in the perf script result
+ exec 3<$perf_script
+ while read line <&3; do
+ [ -z "$line" ] && break
+ echo " Matching: $line"
+ ! echo "$line" | grep -E -q "$pattern"
+ found=$?
+ [ $found -eq 1 ] && break
+ done
+
+ if [ $found -ne 1 ] ; then
+ printf "FAIL: Didn't find the expected backtrace entry \"%s\"\n" "$pattern"
return 1
fi
done
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index 5eedbe29bba1..5940fdc1df37 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# Use vfs_getname probe to get syscall args filenames
+# Use vfs_getname probe to get syscall args filenames (exclusive)
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
# then use it with 'perf record' using 'touch' to write to a temp file, then
@@ -21,7 +21,10 @@ record_open_file() {
echo "Recording open file:"
# Check presence of libtraceevent support to run perf record
skip_no_probe_record_support "probe:vfs_getname*"
- [ $? -eq 2 ] && return 2
+ if [ $? -eq 2 ]; then
+ echo "WARN: Skipping test record_open_file. No libtraceevent support"
+ return 2
+ fi
perf record -o ${perfdata} -e probe:vfs_getname\* touch $file
}
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 3d1a7759a7b2..0fc7a909ae9b 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -1,5 +1,5 @@
-#!/bin/sh
-# perf record tests
+#!/bin/bash
+# perf record tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
set -e
@@ -17,10 +17,21 @@ skip_test_missing_symbol ${testsym}
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+script_output=$(mktemp /tmp/__perf_test.perf.data.XXXXX.script)
testprog="perf test -w thloop"
cpu_pmu_dir="/sys/bus/event_source/devices/cpu*"
br_cntr_file="/caps/branch_counter_nr"
br_cntr_output="branch stack counters"
+br_cntr_script_output="br_cntr: A"
+
+default_fd_limit=$(ulimit -Sn)
+# With option --threads=cpu the number of open file descriptors should be
+# equal to sum of: nmb_cpus * nmb_events (2+dummy),
+# nmb_threads for perf.data.n (equal to nmb_cpus) and
+# 2*nmb_cpus pipes, i.e. 4*nmb_cpus file descriptors (each pipe has 2 ends).
+# All together it needs 8*nmb_cpus file descriptors, plus some that are also
+# used outside of the testing, thus the limit is raised to 16*nmb_cpus
+min_fd_limit=$(($(getconf _NPROCESSORS_ONLN) * 16))
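+# (e.g. with 8 online CPUs this raises the soft limit to 8 * 16 = 128)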
cleanup() {
rm -rf "${perfdata}"
@@ -83,7 +94,7 @@ test_per_thread() {
test_register_capture() {
echo "Register capture test"
- if ! perf list | grep -q 'br_inst_retired.near_call'
+ if ! perf list pmu | grep -q 'br_inst_retired.near_call'
then
echo "Register capture test [Skipped missing event]"
return
@@ -165,7 +176,7 @@ test_workload() {
}
test_branch_counter() {
- echo "Basic branch counter test"
+ echo "Branch counter test"
# Check if the branch counter feature is supported
for dir in $cpu_pmu_dir
do
@@ -175,26 +186,133 @@ test_branch_counter() {
return
fi
done
- if ! perf record -o "${perfdata}" -j any,counter ${testprog} 2> /dev/null
+ if ! perf record -o "${perfdata}" -e "{branches:p,instructions}" -j any,counter ${testprog} 2> /dev/null
then
- echo "Basic branch counter test [Failed record]"
+ echo "Branch counter record test [Failed record]"
err=1
return
fi
if ! perf report -i "${perfdata}" -D -q | grep -q "$br_cntr_output"
then
- echo "Basic branch record test [Failed missing output]"
+ echo "Branch counter report test [Failed missing output]"
+ err=1
+ return
+ fi
+ if ! perf script -i "${perfdata}" -F +brstackinsn,+brcntr | grep -q "$br_cntr_script_output"
+ then
+ echo " Branch counter script test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Branch counter test [Success]"
+}
+
+test_cgroup() {
+ echo "Cgroup sampling test"
+ if ! perf record -aB --synth=cgroup --all-cgroups -o "${perfdata}" ${testprog} 2> /dev/null
+ then
+ echo "Cgroup sampling [Skipped not supported]"
+ return
+ fi
+ if ! perf report -i "${perfdata}" -D | grep -q "CGROUP"
+ then
+ echo "Cgroup sampling [Failed missing output]"
+ err=1
+ return
+ fi
+ if ! perf script -i "${perfdata}" -F cgroup | grep -q -v "unknown"
+ then
+ echo "Cgroup sampling [Failed cannot resolve cgroup names]"
+ err=1
+ return
+ fi
+ echo "Cgroup sampling test [Success]"
+}
+
+test_leader_sampling() {
+ echo "Basic leader sampling test"
+ if ! perf record -o "${perfdata}" -e "{instructions,instructions}:Su" -- \
+ perf test -w brstack 2> /dev/null
+ then
+ echo "Leader sampling [Failed record]"
+ err=1
+ return
+ fi
+ index=0
+ perf script -i "${perfdata}" > $script_output
+ while IFS= read -r line
+ do
+ # Check if the two instruction counts are equal in each record
+ instructions=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="instructions:") print $(i-1)}')
+ if [ $(($index%2)) -ne 0 ] && [ ${instructions}x != ${prev_instructions}x ]
+ then
+ echo "Leader sampling [Failed inconsistent instructions count]"
+ err=1
+ return
+ fi
+ index=$(($index+1))
+ prev_instructions=$instructions
+ done < $script_output
+ echo "Basic leader sampling test [Success]"
+}
+
+test_topdown_leader_sampling() {
+ echo "Topdown leader sampling test"
+ if ! perf stat -e "{slots,topdown-retiring}" true 2> /dev/null
+ then
+ echo "Topdown leader sampling [Skipped event parsing failed]"
+ return
+ fi
+ if ! perf record -o "${perfdata}" -e "{instructions,slots,topdown-retiring}:S" true 2> /dev/null
+ then
+ echo "Topdown leader sampling [Failed topdown events not reordered correctly]"
+ err=1
+ return
+ fi
+ echo "Topdown leader sampling test [Success]"
+}
+
+test_precise_max() {
+ echo "precise_max attribute test"
+ if ! perf stat -e "cycles,instructions" true 2> /dev/null
+ then
+ echo "precise_max attribute [Skipped no hardware events]"
+ return
+ fi
+ # Just to make sure it doesn't fail
+ if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+ then
+ echo "precise_max attribute [Failed cycles:P event]"
err=1
return
fi
- echo "Basic branch counter test [Success]"
+ # On AMD, cycles and instructions events are treated differently
+ if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+ then
+ echo "precise_max attribute [Failed instructions:P event]"
+ err=1
+ return
+ fi
+ echo "precise_max attribute test [Success]"
}
+# Raise the soft limit on open file descriptors to the computed minimum
+if [[ $default_fd_limit -lt $min_fd_limit ]]; then
+ ulimit -Sn $min_fd_limit
+fi
+
test_per_thread
test_register_capture
test_system_wide
test_workload
test_branch_counter
+test_cgroup
+test_leader_sampling
+test_topdown_leader_sampling
+test_precise_max
+
+# Restore the default file descriptor limit
+ulimit -Sn $default_fd_limit
cleanup
exit $err
diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
index 31c593966e8c..1b58ccc1fd88 100755
--- a/tools/perf/tests/shell/record_bpf_filter.sh
+++ b/tools/perf/tests/shell/record_bpf_filter.sh
@@ -22,15 +22,16 @@ trap trap_cleanup EXIT TERM INT
test_bpf_filter_priv() {
echo "Checking BPF-filter privilege"
- if [ "$(id -u)" != 0 ]
- then
- echo "bpf-filter test [Skipped permission]"
- err=2
- return
- fi
if ! perf record -e task-clock --filter 'period > 1' \
-o /dev/null --quiet true 2>&1
then
+ if [ "$(id -u)" != 0 ]
+ then
+ echo "try 'sudo perf record --setup-filter pin' first."
+ echo "bpf-filter test [Skipped permission]"
+ err=2
+ return
+ fi
echo "bpf-filter test [Skipped missing BPF support]"
err=2
return
@@ -67,7 +68,7 @@ test_bpf_filter_fail() {
# 'cpu' requires PERF_SAMPLE_CPU flag
if ! perf record -e task-clock --filter 'cpu > 0' \
- -o /dev/null true 2>&1 | grep PERF_SAMPLE_CPU
+ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU
then
echo "Failing bpf-filter test [Failed forbidden CPU]"
err=1
@@ -97,7 +98,7 @@ test_bpf_filter_group() {
fi
if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \
- -o /dev/null true 2>&1 | grep PERF_SAMPLE_CPU
+ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU
then
echo "Group bpf-filter test [Failed forbidden CPU]"
err=1
@@ -105,7 +106,7 @@ test_bpf_filter_group() {
fi
if ! perf record -e task-clock --filter 'period > 0 || code_pgsz > 4096' \
- -o /dev/null true 2>&1 | grep PERF_SAMPLE_CODE_PAGE_SIZE
+ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CODE_PAGE_SIZE
then
echo "Group bpf-filter test [Failed forbidden CODE_PAGE_SIZE]"
err=1
@@ -115,6 +116,65 @@ test_bpf_filter_group() {
echo "Group bpf-filter test [Success]"
}
+test_bpf_filter_multi() {
+ echo "Multiple bpf-filter test"
+
+ if ! perf record -e task-clock --filter 'period > 100000' \
+ -e page-faults --filter 'ip < 0xffffffff00000000' \
+ -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Multiple bpf-filter test [Failed record]"
+ err=1
+ return
+ fi
+
+ if ! perf script -i "${perfdata}" -F period,event | grep task-clock | \
+ awk '{ if (int($1) <= 100000) { print $0; exit(1); } }'
+ then
+ echo "Multiple bpf-filter test [Failed task-clock period]"
+ err=1
+ return
+ fi
+
+ if perf script -i "${perfdata}" -F event,ip | grep page-fault | \
+ grep 'ffffffff[0-9a-f]*'
+ then
+ echo "Multiple bpf-filter test [Failed page-faults ip]"
+ err=1
+ return
+ fi
+
+ echo "Multiple bpf-filter test [Success]"
+}
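+# Each --filter expression applies to the event given by the closest
+# preceding -e option, so the record above keeps only task-clock samples
+# with a period above 100000 and only page-faults samples with an ip below
+# 0xffffffff00000000; the two checks then verify exactly that in perf.data.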
+
+test_bpf_filter_cgroup() {
+ echo "Cgroup bpf-filter test"
+
+ if ! perf record -e task-clock --filter 'cgroup == /' \
+ -a --all-cgroups --synth=cgroup -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Cgroup bpf-filter test [Skipped cgroup not supported]"
+ return
+ fi
+
+ # 'cgroup' requires PERF_SAMPLE_CGROUP flag
+ if ! perf record -e task-clock --filter 'cgroup == /' \
+ -o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CGROUP
+ then
+ echo "Cgroup bpf-filter test [Failed CGROUP requires --all-cgroups]"
+ err=1
+ return
+ fi
+
+ if ! perf report -i "${perfdata}" -s cgroup -q | grep -q -F '100.00%'
+ then
+ echo "Cgroup bpf-filter test [Failed root cgroup does not have 100%]"
+ err=1
+ return
+ fi
+
+ echo "Cgroup bpf-filter test [Success]"
+}
test_bpf_filter_priv
@@ -130,5 +190,13 @@ if [ $err = 0 ]; then
test_bpf_filter_group
fi
+if [ $err = 0 ]; then
+ test_bpf_filter_multi
+fi
+
+if [ $err = 0 ]; then
+ test_bpf_filter_cgroup
+fi
+
cleanup
exit $err
diff --git a/tools/perf/tests/shell/record_lbr.sh b/tools/perf/tests/shell/record_lbr.sh
new file mode 100755
index 000000000000..8d750ee631f8
--- /dev/null
+++ b/tools/perf/tests/shell/record_lbr.sh
@@ -0,0 +1,161 @@
+#!/bin/bash
+# perf record LBR tests (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+if [ ! -f /sys/devices/cpu/caps/branches ] && [ ! -f /sys/devices/cpu_core/caps/branches ]
+then
+ echo "Skip: only x86 CPUs support LBR"
+ exit 2
+fi
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -rf "${perfdata}"
+ rm -rf "${perfdata}".old
+ rm -rf "${perfdata}".txt
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+
+lbr_callgraph_test() {
+ test="LBR callgraph"
+
+ echo "$test"
+ if ! perf record -e cycles --call-graph lbr -o "${perfdata}" perf test -w thloop
+ then
+ echo "$test [Failed support missing]"
+ if [ $err -eq 0 ]
+ then
+ err=2
+ fi
+ return
+ fi
+
+ if ! perf report --stitch-lbr -i "${perfdata}" > "${perfdata}".txt
+ then
+ cat "${perfdata}".txt
+ echo "$test [Failed in perf report]"
+ err=1
+ return
+ fi
+
+ echo "$test [Success]"
+}
+
+lbr_test() {
+ local branch_flags=$1
+ local test="LBR $2 test"
+ local threshold=$3
+ local out
+ local sam_nr
+ local bs_nr
+ local zero_nr
+ local r
+
+ echo "$test"
+ if ! perf record -e cycles $branch_flags -o "${perfdata}" perf test -w thloop
+ then
+ echo "$test [Failed support missing]"
+ perf record -e cycles $branch_flags -o "${perfdata}" perf test -w thloop || true
+ if [ $err -eq 0 ]
+ then
+ err=2
+ fi
+ return
+ fi
+
+ out=$(perf report -D -i "${perfdata}" 2> /dev/null | grep -A1 'PERF_RECORD_SAMPLE')
+ sam_nr=$(echo "$out" | grep -c 'PERF_RECORD_SAMPLE' || true)
+ if [ $sam_nr -eq 0 ]
+ then
+ echo "$test [Failed no samples captured]"
+ err=1
+ return
+ fi
+ echo "$test: $sam_nr samples"
+
+ bs_nr=$(echo "$out" | grep -c 'branch stack: nr:' || true)
+ if [ $sam_nr -ne $bs_nr ]
+ then
+ echo "$test [Failed samples missing branch stacks]"
+ err=1
+ return
+ fi
+
+ zero_nr=$(echo "$out" | grep -c 'branch stack: nr:0' || true)
+ r=$(($zero_nr * 100 / $bs_nr))
+ if [ $r -gt $threshold ]; then
+ echo "$test [Failed empty br stack ratio exceed $threshold%: $r%]"
+ err=1
+ return
+ fi
+
+ echo "$test [Success]"
+}
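+# Worked example (illustrative): with bs_nr=1000 samples carrying branch
+# stacks, of which zero_nr=15 have 'branch stack: nr:0', the empty ratio is
+# r = 15 * 100 / 1000 = 1(%), which passes a threshold of 2.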
+
+parallel_lbr_test() {
+ err=0
+ perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+ lbr_test "$1" "$2" "$3"
+ cleanup
+ exit $err
+}
+
+lbr_callgraph_test
+
+# Sequential
+lbr_test "-b" "any branch" 2
+lbr_test "-j any_call" "any call" 2
+lbr_test "-j any_ret" "any ret" 2
+lbr_test "-j ind_call" "any indirect call" 2
+lbr_test "-j ind_jmp" "any indirect jump" 100
+lbr_test "-j call" "direct calls" 2
+lbr_test "-j ind_call,u" "any indirect user call" 100
+lbr_test "-a -b" "system wide any branch" 2
+lbr_test "-a -j any_call" "system wide any call" 2
+
+# Parallel
+parallel_lbr_test "-b" "parallel any branch" 100 &
+pid1=$!
+parallel_lbr_test "-j any_call" "parallel any call" 100 &
+pid2=$!
+parallel_lbr_test "-j any_ret" "parallel any ret" 100 &
+pid3=$!
+parallel_lbr_test "-j ind_call" "parallel any indirect call" 100 &
+pid4=$!
+parallel_lbr_test "-j ind_jmp" "parallel any indirect jump" 100 &
+pid5=$!
+parallel_lbr_test "-j call" "parallel direct calls" 100 &
+pid6=$!
+parallel_lbr_test "-j ind_call,u" "parallel any indirect user call" 100 &
+pid7=$!
+parallel_lbr_test "-a -b" "parallel system wide any branch" 100 &
+pid8=$!
+parallel_lbr_test "-a -j any_call" "parallel system wide any call" 100 &
+pid9=$!
+
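+# Merge the children's exit codes: a real failure (child_err == 1) overrides
+# a skip (err == 2), and any child result overrides a prior success (err == 0).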
+for pid in $pid1 $pid2 $pid3 $pid4 $pid5 $pid6 $pid7 $pid8 $pid9
+do
+ set +e
+ wait $pid
+ child_err=$?
+ set -e
+ if ([ $err -eq 2 ] && [ $child_err -eq 1 ]) || [ $err -eq 0 ]
+ then
+ err=$child_err
+ fi
+done
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index 67c925f3a15a..678947fe69ee 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# perf record offcpu profiling tests
+# perf record offcpu profiling tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
set -e
diff --git a/tools/perf/tests/shell/script.sh b/tools/perf/tests/shell/script.sh
index fa4d71e2e72a..d3e2958d2242 100755
--- a/tools/perf/tests/shell/script.sh
+++ b/tools/perf/tests/shell/script.sh
@@ -17,7 +17,7 @@ cleanup()
sane=$(echo "${temp_dir}" | cut -b 1-21)
if [ "${sane}" = "/tmp/perf-test-script" ] ; then
echo "--- Cleaning up ---"
- rm -f "${temp_dir}/"*
+ rm -rf "${temp_dir:?}/"*
rmdir "${temp_dir}"
fi
}
@@ -61,11 +61,38 @@ _end_of_file_
esac
perf record $cmd_flags -o "${perfdatafile}" true
+ # Disable lsan to avoid warnings about python memory leaks.
+ export ASAN_OPTIONS=detect_leaks=0
perf script -i "${perfdatafile}" -s "${db_test}"
+ export ASAN_OPTIONS=
echo "DB test [Success]"
}
+test_parallel_perf()
+{
+ echo "parallel-perf test"
+ if ! python3 --version >/dev/null 2>&1 ; then
+ echo "SKIP: no python3"
+ err=2
+ return
+ fi
+ pp=$(dirname "$0")/../../scripts/python/parallel-perf.py
+ if [ ! -f "${pp}" ] ; then
+ echo "SKIP: parallel-perf.py script not found "
+ err=2
+ return
+ fi
+ perf_data="${temp_dir}/pp-perf.data"
+ output1_dir="${temp_dir}/output1"
+ output2_dir="${temp_dir}/output2"
+ perf record -o "${perf_data}" --sample-cpu uname
+ python3 "${pp}" -o "${output1_dir}" --jobs 4 --verbose -- perf script -i "${perf_data}"
+ python3 "${pp}" -o "${output2_dir}" --jobs 4 --verbose --per-cpu -- perf script -i "${perf_data}"
+ echo "parallel-perf test [Success]"
+}
+
test_db
+test_parallel_perf
cleanup
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index f1818fa6d9ce..fc2d8cc6e5e0 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -42,6 +42,7 @@ function commachecker()
;; "--per-socket") exp=8
;; "--per-node") exp=8
;; "--per-die") exp=8
+ ;; "--per-cluster") exp=8
;; "--per-cache") exp=8
esac
@@ -79,6 +80,7 @@ then
check_system_wide_no_aggr "CSV" "$perf_cmd"
check_per_core "CSV" "$perf_cmd"
check_per_cache_instance "CSV" "$perf_cmd"
+ check_per_cluster "CSV" "$perf_cmd"
check_per_die "CSV" "$perf_cmd"
check_per_socket "CSV" "$perf_cmd"
else
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 3bc900533a5d..6b630d33c328 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -105,7 +105,7 @@ check_per_thread()
echo "[Skip] paranoia and not root"
return
fi
- perf stat -j --per-thread -a -o "${stat_output}" true
+ perf stat -j --per-thread -p $$ -o "${stat_output}" true
$PYTHON $pythonchecker --per-thread --file "${stat_output}"
echo "[Success]"
}
@@ -122,6 +122,18 @@ check_per_cache_instance()
echo "[Success]"
}
+check_per_cluster()
+{
+ echo -n "Checking json output: per cluster "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-cluster -a true 2>&1 | $PYTHON $pythonchecker --per-cluster
+ echo "[Success]"
+}
+
check_per_die()
{
echo -n "Checking json output: per die "
@@ -200,6 +212,7 @@ then
check_system_wide_no_aggr
check_per_core
check_per_cache_instance
+ check_per_cluster
check_per_die
check_per_socket
else
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index 4fcdd1a9142c..0f7967be60af 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -13,7 +13,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
-skip_metric=("stalled cycles per insn" "tma_")
+skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound" "TopdownL1" "percent of slots")
cleanup() {
rm -f "${stat_output}"
@@ -40,6 +40,7 @@ function commachecker()
;; "--per-node") prefix=3
;; "--per-die") prefix=3
;; "--per-cache") prefix=3
+ ;; "--per-cluster") prefix=3
esac
while read line
@@ -99,6 +100,7 @@ then
check_system_wide_no_aggr "STD" "$perf_cmd"
check_per_core "STD" "$perf_cmd"
check_per_cache_instance "STD" "$perf_cmd"
+ check_per_cluster "STD" "$perf_cmd"
check_per_die "STD" "$perf_cmd"
check_per_socket "STD" "$perf_cmd"
else
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 3f1e67795490..68323d636fb7 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -73,9 +73,33 @@ test_topdown_groups() {
err=1
return
fi
- if perf stat -e '{topdown-retiring,slots}' true 2>&1 | grep -E -q "<not supported>"
+ if perf stat -e 'instructions,topdown-retiring,slots' true 2>&1 | grep -E -q "<not supported>"
then
- echo "Topdown event group test [Failed slots not reordered first]"
+ echo "Topdown event group test [Failed slots not reordered first in no-group case]"
+ err=1
+ return
+ fi
+ if perf stat -e '{instructions,topdown-retiring,slots}' true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed slots not reordered first in single group case]"
+ err=1
+ return
+ fi
+ if perf stat -e '{instructions,slots},topdown-retiring' true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed topdown metrics event not move into slots group]"
+ err=1
+ return
+ fi
+ if perf stat -e '{instructions,slots},{topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed topdown metrics group not merge into slots group]"
+ err=1
+ return
+ fi
+ if perf stat -e '{instructions,r400,r8000}' true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed raw format slots not reordered first]"
err=1
return
fi
@@ -117,16 +141,18 @@ test_cputype() {
# Find a known PMU for cputype.
pmu=""
- for i in cpu cpu_atom armv8_pmuv3_0
+ devs="/sys/bus/event_source/devices"
+ for i in $devs/cpu $devs/cpu_atom $devs/armv8_pmuv3_0 $devs/armv8_cortex_*
do
- if test -d "/sys/devices/$i"
+ i_base=$(basename "$i")
+ if test -d "$i"
then
- pmu="$i"
+ pmu="$i_base"
break
fi
- if perf stat -e "$i/instructions/" true > /dev/null 2>&1
+ if perf stat -e "$i_base/instructions/" true > /dev/null 2>&1
then
- pmu="$i"
+ pmu="$i_base"
break
fi
done
@@ -146,6 +172,34 @@ test_cputype() {
echo "cputype test [Success]"
}
+test_hybrid() {
+ # Test the default stat command on hybrid devices opens one cycles event for
+ # each CPU type.
+ echo "hybrid test"
+
+ # Count the number of core PMUs, assume minimum of 1
+ pmus=$(ls /sys/bus/event_source/devices/*/cpus 2>/dev/null | wc -l)
+ if [ "$pmus" -lt 1 ]
+ then
+ pmus=1
+ fi
+
+ # Run default Perf stat
+ cycles_events=$(perf stat -- true 2>&1 | grep -E "/cycles/[uH]*| cycles[:uH]* " -c)
+
+ # The expectation is that the default output will have a cycles event on
+ # each hybrid PMU. In situations with no cycles PMU events, such as
+ # virtualized environments, this can fall back to task-clock and so the end
+ # count may be 0. Fail if neither condition holds.
+ if [ "$pmus" -ne "$cycles_events" ] && [ "0" -ne "$cycles_events" ]
+ then
+ echo "hybrid test [Found $pmus PMUs but $cycles_events cycles events. Failed]"
+ err=1
+ return
+ fi
+ echo "hybrid test [Success]"
+}
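+# Illustrative default 'perf stat' output lines counted by the grep above on
+# a two-PMU hybrid machine (values and formatting assumed):
+#   1,234,567,890      cpu_core/cycles/
+#     123,456,789      cpu_atom/cycles/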
+
test_default_stat
test_stat_record_report
test_stat_record_script
@@ -153,4 +207,5 @@ test_stat_repeat_weak_groups
test_topdown_groups
test_topdown_weak_groups
test_cputype
+test_hybrid
exit $err
diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh
index 55ef9c9ded2d..c6d61a4ac3e7 100755
--- a/tools/perf/tests/shell/stat_all_metricgroups.sh
+++ b/tools/perf/tests/shell/stat_all_metricgroups.sh
@@ -1,9 +1,7 @@
-#!/bin/sh
+#!/bin/bash
# perf all metricgroups test
# SPDX-License-Identifier: GPL-2.0
-set -e
-
ParanoidAndNotRoot()
{
[ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
@@ -14,11 +12,37 @@ if ParanoidAndNotRoot 0
then
system_wide_flag=""
fi
-
+err=0
for m in $(perf list --raw-dump metricgroups)
do
echo "Testing $m"
- perf stat -M "$m" $system_wide_flag sleep 0.01
+ result=$(perf stat -M "$m" $system_wide_flag sleep 0.01 2>&1)
+ result_err=$?
+ if [[ $result_err -gt 0 ]]
+ then
+ if [[ "$result" =~ \
+ "Access to performance monitoring and observability operations is limited" ]]
+ then
+ echo "Permission failure"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
+ elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
+ then
+ echo "Permissions - need system wide mode"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
+ else
+ echo "Metric group $m failed"
+ echo $result
+ err=1 # Fail
+ fi
+ fi
done
-exit 0
+exit $err
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index 54774525e18a..73e9347e88a9 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -2,42 +2,87 @@
# perf all metrics test
# SPDX-License-Identifier: GPL-2.0
+ParanoidAndNotRoot()
+{
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
+
+system_wide_flag="-a"
+if ParanoidAndNotRoot 0
+then
+ system_wide_flag=""
+fi
+
err=0
for m in $(perf list --raw-dump metrics); do
echo "Testing $m"
- result=$(perf stat -M "$m" true 2>&1)
- if [[ "$result" =~ ${m:0:50} ]] || [[ "$result" =~ "<not supported>" ]]
+ result=$(perf stat -M "$m" $system_wide_flag -- sleep 0.01 2>&1)
+ result_err=$?
+ if [[ $result_err -gt 0 ]]
then
- continue
+ if [[ "$result" =~ \
+ "Access to performance monitoring and observability operations is limited" ]]
+ then
+ echo "Permission failure"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
+ then
+ echo "Permissions - need system wide mode"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "<not supported>" ]]
+ then
+ echo "Not supported events"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ then
+ echo "FP issues"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "PMM" ]]
+ then
+ echo "Optane memory issues"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
+ continue
+ fi
fi
- # Failed so try system wide.
- result=$(perf stat -M "$m" -a sleep 0.01 2>&1)
+
if [[ "$result" =~ ${m:0:50} ]]
then
continue
fi
- # Failed again, possibly the workload was too small so retry with something
- # longer.
- result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
+
+ # Failed, possibly the workload was too small so retry with something longer.
+ result=$(perf stat -M "$m" $system_wide_flag -- perf bench internals synthesize 2>&1)
if [[ "$result" =~ ${m:0:50} ]]
then
continue
fi
echo "Metric '$m' not printed in:"
echo "$result"
- if [[ "$err" != "1" ]]
- then
- err=2
- if [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
- then
- echo "Skip, not fail, for FP issues"
- elif [[ "$result" =~ "PMM" ]]
- then
- echo "Skip, not fail, for Optane memory issues"
- else
- err=1
- fi
- fi
+ err=1
done
exit "$err"
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
index d2a3506e0d19..8b148b300be1 100755
--- a/tools/perf/tests/shell/stat_all_pmu.sh
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -1,23 +1,51 @@
-#!/bin/sh
-# perf all PMU test
+#!/bin/bash
+# perf all PMU test (exclusive)
# SPDX-License-Identifier: GPL-2.0
set -e
+err=0
+result=""
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ echo "$result"
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
# Test all PMU events; however exclude parameterized ones (name contains '?')
-for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g'); do
+for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g')
+do
echo "Testing $p"
result=$(perf stat -e "$p" true 2>&1)
- if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" ; then
- # We failed to see the event and it is supported. Possibly the workload was
- # too small so retry with something longer.
- result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
- if ! echo "$result" | grep -q "$p" ; then
- echo "Event '$p' not printed in:"
- echo "$result"
- exit 1
- fi
+ if echo "$result" | grep -q "$p"
+ then
+ # Event seen in output.
+ continue
+ fi
+ if echo "$result" | grep -q "<not supported>"
+ then
+ # Event not supported, so ignore.
+ continue
+ fi
+ if echo "$result" | grep -q "Access to performance monitoring and observability operations is limited."
+ then
+ # Access is limited, so ignore.
+ continue
+ fi
+
+ # We failed to see the event and it is supported. Possibly the workload was
+ # too small so retry with something longer.
+ result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
+ if echo "$result" | grep -q "$p"
+ then
+ # Event seen in output.
+ continue
fi
+ echo "Error: event '$p' not printed in:"
+ echo "$result"
+ err=1
done
-exit 0
+trap - EXIT TERM INT
+exit $err
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index a87bb2814b4c..95d2ad5d17c6 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -1,45 +1,74 @@
#!/bin/sh
-# perf stat --bpf-counters test
+# perf stat --bpf-counters test (exclusive)
# SPDX-License-Identifier: GPL-2.0
set -e
-# check whether $2 is within +/- 10% of $1
+workload="perf test -w sqrtloop"
+
+# check whether $2 is within +/- 20% of $1
compare_number()
{
- first_num=$1
- second_num=$2
-
- # upper bound is first_num * 110%
- upper=$(expr $first_num + $first_num / 10 )
- # lower bound is first_num * 90%
- lower=$(expr $first_num - $first_num / 10 )
-
- if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
- echo "The difference between $first_num and $second_num are greater than 10%."
- exit 1
- fi
+ first_num=$1
+ second_num=$2
+
+ # upper bound is first_num * 120%
+ upper=$(expr $first_num + $first_num / 5 )
+ # lower bound is first_num * 80%
+ lower=$(expr $first_num - $first_num / 5 )
+
+ if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
+ echo "The difference between $first_num and $second_num are greater than 20%."
+ exit 1
+ fi
+}
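+# Worked example (illustrative): with first_num=1000, upper = 1000 + 1000/5
+# = 1200 and lower = 1000 - 1000/5 = 800, so second_num must fall within
+# [800, 1200] to pass.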
+
+check_counts()
+{
+ base_instructions=$1
+ bpf_instructions=$2
+
+ if [ "$base_instructions" = "<not" ]; then
+ echo "Skipping: instructions event not counted"
+ exit 2
+ fi
+ if [ "$bpf_instructions" = "<not" ]; then
+ echo "Failed: instructions not counted with --bpf-counters"
+ exit 1
+ fi
+}
+
+test_bpf_counters()
+{
+ printf "Testing --bpf-counters "
+ base_instructions=$(perf stat --no-big-num -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
+ bpf_instructions=$(perf stat --no-big-num --bpf-counters -e instructions -- $workload 2>&1 | awk '/instructions/ {print $1}')
+ check_counts $base_instructions $bpf_instructions
+ compare_number $base_instructions $bpf_instructions
+ echo "[Success]"
+}
+
+test_bpf_modifier()
+{
+ printf "Testing bpf event modifier "
+ stat_output=$(perf stat --no-big-num -e instructions/name=base_instructions/,instructions/name=bpf_instructions/b -- $workload 2>&1)
+ base_instructions=$(echo "$stat_output"| awk '/base_instructions/ {print $1}')
+ bpf_instructions=$(echo "$stat_output"| awk '/bpf_instructions/ {print $1}')
+ check_counts $base_instructions $bpf_instructions
+ compare_number $base_instructions $bpf_instructions
+ echo "[Success]"
}
# skip if --bpf-counters is not supported
-if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
+if ! perf stat -e instructions --bpf-counters true > /dev/null 2>&1; then
if [ "$1" = "-v" ]; then
echo "Skipping: --bpf-counters not supported"
- perf --no-pager stat -e cycles --bpf-counters true || true
+ perf --no-pager stat -e instructions --bpf-counters true || true
fi
exit 2
fi
-base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
-if [ "$base_cycles" = "<not" ]; then
- echo "Skipping: cycles event not counted"
- exit 2
-fi
-bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
-if [ "$bpf_cycles" = "<not" ]; then
- echo "Failed: cycles not counted with --bpf-counters"
- exit 1
-fi
+test_bpf_counters
+test_bpf_modifier
-compare_number $base_cycles $bpf_cycles
exit 0
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
index e75d0780dc78..2ec69060c42f 100755
--- a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
@@ -58,22 +58,9 @@ check_system_wide_counted()
fi
}
-check_cpu_list_counted()
-{
- check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1 2>&1)
- if echo ${check_cpu_list_counted_output} | grep -q -F "<not "; then
- echo "Some CPU events are not counted"
- if [ "${verbose}" = "1" ]; then
- echo ${check_cpu_list_counted_output}
- fi
- exit 1
- fi
-}
-
check_bpf_counter
find_cgroups
check_system_wide_counted
-check_cpu_list_counted
exit 0
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
index 7ca172599aa6..279f19c5919a 100755
--- a/tools/perf/tests/shell/stat_metrics_values.sh
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -19,6 +19,8 @@ echo "Output will be stored in: $tmpdir"
$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
ret=$?
rm -rf $tmpdir
-
+if [ $ret -ne 0 ]; then
+ echo "Metric validation return with erros. Please check metrics reported with errors."
+fi
exit $ret
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index e342e6c8aa50..9caa36130175 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -6,7 +6,15 @@ shelldir=$(dirname "$0")
# shellcheck source=lib/perf_has_symbol.sh
. "${shelldir}"/lib/perf_has_symbol.sh
-lscpu | grep -q "aarch64" || exit 2
+if [ "$(uname -m)" != "aarch64" ]; then
+ exit 2
+fi
+
+if perf version --build-options | grep HAVE_DWARF_UNWIND_SUPPORT | grep -q OFF
+then
+ echo "Skipping, no dwarf unwind support"
+ exit 2
+fi
skip_test_missing_symbol leafloop
@@ -20,28 +28,21 @@ cleanup_files()
trap cleanup_files EXIT TERM INT
-# Add a 1 second delay to skip samples that are not in the leaf() function
# shellcheck disable=SC2086
-perf record -o "$PERF_DATA" --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
-PID=$!
-
-echo " + Recording (PID=$PID)..."
-sleep 2
-echo " + Stopping perf-record..."
+perf record -o "$PERF_DATA" --call-graph fp -e cycles//u --user-callchains -- $TEST_PROGRAM
-kill $PID
-wait $PID
+# Try opening the file so any immediate errors are visible in the log
+perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
-# expected perf-script output:
+# expected perf-script output if 'leaf' has been inserted correctly:
#
-# program
+# perf
# 728 leaf
# 753 parent
# 76c leafloop
-# ...
+# ... remaining stack to main() ...
-perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
-perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 | \
- awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||
- sym[1] != "parent" ||
- sym[2] != "leafloop") exit 1 }'
+# Each frame is separated by a tab, some spaces and an address
+SEP="[[:space:]]+ [[:xdigit:]]+"
+perf script -i "$PERF_DATA" -F comm,ip,sym | tr '\n' ' ' | \
+ grep -E -q "perf $SEP leaf $SEP parent $SEP leafloop"
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 65dd85207125..573af9235b72 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# Check Arm CoreSight trace data recording and synthesized samples
+# Check Arm CoreSight trace data recording and synthesized samples (exclusive)
# Uses the 'perf record' to record trace data with Arm CoreSight sinks;
# then verify if there have any branch samples and instruction samples
@@ -12,7 +12,7 @@
glb_err=0
skip_if_no_cs_etm_event() {
- perf list | grep -q 'cs_etm//' && return 0
+ perf list pmu | grep -q 'cs_etm//' && return 0
# cs_etm event doesn't exist
return 2
@@ -188,7 +188,7 @@ arm_cs_etm_snapshot_test() {
arm_cs_etm_basic_test() {
echo "Recording trace with '$*'"
- perf record -o ${perfdata} "$@" -- ls > /dev/null 2>&1
+ perf record -o ${perfdata} "$@" -m,8M -- ls > /dev/null 2>&1
perf_script_branch_samples ls &&
perf_report_branch_samples ls &&
diff --git a/tools/perf/tests/shell/test_arm_coresight_disasm.sh b/tools/perf/tests/shell/test_arm_coresight_disasm.sh
new file mode 100755
index 000000000000..be2d26303f94
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_coresight_disasm.sh
@@ -0,0 +1,65 @@
+#!/bin/sh
+# Check Arm CoreSight disassembly script completes without errors (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+# The disassembly script reconstructs ranges of instructions and gives these to objdump to
+# decode. objdump doesn't like ranges that go backwards, but these are a good indication
+# that decoding has gone wrong either in OpenCSD, Perf or in the range reconstruction in
+# the script. Check that all 3 parts work correctly by running the script.
+
+skip_if_no_cs_etm_event() {
+ perf list pmu | grep -q 'cs_etm//' && return 0
+
+ # cs_etm event doesn't exist
+ return 2
+}
+
+skip_if_no_cs_etm_event || exit 2
+
+# Assume an error unless we reach the very end
+set -e
+glb_err=1
+
+perfdata_dir=$(mktemp -d /tmp/__perf_test.perf.data.XXXXX)
+perfdata=${perfdata_dir}/perf.data
+file=$(mktemp /tmp/temporary_file.XXXXX)
+# Relative path works whether it's installed or running from repo
+script_path=$(dirname "$0")/../../scripts/python/arm-cs-trace-disasm.py
+
+cleanup_files()
+{
+ set +e
+ rm -rf ${perfdata_dir}
+ rm -f ${file}
+ trap - EXIT TERM INT
+ exit $glb_err
+}
+
+trap cleanup_files EXIT TERM INT
+
+# Ranges start and end on branches, so check for some likely branch instructions
+sep="\s\|\s"
+branch_search="\sbl${sep}b${sep}b.ne${sep}b.eq${sep}cbz\s"
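+# With sep expanded, branch_search is the grep BRE alternation
+# "\sbl\s\|\sb\s\|\sb.ne\s\|\sb.eq\s\|\scbz\s", i.e. it matches any one of
+# these aarch64 branch mnemonics surrounded by whitespace.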
+
+## Test kernel ##
+if [ -e /proc/kcore ]; then
+ echo "Testing kernel disassembly"
+ perf record -o ${perfdata} -e cs_etm//k --kcore -- touch $file > /dev/null 2>&1
+ perf script -i ${perfdata} -s python:${script_path} -- \
+ -d --stop-sample=30 2> /dev/null > ${file}
+ grep -q -e ${branch_search} ${file}
+ echo "Found kernel branches"
+else
+ # kcore is required for correct kernel decode due to runtime code patching
+ echo "No kcore, skipping kernel test"
+fi
+
+## Test user ##
+echo "Testing userspace disassembly"
+perf record -o ${perfdata} -e cs_etm//u -- touch $file > /dev/null 2>&1
+perf script -i ${perfdata} -s python:${script_path} -- \
+ -d --stop-sample=30 2> /dev/null > ${file}
+grep -q -e ${branch_search} ${file}
+echo "Found userspace branches"
+
+glb_err=0
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
index 03d5c7d12ee5..a69aab70dd8a 100755
--- a/tools/perf/tests/shell/test_arm_spe.sh
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# Check Arm SPE trace data recording and synthesized samples
+# Check Arm SPE trace data recording and synthesized samples (exclusive)
# Uses the 'perf record' to record trace data of Arm SPE events;
# then verify if any SPE event samples are generated by SPE with
@@ -9,7 +9,7 @@
# German Gomez <german.gomez@arm.com>, 2021
skip_if_no_arm_spe_event() {
- perf list | grep -E -q 'arm_spe_[0-9]+//' && return 0
+ perf list pmu | grep -E -q 'arm_spe_[0-9]+//' && return 0
# arm_spe event doesn't exist
return 2
@@ -107,7 +107,37 @@ arm_spe_system_wide_test() {
arm_spe_report "SPE system-wide testing" $err
}
+arm_spe_discard_test() {
+ echo "SPE discard mode"
+
+ for f in /sys/bus/event_source/devices/arm_spe_*; do
+ if [ -e "$f/format/discard" ]; then
+ cpu=$(cut -c -1 "$f/cpumask")
+ break
+ fi
+ done
+
+ if [ -z "$cpu" ]; then
+ arm_spe_report "SPE discard mode not present" 2
+ return
+ fi
+
+ # The test can use a wildcard SPE instance, and perf will only open the
+ # event on instances that have the discard format flag. But make sure
+ # the target runs on an instance with discard mode, otherwise nothing
+ # is being tested.
+ perf record -o ${perfdata} -e arm_spe/discard/ -N -B --no-bpf-event \
+ -- taskset --cpu-list $cpu true
+
+ if perf report -i ${perfdata} --stats | grep 'AUX events\|AUXTRACE events'; then
+ arm_spe_report "SPE discard mode found unexpected data" 1
+ else
+ arm_spe_report "SPE discard mode" 0
+ fi
+}
+
arm_spe_snapshot_test
arm_spe_system_wide_test
+arm_spe_discard_test
exit $glb_err
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
index 1a7e6a82d0e3..8efeef9fb956 100755
--- a/tools/perf/tests/shell/test_arm_spe_fork.sh
+++ b/tools/perf/tests/shell/test_arm_spe_fork.sh
@@ -5,7 +5,7 @@
# German Gomez <german.gomez@arm.com>, 2022
skip_if_no_arm_spe_event() {
- perf list | grep -E -q 'arm_spe_[0-9]+//' && return 0
+ perf list pmu | grep -E -q 'arm_spe_[0-9]+//' && return 0
return 2
}
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index 5f14d0cb013f..e01df7581393 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -30,7 +30,7 @@ test_user_branches() {
echo "Testing user branch stack sampling"
perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstacksym | xargs -n1 > $TMPDIR/perf.script
+ perf script -i $TMPDIR/perf.data --fields brstacksym | tr -s ' ' '\n' > $TMPDIR/perf.script
# example of branch entries:
# brstack_foo+0x14/brstack_bar+0x40/P/-/-/0/CALL
@@ -59,7 +59,7 @@ test_filter() {
echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script
+ perf script -i $TMPDIR/perf.data --fields brstack | tr -s ' ' '\n' | grep '.' > $TMPDIR/perf.script
# fail if we find any branch type that doesn't match any of the expected ones
# also consider UNKNOWN branch types (-)
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
index 3dfa91832aa8..c86da0235059 100755
--- a/tools/perf/tests/shell/test_data_symbol.sh
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# Test data symbol
+# Test data symbol (exclusive)
# SPDX-License-Identifier: GPL-2.0
# Leo Yan <leo.yan@linaro.org>, 2022
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index 723ec501f99a..f3a9a040bacc 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -1,11 +1,11 @@
#!/bin/sh
-# Miscellaneous Intel PT testing
+# Miscellaneous Intel PT testing (exclusive)
# SPDX-License-Identifier: GPL-2.0
set -e
# Skip if no Intel PT
-perf list | grep -q 'intel_pt//' || exit 2
+perf list pmu | grep -q 'intel_pt//' || exit 2
shelldir=$(dirname "$0")
# shellcheck source=lib/waiting.sh
@@ -644,6 +644,33 @@ test_pipe()
return 0
}
+test_pause_resume()
+{
+ echo "--- Test with pause / resume ---"
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt/aux-action=start-paused/u uname ; then
+ echo "SKIP: pause / resume is not supported"
+ return 2
+ fi
+ if ! perf_record_no_bpf -o "${perfdatafile}" \
+ -e intel_pt/aux-action=start-paused/u \
+ -e instructions/period=50000,aux-action=resume,name=Resume/u \
+ -e instructions/period=100000,aux-action=pause,name=Pause/u uname ; then
+ echo "perf record with pause / resume failed"
+ return 1
+ fi
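+ # The trace starts paused, so the awk script below fails if any branch
+ # sample appears before the first Resume or after a Pause, or if no
+ # branch sample is seen at all.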
+ if ! perf script -i "${perfdatafile}" --itrace=b -Fperiod,event | \
+ awk 'BEGIN {paused=1;branches=0}
+ /Resume/ {paused=0}
+ /branches/ {if (paused) exit 1;branches=1}
+ /Pause/ {paused=1}
+ END {if (!branches) exit 1}' ; then
+ echo "perf record with pause / resume failed"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
count_result()
{
if [ "$1" -eq 2 ] ; then
@@ -672,6 +699,7 @@ test_power_event || ret=$? ; count_result $ret ; ret=0
test_no_tnt || ret=$? ; count_result $ret ; ret=0
test_event_trace || ret=$? ; count_result $ret ; ret=0
test_pipe || ret=$? ; count_result $ret ; ret=0
+test_pause_resume || ret=$? ; count_result $ret ; ret=0
cleanup
diff --git a/tools/perf/tests/shell/test_stat_intel_tpebs.sh b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
new file mode 100755
index 000000000000..f95fc64bf0a7
--- /dev/null
+++ b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# test Intel TPEBS counting mode (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
+
+# Use this event for testing because it should exist in all platforms
+event=cache-misses:R
+
+# Hybrid platforms print the event like "cpu_atom/cache-misses/R", rather than as above
+alt_name=/cache-misses/R
+
+# Without this cmd option, default value or zero is returned
+#echo "Testing without --record-tpebs"
+#result=$(perf stat -e "$event" true 2>&1)
+#[[ "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+
+# On platforms that do not support TPEBS, it should execute without error.
+echo "Testing with --record-tpebs"
+result=$(perf stat -e "$event" --record-tpebs -a sleep 0.01 2>&1)
+[[ "$result" =~ "perf record" && "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
index 92d15154ba79..e194fcf61df3 100755
--- a/tools/perf/tests/shell/test_task_analyzer.sh
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -1,5 +1,5 @@
#!/bin/bash
-# perf script task-analyzer tests
+# perf script task-analyzer tests (exclusive)
# SPDX-License-Identifier: GPL-2.0
tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
@@ -11,6 +11,9 @@ if [ -e "$perfdir/scripts/python/Perf-Trace-Util" ]; then
export PERF_EXEC_PATH=$perfdir
fi
+# Disable lsan to avoid warnings about python memory leaks.
+export ASAN_OPTIONS=detect_leaks=0
+
cleanup() {
rm -f perf.data
rm -f perf.data.old
@@ -52,8 +55,8 @@ find_str_or_fail() {
# check if perf is compiled with libtraceevent support
skip_no_probe_record_support() {
- perf version --build-options | grep -q " OFF .* HAVE_LIBTRACEEVENT" && return 2
- return 0
+ perf check feature -q libtraceevent && return 0
+ return 2
}
prepare_perf_data() {
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
index 319f36ebb9a4..33387c329f92 100755
--- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -4,6 +4,13 @@
set -e
+# Skip if there's no probe command.
+if ! perf | grep probe
+then
+ echo "Skip: probe command isn't present"
+ exit 2
+fi
+
# skip if there's no gcc
if ! [ -x "$(command -v gcc)" ]; then
echo "failed: no gcc compiler"
@@ -77,7 +84,7 @@ gcc -g -Og -flto -c ${temp_dir}/testfile-foo.c -o ${temp_dir}/testfile-foo.o
gcc -g -Og -c ${temp_dir}/testfile-main.c -o ${temp_dir}/testfile-main.o
gcc -g -Og -o ${temp_dir}/testfile ${temp_dir}/testfile-foo.o ${temp_dir}/testfile-main.o
-perf probe -x ${temp_dir}/testfile --funcs foo
+perf probe -x ${temp_dir}/testfile --funcs foo | grep "foo"
perf probe -x ${temp_dir}/testfile foo
cleanup
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 3146a1eece07..708a13f00635 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# Check open filename arg using perf trace + vfs_getname
+# Check open filename arg using perf trace + vfs_getname (exclusive)
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
# then use it with 'perf trace' using 'touch' to write to a temp file, then
@@ -19,7 +19,7 @@ skip_if_no_perf_trace || exit 2
. "$(dirname $0)"/lib/probe_vfs_getname.sh
trace_open_vfs_getname() {
- evts="$(echo "$(perf list syscalls:sys_enter_open* 2>/dev/null | grep -E 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/')" | sed ':a;N;s:\n:,:g')"
+ evts="$(echo "$(perf list tracepoint 2>/dev/null | grep -E 'syscalls:sys_enter_open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/')" | sed ':a;N;s:\n:,:g')"
perf trace -e $evts touch $file 2>&1 | \
grep -E " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +\"?${file}\"?, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
new file mode 100755
index 000000000000..8d1e6bbeac90
--- /dev/null
+++ b/tools/perf/tests/shell/trace_btf_enum.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+# perf trace enum augmentation tests
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+set -e
+
+syscall="landlock_add_rule"
+non_syscall="timer:hrtimer_init,timer:hrtimer_start"
+
+TESTPROG="perf test -w landlock"
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+skip_if_no_perf_trace || exit 2
+
+check_vmlinux() {
+ echo "Checking if vmlinux exists"
+ if ! ls /sys/kernel/btf/vmlinux 1>/dev/null 2>&1
+ then
+ echo "trace+enum test [Skipped missing vmlinux BTF support]"
+ err=2
+ fi
+}
+
+trace_landlock() {
+ echo "Tracing syscall ${syscall}"
+
+ # quick trial run just to see if landlock_add_rule is available
+ if ! perf trace $TESTPROG 2>&1 | grep -q landlock
+ then
+ echo "No landlock system call found, skipping to non-syscall tracing."
+ return
+ fi
+
+ if perf trace -e $syscall $TESTPROG 2>&1 | \
+ grep -q -E ".*landlock_add_rule\(ruleset_fd: 11, rule_type: (LANDLOCK_RULE_PATH_BENEATH|LANDLOCK_RULE_NET_PORT), rule_attr: 0x[a-f0-9]+, flags: 45\) = -1.*"
+ then
+ err=0
+ else
+ err=1
+ fi
+}
+
+trace_non_syscall() {
+ echo "Tracing non-syscall tracepoint ${non-syscall}"
+ if perf trace -e $non_syscall --max-events=1 2>&1 | \
+ grep -q -E '.*timer:hrtimer_.*\(.*mode: HRTIMER_MODE_.*\)$'
+ then
+ err=0
+ else
+ err=1
+ fi
+}
+
+check_vmlinux
+
+if [ $err = 0 ]; then
+ trace_landlock
+fi
+
+if [ $err = 0 ]; then
+ trace_non_syscall
+fi
+
+exit $err
diff --git a/tools/perf/tests/shell/trace_btf_general.sh b/tools/perf/tests/shell/trace_btf_general.sh
new file mode 100755
index 000000000000..e9ee727f3433
--- /dev/null
+++ b/tools/perf/tests/shell/trace_btf_general.sh
@@ -0,0 +1,94 @@
+#!/bin/bash
+# perf trace BTF general tests
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+set -e
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+file1=$(mktemp /tmp/file1_XXXX)
+file2=$(echo $file1 | sed 's/file1/file2/g')
+
+buffer="buffer content"
+perf_config_tmp=$(mktemp /tmp/.perfconfig_XXXXX)
+
+trap cleanup EXIT TERM INT HUP
+
+check_vmlinux() {
+ echo "Checking if vmlinux BTF exists"
+ if [ ! -f /sys/kernel/btf/vmlinux ]
+ then
+ echo "Skipped due to missing vmlinux BTF"
+ return 2
+ fi
+ return 0
+}
+
+trace_test_string() {
+ echo "Testing perf trace's string augmentation"
+ if ! perf trace -e renameat* --max-events=1 -- mv ${file1} ${file2} 2>&1 | \
+ grep -q -E "^mv/[0-9]+ renameat(2)?\(.*, \"${file1}\", .*, \"${file2}\", .*\) += +[0-9]+$"
+ then
+ echo "String augmentation test failed"
+ err=1
+ fi
+}
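+# Illustrative line the grep above expects (file names assumed; arg names,
+# timestamps and durations are disabled via trace_config below):
+#   mv/12345 renameat2(CWD, "/tmp/file1_ab12", CWD, "/tmp/file2_ab12", 0) = 0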
+
+trace_test_buffer() {
+ echo "Testing perf trace's buffer augmentation"
+ # echo will append a newline (ASCII 10) to the end of the buffer
+ if ! perf trace -e write --max-events=1 -- echo "${buffer}" 2>&1 | \
+ grep -q -E "^echo/[0-9]+ write\([0-9]+, ${buffer}.*, [0-9]+\) += +[0-9]+$"
+ then
+ echo "Buffer augmentation test failed"
+ err=1
+ fi
+}
+
+trace_test_struct_btf() {
+ echo "Testing perf trace's struct augmentation"
+ if ! perf trace -e clock_nanosleep --force-btf --max-events=1 -- sleep 1 2>&1 | \
+ grep -q -E "^sleep/[0-9]+ clock_nanosleep\(0, 0, \{1,\}, 0x[0-9a-f]+\) += +[0-9]+$"
+ then
+ echo "BTF struct augmentation test failed"
+ err=1
+ fi
+}
+
+cleanup() {
+ rm -rf ${file1} ${file2} ${perf_config_tmp}
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+
+# don't overwrite user's perf config
+trace_config() {
+ export PERF_CONFIG=${perf_config_tmp}
+ perf config trace.show_arg_names=false trace.show_duration=false \
+ trace.show_timestamp=false trace.args_alignment=0
+}
+
+skip_if_no_perf_trace || exit 2
+check_vmlinux || exit 2
+
+trace_config
+
+trace_test_string
+
+if [ $err = 0 ]; then
+ trace_test_buffer
+fi
+
+if [ $err = 0 ]; then
+ trace_test_struct_btf
+fi
+
+cleanup
+
+exit $err
diff --git a/tools/perf/tests/shell/trace_exit_race.sh b/tools/perf/tests/shell/trace_exit_race.sh
new file mode 100755
index 000000000000..fbb0adc33a88
--- /dev/null
+++ b/tools/perf/tests/shell/trace_exit_race.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+# perf trace exit race
+# SPDX-License-Identifier: GPL-2.0
+
+# Check that the last events of a perf trace'd subprocess are not
+# lost. Specifically, trace the exit_group syscall of "true" 10 times and
+# ensure the output contains 10 matching lines.
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_trace || exit 2
+
+if [ "$1" = "-v" ]; then
+ verbose="1"
+fi
+
+iter=10
+regexp=" +[0-9]+\.[0-9]+ [0-9]+ syscalls:sys_enter_exit_group\(\)$"
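+# Example line the regexp is meant to match (illustrative; --no-comm output
+# has no process name):
+#      0.000 12345 syscalls:sys_enter_exit_group()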
+
+trace_shutdown_race() {
+ for _ in $(seq $iter); do
+ perf trace --no-comm -e syscalls:sys_enter_exit_group true 2>>$file
+ done
+ result="$(grep -c -E "$regexp" $file)"
+ [ $result = $iter ]
+}
+
+
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+# Do not use the user's ~/.perfconfig file, it may change the output
+# via trace.{show_timestamp,show_prefix,etc}
+export PERF_CONFIG=/dev/null
+
+trace_shutdown_race
+err=$?
+
+if [ $err != 0 ] && [ "${verbose}" = "1" ]; then
+ lines_not_matching=$(mktemp /tmp/temporary_file.XXXXX)
+ if grep -v -E "$regexp" $file > $lines_not_matching ; then
+ echo "Lines not matching the expected regexp: '$regexp':"
+ cat $lines_not_matching
+ else
+ echo "Missing output, expected $iter but only got $result"
+ fi
+ rm -f $lines_not_matching
+fi
+
+rm -f ${file}
+exit $err