path: root/tools/perf/tests/shell
Diffstat (limited to 'tools/perf/tests/shell')
-rwxr-xr-x  tools/perf/tests/shell/amd-ibs-swfilt.sh | 67
-rwxr-xr-x  tools/perf/tests/shell/annotate.sh | 56
-rwxr-xr-x  tools/perf/tests/shell/base_report/setup.sh | 18
-rwxr-xr-x  tools/perf/tests/shell/base_report/test_basic.sh | 52
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S | 2
-rwxr-xr-x  tools/perf/tests/shell/diff.sh | 12
-rw-r--r--  tools/perf/tests/shell/lib/attr.py | 8
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py | 7
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py | 12
-rw-r--r--  tools/perf/tests/shell/lib/probe_vfs_getname.sh | 20
-rw-r--r--  tools/perf/tests/shell/lib/stat_output.sh | 13
-rwxr-xr-x  tools/perf/tests/shell/perf-report-hierarchy.sh | 43
-rwxr-xr-x  tools/perf/tests/shell/perftool-testsuite_probe.sh | 1
-rwxr-xr-x  tools/perf/tests/shell/probe_vfs_getname.sh | 9
-rwxr-xr-x  tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 1
-rwxr-xr-x  tools/perf/tests/shell/record+script_probe_vfs_getname.sh | 9
-rwxr-xr-x  tools/perf/tests/shell/record.sh | 92
-rwxr-xr-x  tools/perf/tests/shell/record_bpf_filter.sh | 4
-rwxr-xr-x  tools/perf/tests/shell/record_lbr.sh | 5
-rwxr-xr-x  tools/perf/tests/shell/record_offcpu.sh | 71
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh | 2
-rwxr-xr-x  tools/perf/tests/shell/stat+event_uniquifying.sh | 77
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh | 14
-rwxr-xr-x  tools/perf/tests/shell/stat+std_output.sh | 8
-rwxr-xr-x  tools/perf/tests/shell/stat.sh | 83
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh | 112
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pmu.sh | 48
-rwxr-xr-x  tools/perf/tests/shell/stat_metrics_values.sh | 17
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh | 72
-rwxr-xr-x  tools/perf/tests/shell/test_data_symbol.sh | 61
-rwxr-xr-x  tools/perf/tests/shell/test_intel_pt.sh | 5
-rwxr-xr-x  tools/perf/tests/shell/test_stat_intel_tpebs.sh | 89
-rwxr-xr-x  tools/perf/tests/shell/test_uprobe_from_different_cu.sh | 11
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh | 10
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_enum.sh | 3
-rwxr-xr-x  tools/perf/tests/shell/trace_btf_general.sh | 1
-rwxr-xr-x  tools/perf/tests/shell/trace_exit_race.sh | 1
-rwxr-xr-x  tools/perf/tests/shell/trace_record_replay.sh | 21
-rwxr-xr-x  tools/perf/tests/shell/trace_summary.sh | 77
39 files changed, 975 insertions, 239 deletions
diff --git a/tools/perf/tests/shell/amd-ibs-swfilt.sh b/tools/perf/tests/shell/amd-ibs-swfilt.sh
new file mode 100755
index 000000000000..83937aa687cc
--- /dev/null
+++ b/tools/perf/tests/shell/amd-ibs-swfilt.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+# AMD IBS software filtering
+
+echo "check availability of IBS swfilt"
+
+# check if IBS PMU is available
+if [ ! -d /sys/bus/event_source/devices/ibs_op ]; then
+ echo "[SKIP] IBS PMU does not exist"
+ exit 2
+fi
+
+# check if IBS PMU has swfilt format
+if [ ! -f /sys/bus/event_source/devices/ibs_op/format/swfilt ]; then
+ echo "[SKIP] IBS PMU does not have swfilt"
+ exit 2
+fi
+
+echo "run perf record with modifier and swfilt"
+
+# setting any modifiers should fail
+perf record -B -e ibs_op//u -o /dev/null true 2> /dev/null
+if [ $? -eq 0 ]; then
+ echo "[FAIL] IBS PMU should not accept exclude_kernel"
+ exit 1
+fi
+
+# setting it with swfilt should be fine
+perf record -B -e ibs_op/swfilt/u -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt for exclude_kernel"
+ exit 1
+fi
+
+# setting it with swfilt=1 should be fine
+perf record -B -e ibs_op/swfilt=1/k -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt for exclude_user"
+ exit 1
+fi
+
+# check ibs_fetch PMU as well
+perf record -B -e ibs_fetch/swfilt/u -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS fetch PMU cannot handle swfilt for exclude_kernel"
+ exit 1
+fi
+
+# check system wide recording
+perf record -aB --synth=no -e ibs_op/swfilt/k -o /dev/null true
+if [ $? -ne 0 ]; then
+ echo "[FAIL] IBS op PMU cannot handle swfilt in system-wide mode"
+ exit 1
+fi
+
+echo "check number of samples with swfilt"
+
+kernel_sample=$(perf record -e ibs_op/swfilt/u -o- true | perf script -i- -F misc | grep -c ^K)
+if [ ${kernel_sample} -ne 0 ]; then
+ echo "[FAIL] unexpected kernel samples: " ${kernel_sample}
+ exit 1
+fi
+
+user_sample=$(perf record -e ibs_fetch/swfilt/k -o- true | perf script -i- -F misc | grep -c ^U)
+if [ ${user_sample} -ne 0 ]; then
+ echo "[FAIL] unexpected user samples: " ${user_sample}
+ exit 1
+fi
diff --git a/tools/perf/tests/shell/annotate.sh b/tools/perf/tests/shell/annotate.sh
index 1590a37363de..16a1ccd06089 100755
--- a/tools/perf/tests/shell/annotate.sh
+++ b/tools/perf/tests/shell/annotate.sh
@@ -35,54 +35,78 @@ trap_cleanup() {
trap trap_cleanup EXIT TERM INT
test_basic() {
- echo "Basic perf annotate test"
- if ! perf record -o "${perfdata}" ${testprog} 2> /dev/null
+ mode=$1
+ echo "${mode} perf annotate test"
+ if [ "x${mode}" == "xBasic" ]
then
- echo "Basic annotate [Failed: perf record]"
+ perf record -o "${perfdata}" ${testprog} 2> /dev/null
+ else
+ perf record -o - ${testprog} 2> /dev/null > "${perfdata}"
+ fi
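+ # Note: "-o -" writes pipe-mode perf.data, so the Pipe case exercises
+ # a different perf.data header path than the regular file-mode record.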
+ if [ "x$?" != "x0" ]
+ then
+ echo "${mode} annotate [Failed: perf record]"
err=1
return
fi
# Generate the annotated output file
- perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null | head -250 > "${perfout}"
+ if [ "x${mode}" == "xBasic" ]
+ then
+ perf annotate --no-demangle -i "${perfdata}" --stdio 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - --stdio 2> /dev/null < "${perfdata}" > "${perfout}"
+ fi
# check if it has the target symbol
- if ! grep "${testsym}" "${perfout}"
+ if ! head -250 "${perfout}" | grep -q "${testsym}"
then
- echo "Basic annotate [Failed: missing target symbol]"
+ echo "${mode} annotate [Failed: missing target symbol]"
err=1
return
fi
# check if it has the disassembly lines
- if ! grep "${disasm_regex}" "${perfout}"
+ if ! head -250 "${perfout}" | grep -q "${disasm_regex}"
then
- echo "Basic annotate [Failed: missing disasm output from default disassembler]"
+ echo "${mode} annotate [Failed: missing disasm output from default disassembler]"
err=1
return
fi
# check again with a target symbol name
- if ! perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null | \
- head -250 | grep -m 3 "${disasm_regex}"
+ if [ "x${mode}" == "xBasic" ]
then
- echo "Basic annotate [Failed: missing disasm output when specifying the target symbol]"
+ perf annotate --no-demangle -i "${perfdata}" "${testsym}" 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - "${testsym}" 2> /dev/null < "${perfdata}" > "${perfout}"
+ fi
+
+ if ! head -250 "${perfout}"| grep -q -m 3 "${disasm_regex}"
+ then
+ echo "${mode} annotate [Failed: missing disasm output when specifying the target symbol]"
err=1
return
fi
# check one more with external objdump tool (forced by --objdump option)
- if ! perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null | \
- head -250 | grep -m 3 "${disasm_regex}"
+ if [ "x${mode}" == "xBasic" ]
+ then
+ perf annotate --no-demangle -i "${perfdata}" --objdump=objdump 2> /dev/null > "${perfout}"
+ else
+ perf annotate --no-demangle -i - "${testsym}" 2> /dev/null < "${perfdata}" > "${perfout}"
+ fi
+ if ! head -250 "${perfout}" | grep -q -m 3 "${disasm_regex}"
then
- echo "Basic annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
+ echo "${mode} annotate [Failed: missing disasm output from non default disassembler (using --objdump)]"
err=1
return
fi
- echo "Basic annotate test [Success]"
+ echo "${mode} annotate test [Success]"
}
-test_basic
+test_basic Basic
+test_basic Pipe
cleanup
exit $err
diff --git a/tools/perf/tests/shell/base_report/setup.sh b/tools/perf/tests/shell/base_report/setup.sh
index b03501b2e8fc..8634e7e0dda6 100755
--- a/tools/perf/tests/shell/base_report/setup.sh
+++ b/tools/perf/tests/shell/base_report/setup.sh
@@ -15,6 +15,8 @@
# include working environment
. ../common/init.sh
+TEST_RESULT=0
+
test -d "$HEADER_TAR_DIR" || mkdir -p "$HEADER_TAR_DIR"
SW_EVENT="cpu-clock"
@@ -26,7 +28,21 @@ PERF_EXIT_CODE=$?
CHECK_EXIT_CODE=$?
print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data file"
-TEST_RESULT=$?
+(( TEST_RESULT += $? ))
+
+# Some minimal parallel workload.
+$CMD_PERF record --latency -o $CURRENT_TEST_DIR/perf.data.1 bash -c "for i in {1..100} ; do cat /proc/cpuinfo 1> /dev/null & done; sleep 1" 2> $LOGS_DIR/setup-latency.log
+PERF_EXIT_CODE=$?
+
+echo ==================
+cat $LOGS_DIR/setup-latency.log
+echo ==================
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/setup-latency.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "prepare the perf.data.1 file"
+(( TEST_RESULT += $? ))
print_overall_results $TEST_RESULT
exit $?
diff --git a/tools/perf/tests/shell/base_report/test_basic.sh b/tools/perf/tests/shell/base_report/test_basic.sh
index 2398eba4d3fd..adfd8713b8f8 100755
--- a/tools/perf/tests/shell/base_report/test_basic.sh
+++ b/tools/perf/tests/shell/base_report/test_basic.sh
@@ -183,6 +183,58 @@ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "symbol filter"
(( TEST_RESULT += $? ))
+### latency and parallelism
+
+# Record with --latency should record with context switches.
+$CMD_PERF report -i $CURRENT_TEST_DIR/perf.data.1 --stdio --header-only > $LOGS_DIR/latency_header.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl ", context_switch = 1, " < $LOGS_DIR/latency_header.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency header"
+(( TEST_RESULT += $? ))
+
+
+# The default report for latency profile should show Overhead and Latency fields (in that order).
+$CMD_PERF report --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_default.log 2> $LOGS_DIR/latency_default.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Overhead Latency Command" < $LOGS_DIR/latency_default.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_default.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "default report for latency profile"
+(( TEST_RESULT += $? ))
+
+
+# The latency report for latency profile should show Latency and Overhead fields (in that order).
+$CMD_PERF report --latency --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/latency_latency.log 2> $LOGS_DIR/latency_latency.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Latency Overhead Command" < $LOGS_DIR/latency_latency.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/latency_latency.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "latency report for latency profile"
+(( TEST_RESULT += $? ))
+
+
+# Ensure parallelism histogram with parallelism filter does not fail/crash.
+$CMD_PERF report --hierarchy --sort latency,parallelism,comm,symbol --parallelism=1,2 --stdio -i $CURRENT_TEST_DIR/perf.data.1 > $LOGS_DIR/parallelism_hierarchy.log 2> $LOGS_DIR/parallelism_hierarchy.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "# Latency Parallelism / Command / Symbol" < $LOGS_DIR/parallelism_hierarchy.log
+CHECK_EXIT_CODE=$?
+../common/check_errors_whitelisted.pl "stderr-whitelist.txt" < $LOGS_DIR/parallelism_hierarchy.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "parallelism histogram"
+(( TEST_RESULT += $? ))
+
+
# TODO: $CMD_PERF report -n --showcpuutilization -TUxDg 2> 01.log
# print overall results
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
index 75cf084a927d..577760046772 100644
--- a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
@@ -26,3 +26,5 @@ skip:
mov x0, #0
mov x8, #93 // __NR_exit syscall
svc #0
+
+.section .note.GNU-stack, "", @progbits
diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh
index 14b87af88703..e05a5dc49479 100755
--- a/tools/perf/tests/shell/diff.sh
+++ b/tools/perf/tests/shell/diff.sh
@@ -39,13 +39,13 @@ make_data() {
file="$1"
if ! perf record -o "${file}" ${testprog} 2> /dev/null
then
- echo "Workload record [Failed record]"
+ echo "Workload record [Failed record]" >&2
echo 1
return
fi
if ! perf report -i "${file}" -q | grep -q "${testsym}"
then
- echo "Workload record [Failed missing output]"
+ echo "Workload record [Failed missing output]" >&2
echo 1
return
fi
@@ -55,12 +55,12 @@ make_data() {
test_two_files() {
echo "Basic two file diff test"
err=$(make_data "${perfdata1}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
err=$(make_data "${perfdata2}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
@@ -77,12 +77,12 @@ test_two_files() {
test_three_files() {
echo "Basic three file diff test"
err=$(make_data "${perfdata1}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
err=$(make_data "${perfdata2}")
- if [ $err != 0 ]
+ if [ "$err" != 0 ]
then
return
fi
diff --git a/tools/perf/tests/shell/lib/attr.py b/tools/perf/tests/shell/lib/attr.py
index 3db9a7d78715..bfccc727d9b2 100644
--- a/tools/perf/tests/shell/lib/attr.py
+++ b/tools/perf/tests/shell/lib/attr.py
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-from __future__ import print_function
-
+import configparser
import os
import sys
import glob
@@ -13,11 +12,6 @@ import re
import shutil
import subprocess
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
def data_equal(a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index b066d721f897..9e772a89ce38 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -19,6 +19,7 @@ ap.add_argument('--per-cluster', action='store_true')
ap.add_argument('--per-die', action='store_true')
ap.add_argument('--per-node', action='store_true')
ap.add_argument('--per-socket', action='store_true')
+ap.add_argument('--metric-only', action='store_true')
ap.add_argument('--file', type=argparse.FileType('r'), default=sys.stdin)
args = ap.parse_args()
@@ -64,6 +65,8 @@ def check_json_output(expected_items):
'socket': lambda x: True,
'thread': lambda x: True,
'unit': lambda x: True,
+ 'insn per cycle': lambda x: isfloat(x),
+ 'GHz': lambda x: True, # FIXME: it seems unintended for --metric-only
}
input = '[\n' + ','.join(Lines) + '\n]'
for item in json.loads(input):
@@ -78,6 +81,8 @@ def check_json_output(expected_items):
pass
elif count - 1 in expected_items and 'metric-threshold' in item:
pass
+ elif count in expected_items and 'insn per cycle' in item:
+ pass
elif count not in expected_items:
raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
f' in \'{item}\'')
@@ -95,6 +100,8 @@ try:
expected_items = [6, 8]
elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
expected_items = [7, 9]
+ elif args.metric_only:
+ expected_items = [1, 2]
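+ # Assumed shape of a --metric-only line, for illustration: a JSON object
+ # with one or two fields, e.g. {"insn per cycle" : "0.50", "GHz" : "3.00"}.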
else:
# If no option is specified, don't check the number of items.
expected_items = -1
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 0b94216c9c46..dea8ef1977bf 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -35,7 +35,8 @@ class TestError:
class Validator:
- def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
+ def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='',
+ workload='true', metrics='', cputype='cpu'):
self.rulefname = rulefname
self.reportfname = reportfname
self.rules = None
@@ -43,6 +44,7 @@ class Validator:
self.metrics = self.__set_metrics(metrics)
self.skiplist = set()
self.tolerance = t
+ self.cputype = cputype
self.workloads = [x for x in workload.split(",") if x]
self.wlidx = 0 # idx of current workloads
@@ -377,7 +379,7 @@ class Validator:
def _run_perf(self, metric, workload: str):
tool = 'perf'
- command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
+ command = [tool, 'stat', '--cputype', self.cputype, '-j', '-M', f"{metric}", "-a"]
wl = workload.split()
command.extend(wl)
print(" ".join(command))
@@ -443,6 +445,8 @@ class Validator:
if 'MetricName' not in m:
print("Warning: no metric name")
continue
+ if 'Unit' in m and m['Unit'] != self.cputype:
+ continue
name = m['MetricName'].lower()
self.metrics.add(name)
if 'ScaleUnit' in m and (m['ScaleUnit'] == '1%' or m['ScaleUnit'] == '100%'):
@@ -578,6 +582,8 @@ def main() -> None:
parser.add_argument(
"-wl", help="Workload to run while data collection", default="true")
parser.add_argument("-m", help="Metric list to validate", default="")
+ parser.add_argument("-cputype", help="Only test metrics for the given CPU/PMU type",
+ default="cpu")
args = parser.parse_args()
outpath = Path(args.output_dir)
reportf = Path.joinpath(outpath, 'perf_report.json')
@@ -586,7 +592,7 @@ def main() -> None:
validator = Validator(args.rule, reportf, debug=args.debug,
datafname=datafile, fullrulefname=fullrule, workload=args.wl,
- metrics=args.m)
+ metrics=args.m, cputype=args.cputype)
ret = validator.test()
return ret
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 5c33ec7a5a63..58debce9ab42 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -13,14 +13,28 @@ cleanup_probe_vfs_getname() {
add_probe_vfs_getname() {
add_probe_verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
- result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
- line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+ result_initname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+initname.*"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_initname_re" | sed -r "s/$result_initname_re/\1/")
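+ # For illustration (the exact number varies by kernel version), a
+ # "perf probe -L getname_flags" source line such as:
+ #      90  initname(result, ...);
+ # makes the regex above capture "90" as the probe line.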
+
+ # Search the old regular expressions so that this will
+ # pass on older kernels as well.
+ if [ -z "$line" ] ; then
+ result_filename_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*"
+ line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_filename_re" | sed -r "s/$result_filename_re/\1/")
+ fi
+
if [ -z "$line" ] ; then
result_aname_re="[[:space:]]+([[:digit:]]+)[[:space:]]+result->aname = NULL;"
line=$(perf probe -L getname_flags 2>&1 | grep -E "$result_aname_re" | sed -r "s/$result_aname_re/\1/")
fi
+
+ if [ -z "$line" ] ; then
+ echo "Could not find probeable line"
+ return 2
+ fi
+
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
- perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring" || return 1
fi
}
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 9a176ceae4a3..c2ec7881ec1d 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -148,6 +148,19 @@ check_per_socket()
echo "[Success]"
}
+check_metric_only()
+{
+ echo -n "Checking $1 output: metric only "
+ if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+ then
+ echo "[Skip] CPU-measurement counter facility not installed"
+ return
+ fi
+ perf stat --metric-only $2 -e instructions,cycles true
+ commachecker --metric-only
+ echo "[Success]"
+}
+
# The perf stat options for per-socket, per-core, per-die
# and -A ( no_aggr mode ) uses the info fetched from this
# directory: "/sys/devices/system/cpu/cpu*/topology". For
diff --git a/tools/perf/tests/shell/perf-report-hierarchy.sh b/tools/perf/tests/shell/perf-report-hierarchy.sh
new file mode 100755
index 000000000000..02e3b6aee4ed
--- /dev/null
+++ b/tools/perf/tests/shell/perf-report-hierarchy.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# perf report --hierarchy
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@redhat.com>
+
+set -e
+
+temp_dir=$(mktemp -d /tmp/perf-test-report.XXXXXXXXXX)
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ sane=$(echo "${temp_dir}" | cut -b 1-21)
+ if [ "${sane}" = "/tmp/perf-test-report" ] ; then
+ echo "--- Cleaning up ---"
+ rm -rf "${temp_dir:?}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+test_report_hierarchy()
+{
+ echo "perf report --hierarchy"
+
+ perf_data="${temp_dir}/perf-report-hierarchy-perf.data"
+ perf record -o "${perf_data}" uname
+ perf report --hierarchy -i "${perf_data}" > /dev/null
+ echo "perf report --hierarchy test [Success]"
+}
+
+test_report_hierarchy
+
+cleanup
+
+exit 0
diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh
index 7b1bfd0f888f..3863df16c19b 100755
--- a/tools/perf/tests/shell/perftool-testsuite_probe.sh
+++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh
@@ -2,6 +2,7 @@
# perftool-testsuite_probe (exclusive)
# SPDX-License-Identifier: GPL-2.0
+[ "$(id -u)" = 0 ] || exit 2
test -d "$(dirname "$0")/base_probe" || exit 2
cd "$(dirname "$0")/base_probe" || exit 2
status=0
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index 0c5aacc446b3..0f52654c914a 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -8,11 +8,18 @@
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_probe || exit 2
+[ "$(id -u)" = 0 ] || exit 2
# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname $0)"/lib/probe_vfs_getname.sh
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
cleanup_probe_vfs_getname
exit $err
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index d5e5193cceb6..c4bab5b5cc59 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -105,6 +105,7 @@ delete_libc_inet_pton_event() {
# Check for IPv6 interface existence
ip a sh lo | grep -F -q inet6 || exit 2
+[ "$(id -u)" = 0 ] || exit 2
skip_if_no_perf_probe && \
add_libc_inet_pton_event && \
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index 5940fdc1df37..1ad252f0d36e 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -13,6 +13,7 @@
. "$(dirname "$0")/lib/probe.sh"
skip_if_no_perf_probe || exit 2
+[ "$(id -u)" = 0 ] || exit 2
# shellcheck source=lib/probe_vfs_getname.sh
. "$(dirname "$0")/lib/probe_vfs_getname.sh"
@@ -34,8 +35,14 @@ perf_script_filenames() {
grep -E " +touch +[0-9]+ +\[[0-9]+\] +[0-9]+\.[0-9]+: +probe:vfs_getname[_0-9]*: +\([[:xdigit:]]+\) +pathname=\"${file}\""
}
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
if [ $err -ne 0 ] ; then
exit $err
fi
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
index 0fc7a909ae9b..587f62e34414 100755
--- a/tools/perf/tests/shell/record.sh
+++ b/tools/perf/tests/shell/record.sh
@@ -34,13 +34,15 @@ default_fd_limit=$(ulimit -Sn)
min_fd_limit=$(($(getconf _NPROCESSORS_ONLN) * 16))
cleanup() {
- rm -rf "${perfdata}"
- rm -rf "${perfdata}".old
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ rm -f "${script_output}"
trap - EXIT TERM INT
}
trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
cleanup
exit 1
}
@@ -231,29 +233,50 @@ test_cgroup() {
test_leader_sampling() {
echo "Basic leader sampling test"
- if ! perf record -o "${perfdata}" -e "{instructions,instructions}:Su" -- \
+ if ! perf record -o "${perfdata}" -e "{cycles,cycles}:Su" -- \
perf test -w brstack 2> /dev/null
then
echo "Leader sampling [Failed record]"
err=1
return
fi
+ perf script -i "${perfdata}" | grep brstack > $script_output
+ # Check if the two cycle counts are equal in each record.
+ # However, the throttling code doesn't consider event grouping. During throttling,
+ # only the leader is stopped, causing the slave's counts to be significantly higher.
+ # As a temporary workaround, set the tolerance rate to 80%.
+ # TODO: Revert the tolerance logic once the throttling mechanism is fixed.
index=0
- perf script -i "${perfdata}" > $script_output
+ valid_counts=0
+ invalid_counts=0
+ tolerance_rate=0.8
while IFS= read -r line
do
- # Check if the two instruction counts are equal in each record
- instructions=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="instructions:") print $(i-1)}')
- if [ $(($index%2)) -ne 0 ] && [ ${instructions}x != ${prev_instructions}x ]
+ cycles=$(echo $line | awk '{for(i=1;i<=NF;i++) if($i=="cycles:") print $(i-1)}')
+ if [ $(($index%2)) -ne 0 ] && [ ${cycles}x != ${prev_cycles}x ]
then
- echo "Leader sampling [Failed inconsistent instructions count]"
- err=1
- return
+ invalid_counts=$(($invalid_counts+1))
+ else
+ valid_counts=$(($valid_counts+1))
fi
index=$(($index+1))
- prev_instructions=$instructions
- done < $script_output
- echo "Basic leader sampling test [Success]"
+ prev_cycles=$cycles
+ done < "${script_output}"
+ total_counts=$(bc <<< "$invalid_counts+$valid_counts")
+ if (( $(bc <<< "$total_counts <= 0") ))
+ then
+ echo "Leader sampling [No sample generated]"
+ err=1
+ return
+ fi
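+ # Worked example: with 9 valid and 1 invalid pairs the invalid ratio is
+ # 1/10 = 0.10, which is below (1 - 0.8) = 0.20, so isok is 0 and the
+ # test passes.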
+ isok=$(bc <<< "scale=2; if (($invalid_counts/$total_counts) < (1-$tolerance_rate)) { 0 } else { 1 };")
+ if [ $isok -eq 1 ]
+ then
+ echo "Leader sampling [Failed inconsistent cycles count]"
+ err=1
+ else
+ echo "Basic leader sampling test [Success]"
+ fi
}
test_topdown_leader_sampling() {
@@ -273,27 +296,42 @@ test_topdown_leader_sampling() {
}
test_precise_max() {
+ local -i skipped=0
+
echo "precise_max attribute test"
- if ! perf stat -e "cycles,instructions" true 2> /dev/null
+ # Just to make sure event cycles is supported for sampling
+ if perf record -o "${perfdata}" -e "cycles" true 2> /dev/null
then
- echo "precise_max attribute [Skipped no hardware events]"
- return
+ if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+ then
+ echo "precise_max attribute [Failed cycles:P event]"
+ err=1
+ return
+ fi
+ else
+ echo "precise_max attribute [Skipped no cycles:P event]"
+ ((skipped+=1))
fi
- # Just to make sure it doesn't fail
- if ! perf record -o "${perfdata}" -e "cycles:P" true 2> /dev/null
+ # On s390 event instructions is not supported for perf record
+ if perf record -o "${perfdata}" -e "instructions" true 2> /dev/null
then
- echo "precise_max attribute [Failed cycles:P event]"
- err=1
- return
+ # On AMD, cycles and instructions events are treated differently
+ if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+ then
+ echo "precise_max attribute [Failed instructions:P event]"
+ err=1
+ return
+ fi
+ else
+ echo "precise_max attribute [Skipped no instructions:P event]"
+ ((skipped+=1))
fi
- # On AMD, cycles and instructions events are treated differently
- if ! perf record -o "${perfdata}" -e "instructions:P" true 2> /dev/null
+ if [ $skipped -eq 2 ]
then
- echo "precise_max attribute [Failed instructions:P event]"
- err=1
- return
+ echo "precise_max attribute [Skipped no hardware events]"
+ else
+ echo "precise_max attribute test [Success]"
fi
- echo "precise_max attribute test [Success]"
}
# raise the limit of file descriptors to minimum
diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
index 1b58ccc1fd88..4d6c3c1b7fb9 100755
--- a/tools/perf/tests/shell/record_bpf_filter.sh
+++ b/tools/perf/tests/shell/record_bpf_filter.sh
@@ -89,7 +89,7 @@ test_bpf_filter_fail() {
test_bpf_filter_group() {
echo "Group bpf-filter test"
- if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \
+ if ! perf record -e task-clock --filter 'period > 1000, ip > 0' \
-o /dev/null true 2>/dev/null
then
echo "Group bpf-filter test [Failed should succeed]"
@@ -97,7 +97,7 @@ test_bpf_filter_group() {
return
fi
- if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \
+ if ! perf record -e task-clock --filter 'period > 1000 , cpu > 0 || ip > 0' \
-o /dev/null true 2>&1 | grep -q PERF_SAMPLE_CPU
then
echo "Group bpf-filter test [Failed forbidden CPU]"
diff --git a/tools/perf/tests/shell/record_lbr.sh b/tools/perf/tests/shell/record_lbr.sh
index 8d750ee631f8..6fcb5e52b9b4 100755
--- a/tools/perf/tests/shell/record_lbr.sh
+++ b/tools/perf/tests/shell/record_lbr.sh
@@ -4,7 +4,8 @@
set -e
-if [ ! -f /sys/devices/cpu/caps/branches ] && [ ! -f /sys/devices/cpu_core/caps/branches ]
+if [ ! -f /sys/bus/event_source/devices/cpu/caps/branches ] &&
+ [ ! -f /sys/bus/event_source/devices/cpu_core/caps/branches ]
then
echo "Skip: only x86 CPUs support LBR"
exit 2
@@ -93,7 +94,7 @@ lbr_test() {
return
fi
- zero_nr=$(echo "$out" | grep -c 'branch stack: nr:0' || true)
+ zero_nr=$(echo "$out" | grep -A3 'branch stack: nr:0' | grep thread | grep -cv swapper || true)
r=$(($zero_nr * 100 / $bs_nr))
if [ $r -gt $threshold ]; then
echo "$test [Failed empty br stack ratio exceed $threshold%: $r%]"
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
index 678947fe69ee..21a22efe08f5 100755
--- a/tools/perf/tests/shell/record_offcpu.sh
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -7,6 +7,9 @@ set -e
err=0
perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+ts=$(printf "%u" $((~0 << 32))) # OFF_CPU_TIMESTAMP
+dummy_timestamp=${ts%???} # remove the last 3 digits to match perf script
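+# For illustration: on a 64-bit shell $((~0 << 32)) is 0xffffffff00000000,
+# i.e. 18446744069414584320; dropping the last 3 digits gives
+# 18446744069414584, used below to separate direct samples from the
+# at-the-end dummy samples.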
+
cleanup() {
rm -f ${perfdata}
rm -f ${perfdata}.old
@@ -19,6 +22,9 @@ trap_cleanup() {
}
trap trap_cleanup EXIT TERM INT
+test_above_thresh="Threshold test (above threshold)"
+test_below_thresh="Threshold test (below threshold)"
+
test_offcpu_priv() {
echo "Checking off-cpu privilege"
@@ -88,6 +94,63 @@ test_offcpu_child() {
echo "Child task off-cpu test [Success]"
}
+# task blocks longer than the --off-cpu-thresh, perf should collect a direct sample
+test_offcpu_above_thresh() {
+ echo "${test_above_thresh}"
+
+ # collect direct off-cpu samples for tasks blocked for more than 999ms
+ if ! perf record -e dummy --off-cpu --off-cpu-thresh 999 -o ${perfdata} -- sleep 1 2> /dev/null
+ then
+ echo "${test_above_thresh} [Failed record]"
+ err=1
+ return
+ fi
+ # direct sample's timestamp should be lower than the dummy_timestamp of the at-the-end sample
+ # check if a direct sample exists
+ if ! perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F event | grep -q "offcpu-time"
+ then
+ echo "${test_above_thresh} [Failed missing direct samples]"
+ err=1
+ return
+ fi
+ # there should only be one direct sample, and its period should be higher than off-cpu-thresh
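+ # (an off-cpu sample's period is its blocked time in nanoseconds, so the
+ # 999 ms threshold corresponds to 999000000 below)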
+ if ! perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F period | \
+ awk '{ if (int($1) > 999000000) exit 0; else exit 1; }'
+ then
+ echo "${test_above_thresh} [Failed off-cpu time too short]"
+ err=1
+ return
+ fi
+ echo "${test_above_thresh} [Success]"
+}
+
+# task blocks shorter than the --off-cpu-thresh, perf should collect an at-the-end sample
+test_offcpu_below_thresh() {
+ echo "${test_below_thresh}"
+
+ # set the threshold to 1.2s; the 1s sleep below should produce no direct sample
+ if ! perf record -e dummy --off-cpu --off-cpu-thresh 1200 -o ${perfdata} -- sleep 1 2> /dev/null
+ then
+ echo "${test_below_thresh} [Failed record]"
+ err=1
+ return
+ fi
+ # see if there's an at-the-end sample
+ if ! perf script --time "${dummy_timestamp}," -i ${perfdata} -F event | grep -q 'offcpu-time'
+ then
+ echo "${test_below_thresh} [Failed at-the-end samples cannot be found]"
+ err=1
+ return
+ fi
+ # plus there shouldn't be any direct samples
+ if perf script --time "0, ${dummy_timestamp}" -i ${perfdata} -F event | grep -q 'offcpu-time'
+ then
+ echo "${test_below_thresh} [Failed direct samples are found when they shouldn't be]"
+ err=1
+ return
+ fi
+ echo "${test_below_thresh} [Success]"
+}
test_offcpu_priv
@@ -99,5 +162,13 @@ if [ $err = 0 ]; then
test_offcpu_child
fi
+if [ $err = 0 ]; then
+ test_offcpu_above_thresh
+fi
+
+if [ $err = 0 ]; then
+ test_offcpu_below_thresh
+fi
+
cleanup
exit $err
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index fc2d8cc6e5e0..7a6f6e177402 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -44,6 +44,7 @@ function commachecker()
;; "--per-die") exp=8
;; "--per-cluster") exp=8
;; "--per-cache") exp=8
+ ;; "--metric-only") exp=2
esac
while read line
@@ -75,6 +76,7 @@ check_interval "CSV" "$perf_cmd"
check_event "CSV" "$perf_cmd"
check_per_thread "CSV" "$perf_cmd"
check_per_node "CSV" "$perf_cmd"
+check_metric_only "CSV" "$perf_cmd"
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr "CSV" "$perf_cmd"
diff --git a/tools/perf/tests/shell/stat+event_uniquifying.sh b/tools/perf/tests/shell/stat+event_uniquifying.sh
new file mode 100755
index 000000000000..bf54bd6c3e2e
--- /dev/null
+++ b/tools/perf/tests/shell/stat+event_uniquifying.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+# perf stat events uniquifying
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+stat_output=$(mktemp /tmp/__perf_test.stat_output.XXXXX)
+perf_tool=perf
+err=0
+
+test_event_uniquifying() {
+ # We use `clockticks` in `uncore_imc` to verify the uniquify behavior.
+ pmu="uncore_imc"
+ event="clockticks"
+
+ # If the `-A` option is added, the event should be uniquified.
+ #
+ # $perf list -v clockticks
+ #
+ # List of pre-defined events (to be used in -e or -M):
+ #
+ # uncore_imc_0/clockticks/ [Kernel PMU event]
+ # uncore_imc_1/clockticks/ [Kernel PMU event]
+ # uncore_imc_2/clockticks/ [Kernel PMU event]
+ # uncore_imc_3/clockticks/ [Kernel PMU event]
+ # uncore_imc_4/clockticks/ [Kernel PMU event]
+ # uncore_imc_5/clockticks/ [Kernel PMU event]
+ #
+ # ...
+ #
+ # $perf stat -e clockticks -A -- true
+ #
+ # Performance counter stats for 'system wide':
+ #
+ # CPU0 3,773,018 uncore_imc_0/clockticks/
+ # CPU0 3,609,025 uncore_imc_1/clockticks/
+ # CPU0 0 uncore_imc_2/clockticks/
+ # CPU0 3,230,009 uncore_imc_3/clockticks/
+ # CPU0 3,049,897 uncore_imc_4/clockticks/
+ # CPU0 0 uncore_imc_5/clockticks/
+ #
+ # 0.002029828 seconds time elapsed
+
+ echo "stat event uniquifying test"
+ uniquified_event_array=()
+
+ # Skip if the machine does not have `uncore_imc` device.
+ if ! ${perf_tool} list pmu | grep -q ${pmu}; then
+ echo "Target does not support PMU ${pmu} [Skipped]"
+ err=2
+ return
+ fi
+
+ # Check how many uniquified events.
+ while IFS= read -r line; do
+ uniquified_event=$(echo "$line" | awk '{print $1}')
+ uniquified_event_array+=("${uniquified_event}")
+ done < <(${perf_tool} list -v ${event} | grep ${pmu})
+
+ perf_command="${perf_tool} stat -e $event -A -o ${stat_output} -- true"
+ $perf_command
+
+ # Check the output contains all uniquified events.
+ for uniquified_event in "${uniquified_event_array[@]}"; do
+ if ! cat "${stat_output}" | grep -q "${uniquified_event}"; then
+ echo "Event is not uniquified [Failed]"
+ echo "${perf_command}"
+ cat "${stat_output}"
+ err=1
+ break
+ fi
+ done
+}
+
+test_event_uniquifying
+rm -f "${stat_output}"
+exit $err
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 6b630d33c328..98fb65274ac4 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -173,6 +173,19 @@ check_per_socket()
echo "[Success]"
}
+check_metric_only()
+{
+ echo -n "Checking json output: metric only "
+ if [ "$(uname -m)" = "s390x" ] && ! grep '^facilities' /proc/cpuinfo | grep -qw 67
+ then
+ echo "[Skip] CPU-measurement counter facility not installed"
+ return
+ fi
+ perf stat -j --metric-only -e instructions,cycles -o "${stat_output}" true
+ $PYTHON $pythonchecker --metric-only --file "${stat_output}"
+ echo "[Success]"
+}
+
# The perf stat options for per-socket, per-core, per-die
# and -A ( no_aggr mode ) uses the info fetched from this
# directory: "/sys/devices/system/cpu/cpu*/topology". For
@@ -207,6 +220,7 @@ check_interval
check_event
check_per_thread
check_per_node
+check_metric_only
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index 0f7967be60af..6fee67693ba7 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -30,6 +30,7 @@ trap trap_cleanup EXIT TERM INT
function commachecker()
{
local prefix=1
+ local -i metric_only=0
case "$1"
in "--interval") prefix=2
@@ -41,6 +42,7 @@ function commachecker()
;; "--per-die") prefix=3
;; "--per-cache") prefix=3
;; "--per-cluster") prefix=3
+ ;; "--metric-only") metric_only=1
esac
while read line
@@ -60,6 +62,9 @@ function commachecker()
x=${main_body%#*}
[ "$x" = "" ] && continue
+ # Check metric only - if it has a non-empty result
+ [ $metric_only -eq 1 ] && return 0
+
# Skip metrics without event name
y=${main_body#*#}
for i in "${!skip_metric[@]}"; do
@@ -84,6 +89,8 @@ function commachecker()
exit 1;
}
done < "${stat_output}"
+
+ [ $metric_only -eq 1 ] && exit 1
return 0
}
@@ -95,6 +102,7 @@ check_system_wide "STD" "$perf_cmd"
check_interval "STD" "$perf_cmd"
check_per_thread "STD" "$perf_cmd"
check_per_node "STD" "$perf_cmd"
+check_metric_only "STD" "$perf_cmd"
if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr "STD" "$perf_cmd"
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index 68323d636fb7..8a100a7f2dc1 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# perf stat tests
# SPDX-License-Identifier: GPL-2.0
@@ -67,43 +67,54 @@ test_topdown_groups() {
echo "Topdown event group test [Skipped event parsing failed]"
return
fi
- if perf stat -e '{slots,topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed events not supported]"
- err=1
- return
- fi
- if perf stat -e 'instructions,topdown-retiring,slots' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed slots not reordered first in no-group case]"
- err=1
- return
- fi
- if perf stat -e '{instructions,topdown-retiring,slots}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed slots not reordered first in single group case]"
- err=1
- return
- fi
- if perf stat -e '{instructions,slots},topdown-retiring' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed topdown metrics event not move into slots group]"
- err=1
- return
- fi
- if perf stat -e '{instructions,slots},{topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
- then
- echo "Topdown event group test [Failed topdown metrics group not merge into slots group]"
- err=1
- return
- fi
- if perf stat -e '{instructions,r400,r8000}' true 2>&1 | grep -E -q "<not supported>"
+ td_err=0
+ do_topdown_group_test() {
+ events=$1
+ failure=$2
+ if perf stat -e "$events" true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed $failure for '$events']"
+ td_err=1
+ return
+ fi
+ }
+ do_topdown_group_test "{slots,topdown-retiring}" "events not supported"
+ do_topdown_group_test "{instructions,r400,r8000}" "raw format slots not reordered first"
+ filler_events=("instructions" "cycles"
+ "context-switches" "faults")
+ for ((i = 0; i < ${#filler_events[@]}; i+=2))
+ do
+ filler1=${filler_events[i]}
+ filler2=${filler_events[i+1]}
+ do_topdown_group_test "$filler1,topdown-retiring,slots" \
+ "slots not reordered first in no-group case"
+ do_topdown_group_test "slots,$filler1,topdown-retiring" \
+ "topdown metrics event not reordered in no-group case"
+ do_topdown_group_test "{$filler1,topdown-retiring,slots}" \
+ "slots not reordered first in single group case"
+ do_topdown_group_test "{$filler1,slots},topdown-retiring" \
+ "topdown metrics event not move into slots group"
+ do_topdown_group_test "topdown-retiring,{$filler1,slots}" \
+ "topdown metrics event not move into slots group last"
+ do_topdown_group_test "{$filler1,slots},{topdown-retiring}" \
+ "topdown metrics group not merge into slots group"
+ do_topdown_group_test "{topdown-retiring},{$filler1,slots}" \
+ "topdown metrics group not merge into slots group last"
+ do_topdown_group_test "{$filler1,slots},$filler2,topdown-retiring" \
+ "non-adjacent topdown metrics group not move into slots group"
+ do_topdown_group_test "$filler2,topdown-retiring,{$filler1,slots}" \
+ "non-adjacent topdown metrics group not move into slots group last"
+ do_topdown_group_test "{$filler1,slots},{$filler2,topdown-retiring}" \
+ "metrics group not merge into slots group"
+ do_topdown_group_test "{$filler1,topdown-retiring},{$filler2,slots}" \
+ "metrics group not merge into slots group last"
+ done
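+ # With the four filler events above, the loop runs twice: once pairing
+ # (instructions, cycles) and once pairing (context-switches, faults).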
+ if test "$td_err" -eq 0
then
- echo "Topdown event group test [Failed raw format slots not reordered first]"
- err=1
- return
+ echo "Topdown event group test [Success]"
+ else
+ err="$td_err"
fi
- echo "Topdown event group test [Success]"
}
test_topdown_weak_groups() {
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index 73e9347e88a9..6fa585a1e34c 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -7,80 +7,96 @@ ParanoidAndNotRoot()
[ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
}
+test_prog="sleep 0.01"
system_wide_flag="-a"
if ParanoidAndNotRoot 0
then
system_wide_flag=""
+ test_prog="perf test -w noploop"
fi
err=0
for m in $(perf list --raw-dump metrics); do
echo "Testing $m"
- result=$(perf stat -M "$m" $system_wide_flag -- sleep 0.01 2>&1)
+ result=$(perf stat -M "$m" $system_wide_flag -- $test_prog 2>&1)
result_err=$?
- if [[ $result_err -gt 0 ]]
+ if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
- if [[ "$result" =~ \
- "Access to performance monitoring and observability operations is limited" ]]
+ # No error result and metric shown.
+ continue
+ fi
+ if [[ "$result" =~ "Cannot resolve IDs for" ]]
+ then
+ echo "Metric contains missing events"
+ echo $result
+ err=1 # Fail
+ continue
+ elif [[ "$result" =~ \
+ "Access to performance monitoring and observability operations is limited" ]]
+ then
+ echo "Permission failure"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Permission failure"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "in per-thread mode, enable system wide" ]]
+ then
+ echo "Permissions - need system wide mode"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Permissions - need system wide mode"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "<not supported>" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "<not supported>" ]]
+ then
+ echo "Not supported events"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Not supported events"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "<not counted>" ]]
+ then
+ echo "Not counted events"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "FP issues"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
- elif [[ "$result" =~ "PMM" ]]
+ err=2 # Skip
+ fi
+ continue
+ elif [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ then
+ echo "FP issues"
+ echo $result
+ if [[ $err -eq 0 ]]
then
- echo "Optane memory issues"
- echo $result
- if [[ $err -eq 0 ]]
- then
- err=2 # Skip
- fi
- continue
+ err=2 # Skip
fi
- fi
-
- if [[ "$result" =~ ${m:0:50} ]]
+ continue
+ elif [[ "$result" =~ "PMM" ]]
then
+ echo "Optane memory issues"
+ echo $result
+ if [[ $err -eq 0 ]]
+ then
+ err=2 # Skip
+ fi
continue
fi
# Failed, possibly the workload was too small so retry with something longer.
result=$(perf stat -M "$m" $system_wide_flag -- perf bench internals synthesize 2>&1)
- if [[ "$result" =~ ${m:0:50} ]]
+ result_err=$?
+ if [[ $result_err -eq 0 && "$result" =~ ${m:0:50} ]]
then
+ # No error result and metric shown.
continue
fi
- echo "Metric '$m' not printed in:"
+ echo "Metric '$m' has non-zero error '$result_err' or not printed in:"
echo "$result"
err=1
done
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
index 8b148b300be1..9c466c0efa85 100755
--- a/tools/perf/tests/shell/stat_all_pmu.sh
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -2,7 +2,6 @@
# perf all PMU test (exclusive)
# SPDX-License-Identifier: GPL-2.0
-set -e
err=0
result=""
@@ -16,34 +15,55 @@ trap trap_cleanup EXIT TERM INT
# Test all PMU events; however exclude parameterized ones (name contains '?')
for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g')
do
- echo "Testing $p"
- result=$(perf stat -e "$p" true 2>&1)
- if echo "$result" | grep -q "$p"
+ echo -n "Testing $p -- "
+ output=$(perf stat -e "$p" true 2>&1)
+ stat_result=$?
+ if echo "$output" | grep -q "$p"
then
# Event seen in output.
- continue
- fi
- if echo "$result" | grep -q "<not supported>"
- then
- # Event not supported, so ignore.
- continue
+ if [ $stat_result -eq 0 ] && ! echo "$output" | grep -q "<not supported>"
+ then
+ # Event supported.
+ echo "supported"
+ continue
+ elif echo "$output" | grep -q "<not supported>"
+ then
+ # Event not supported, so ignore.
+ echo "not supported"
+ continue
+ elif echo "$output" | grep -q "No permission to enable"
+ then
+ # No permissions, so ignore.
+ echo "no permission to enable"
+ continue
+ elif echo "$output" | grep -q "Bad event name"
+ then
+ # Non-existent event.
+ echo "Error: Bad event name"
+ echo "$output"
+ err=1
+ continue
+ fi
fi
- if echo "$result" | grep -q "Access to performance monitoring and observability operations is limited."
+
+ if echo "$output" | grep -q "Access to performance monitoring and observability operations is limited."
then
# Access is limited, so ignore.
+ echo "access limited"
continue
fi
# We failed to see the event and it is supported. Possibly the workload was
# too small so retry with something longer.
- result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
- if echo "$result" | grep -q "$p"
+ output=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
+ if echo "$output" | grep -q "$p"
then
# Event seen in output.
+ echo "supported"
continue
fi
echo "Error: event '$p' not printed in:"
- echo "$result"
+ echo "$output"
err=1
done
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
index 279f19c5919a..30566f0b5427 100755
--- a/tools/perf/tests/shell/stat_metrics_values.sh
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -16,11 +16,16 @@ workload="perf bench futex hash -r 2 -s"
# Add -debug, save data file and full rule file
echo "Launch python validation script $pythonvalidator"
echo "Output will be stored in: $tmpdir"
-$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
-ret=$?
-rm -rf $tmpdir
-if [ $ret -ne 0 ]; then
- echo "Metric validation return with erros. Please check metrics reported with errors."
-fi
+for cputype in /sys/bus/event_source/devices/cpu_*; do
+ cputype=$(basename "$cputype")
+ echo "Testing metrics for: $cputype"
+ $PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}" \
+ -cputype "${cputype}"
+ ret=$?
+ rm -rf $tmpdir
+ if [ $ret -ne 0 ]; then
+ echo "Metric validation return with errors. Please check metrics reported with errors."
+ fi
+done
exit $ret
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index e01df7581393..9138fa83bf36 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Check branch stack sampling
# SPDX-License-Identifier: GPL-2.0
@@ -17,35 +17,50 @@ fi
skip_test_missing_symbol brstack_bench
+err=0
TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
TESTPROG="perf test -w brstack"
cleanup() {
rm -rf $TMPDIR
+ trap - EXIT TERM INT
}
-trap cleanup EXIT TERM INT
+trap_cleanup() {
+ set +e
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
test_user_branches() {
echo "Testing user branch stack sampling"
- perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstacksym | tr -s ' ' '\n' > $TMPDIR/perf.script
+ perf record -o "$TMPDIR/perf.data" --branch-filter any,save_type,u -- ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
+ perf script -i "$TMPDIR/perf.data" --fields brstacksym > "$TMPDIR/perf.script"
# example of branch entries:
# brstack_foo+0x14/brstack_bar+0x40/P/-/-/0/CALL
- set -x
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_foo\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bar\+[^ ]*/brstack_foo\+[^ ]*/RET/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_foo\+[^ ]*/brstack_bench\+[^ ]*/RET/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack_bench\+[^ ]*/brstack_bench\+[^ ]*/COND/.*$" $TMPDIR/perf.script
- grep -E -m1 "^brstack\+[^ ]*/brstack\+[^ ]*/UNCOND/.*$" $TMPDIR/perf.script
- set +x
-
+ expected=(
+ "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$"
+ "^brstack_foo\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$"
+ "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/CALL/.*$"
+ "^brstack_bench\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$"
+ "^brstack_bar\+[^ ]*/brstack_foo\+[^ ]*/RET/.*$"
+ "^brstack_foo\+[^ ]*/brstack_bench\+[^ ]*/RET/.*$"
+ "^brstack_bench\+[^ ]*/brstack_bench\+[^ ]*/COND/.*$"
+ "^brstack\+[^ ]*/brstack\+[^ ]*/UNCOND/.*$"
+ )
+ for x in "${expected[@]}"
+ do
+ if ! tr -s ' ' '\n' < "$TMPDIR/perf.script" | grep -E -m1 -q "$x"
+ then
+ echo "Branches missing $x"
+ err=1
+ fi
+ done
# some branch types are still not being tested:
# IND COND_CALL COND_RET SYSCALL SYSRET IRQ SERROR NO_TX
}
@@ -57,14 +72,28 @@ test_filter() {
test_filter_expect=$2
echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
-
- perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
- perf script -i $TMPDIR/perf.data --fields brstack | tr -s ' ' '\n' | grep '.' > $TMPDIR/perf.script
+ perf record -o "$TMPDIR/perf.data" --branch-filter "$test_filter_filter,save_type,u" -- ${TESTPROG} > "$TMPDIR/record.txt" 2>&1
+ perf script -i "$TMPDIR/perf.data" --fields brstack > "$TMPDIR/perf.script"
# fail if we find any branch type that doesn't match any of the expected ones
# also consider UNKNOWN branch types (-)
- if grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" $TMPDIR/perf.script; then
- return 1
+ if [ ! -s "$TMPDIR/perf.script" ]
+ then
+ echo "Empty script output"
+ err=1
+ return
+ fi
+ # Look for lines not matching test_filter_expect ignoring issues caused
+ # by empty output
+ tr -s ' ' '\n' < "$TMPDIR/perf.script" | grep '.' | \
+ grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" \
+ > "$TMPDIR/perf.script-filtered" || true
+ if [ -s "$TMPDIR/perf.script-filtered" ]
+ then
+ echo "Unexpected branch filter in script output"
+ cat "$TMPDIR/perf.script"
+ err=1
+ return
fi
}
@@ -80,3 +109,6 @@ test_filter "any_ret" "RET|COND_RET|SYSRET|ERET"
test_filter "call,cond" "CALL|SYSCALL|COND"
test_filter "any_call,cond" "CALL|IND_CALL|COND_CALL|IRQ|SYSCALL|COND"
test_filter "cond,any_call,any_ret" "COND|CALL|IND_CALL|COND_CALL|SYSCALL|IRQ|RET|COND_RET|SYSRET|ERET"
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
index c86da0235059..d61b5659a46d 100755
--- a/tools/perf/tests/shell/test_data_symbol.sh
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -5,8 +5,6 @@
# Leo Yan <leo.yan@linaro.org>, 2022
shelldir=$(dirname "$0")
-# shellcheck source=lib/waiting.sh
-. "${shelldir}"/lib/waiting.sh
# shellcheck source=lib/perf_has_symbol.sh
. "${shelldir}"/lib/perf_has_symbol.sh
@@ -18,7 +16,7 @@ skip_if_no_mem_event() {
skip_if_no_mem_event || exit 2
-skip_test_missing_symbol buf1
+skip_test_missing_symbol workload_datasym_buf1
TEST_PROGRAM="perf test -w datasym"
PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
@@ -26,18 +24,19 @@ ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX)
check_result() {
# The memory report format is as below:
- # 99.92% ... [.] buf1+0x38
+ # 99.92% ... [.] workload_datasym_buf1+0x38
result=$(perf mem report -i ${PERF_DATA} -s symbol_daddr -q 2>&1 |
- awk '/buf1/ { print $4 }')
+ awk '/workload_datasym_buf1/ { print $4 }')
- # Testing is failed if has no any sample for "buf1"
+ # Testing is failed if has no any sample for "workload_datasym_buf1"
[ -z "$result" ] && return 1
while IFS= read -r line; do
- # The "data1" and "data2" fields in structure "buf1" have
- # offset "0x0" and "0x38", returns failure if detect any
- # other offset value.
- if [ "$line" != "buf1+0x0" ] && [ "$line" != "buf1+0x38" ]; then
+ # The "data1" and "data2" fields in structure
+ # "workload_datasym_buf1" have offset "0x0" and "0x38", returns
+ # failure if detect any other offset value.
+ if [ "$line" != "workload_datasym_buf1+0x0" ] && \
+ [ "$line" != "workload_datasym_buf1+0x38" ]; then
return 1
fi
done <<< "$result"
@@ -55,24 +54,38 @@ trap cleanup_files exit term int
echo "Recording workload..."
-# perf mem/c2c internally uses IBS PMU on AMD CPU which doesn't support
-# user/kernel filtering and per-process monitoring, spin program on
-# specific CPU and test in per-CPU mode.
is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
if (($is_amd >= 1)); then
- perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" &
-else
- perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" &
-fi
-
-PERFPID=$!
-
-wait_for_perf_to_start ${PERFPID} "${ERR_FILE}"
+ mem_events="$(perf mem record -v -e list 2>&1)"
+ if ! [[ "$mem_events" =~ ^mem\-ldst.*ibs_op/(.*)/.*available ]]; then
+ echo "ERROR: mem-ldst event is not matching"
+ exit 1
+ fi
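+ # Illustrative shape of the line the regex above matches:
+ #   mem-ldst : ... ibs_op/<params>/ ... available
+ # BASH_REMATCH[1] then holds <params> (e.g. "ldlat=0").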
+
+ # --ldlat on AMD:
+ # o Zen4 and earlier uarch does not support ldlat
+ # o Even on supported platforms, it's disabled (--ldlat=0) by default.
+ ldlat=${BASH_REMATCH[1]}
+ if [[ -n $ldlat ]]; then
+ if ! [[ "$ldlat" =~ ldlat=0 ]]; then
+ echo "ERROR: ldlat not initialized to 0?"
+ exit 1
+ fi
-sleep 1
+ mem_events="$(perf mem record -v --ldlat=150 -e list 2>&1)"
+ if ! [[ "$mem_events" =~ ^mem-ldst.*ibs_op/ldlat=150/.*available ]]; then
+ echo "ERROR: --ldlat not honored?"
+ exit 1
+ fi
+ fi
-kill $PERFPID
-wait $PERFPID
+ # perf mem/c2c internally uses IBS PMU on AMD CPU which doesn't
+ # support user/kernel filtering and per-process monitoring on older
+ # kernels, spin program on specific CPU and test in per-CPU mode.
+ perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}"
+else
+ perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}"
+fi
check_result
exit $?
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
index f3a9a040bacc..32a9b8dcb200 100755
--- a/tools/perf/tests/shell/test_intel_pt.sh
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -288,6 +288,11 @@ test_jitdump()
jitdump_incl_dir="${script_dir}/../../util"
jitdump_h="${jitdump_incl_dir}/jitdump.h"
+ if ! perf check feature -q libelf ; then
+ echo "SKIP: libelf is needed for jitdump"
+ return 2
+ fi
+
if [ ! -e "${jitdump_h}" ] ; then
echo "SKIP: Include file jitdump.h not found"
return 2
diff --git a/tools/perf/tests/shell/test_stat_intel_tpebs.sh b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
index f95fc64bf0a7..a330ecdb7ba5 100755
--- a/tools/perf/tests/shell/test_stat_intel_tpebs.sh
+++ b/tools/perf/tests/shell/test_stat_intel_tpebs.sh
@@ -3,20 +3,83 @@
# SPDX-License-Identifier: GPL-2.0
set -e
-grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
-# Use this event for testing because it should exist in all platforms
-event=cache-misses:R
+ParanoidAndNotRoot() {
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
-# Hybrid platforms output like "cpu_atom/cache-misses/R", rather than as above
-alt_name=/cache-misses/R
+if ! grep -q GenuineIntel /proc/cpuinfo
+then
+ echo "Skipping non-Intel"
+ exit 2
+fi
-# Without this cmd option, default value or zero is returned
-#echo "Testing without --record-tpebs"
-#result=$(perf stat -e "$event" true 2>&1)
-#[[ "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+if ParanoidAndNotRoot 0
+then
+ echo "Skipping paranoid >0 and not root"
+ exit 2
+fi
-# In platforms that do not support TPEBS, it should execute without error.
-echo "Testing with --record-tpebs"
-result=$(perf stat -e "$event" --record-tpebs -a sleep 0.01 2>&1)
-[[ "$result" =~ "perf record" && "$result" =~ $event || "$result" =~ $alt_name ]] || exit 1
+stat_output=$(mktemp /tmp/__perf_stat_tpebs_output.XXXXX)
+
+cleanup() {
+ rm -rf "${stat_output}"
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ echo "Unexpected signal in ${FUNCNAME[1]}"
+ cat "${stat_output}"
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Event to be used in tests
+event=cache-misses
+
+if ! perf record -e "${event}:p" -a -o /dev/null sleep 0.01 > "${stat_output}" 2>&1
+then
+ echo "Missing ${event} support"
+ cleanup
+ exit 2
+fi
+
+test_with_record_tpebs() {
+ echo "Testing with --record-tpebs"
+ if ! perf stat -e "${event}:R" --record-tpebs -a sleep 0.01 > "${stat_output}" 2>&1
+ then
+ echo "Testing with --record-tpebs [Failed perf stat]"
+ cat "${stat_output}"
+ exit 1
+ fi
+
+ # Expected output:
+ # $ perf stat --record-tpebs -e cache-misses:R -a sleep 0.01
+ # Events enabled
+ # [ perf record: Woken up 2 times to write data ]
+ # [ perf record: Captured and wrote 0.056 MB - ]
+ #
+ # Performance counter stats for 'system wide':
+ #
+ # 0 cache-misses:R
+ #
+ # 0.013963299 seconds time elapsed
+ if ! grep "perf record" "${stat_output}"
+ then
+ echo "Testing with --record-tpebs [Failed missing perf record]"
+ cat "${stat_output}"
+ exit 1
+ fi
+ if ! grep "${event}:R" "${stat_output}" && ! grep "/${event}/R" "${stat_output}"
+ then
+ echo "Testing with --record-tpebs [Failed missing event name]"
+ cat "${stat_output}"
+ exit 1
+ fi
+ echo "Testing with --record-tpebs [Success]"
+}
+
+test_with_record_tpebs
+cleanup
+exit 0
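
For reference, the check the test automates can be reproduced by hand; a sketch assuming an Intel CPU with sufficient privileges, keeping only the lines the test greps for (output shape per the expected-output comment above):

    # Sketch: manually run the TPEBS path the test exercises.
    perf stat -e cache-misses:R --record-tpebs -a sleep 0.01 2>&1 |
        grep -E 'perf record|cache-misses'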
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
index 33387c329f92..7adf9755d6de 100755
--- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -4,12 +4,11 @@
set -e
-# Skip if there's no probe command.
-if ! perf | grep probe
-then
- echo "Skip: probe command isn't present"
- exit 2
-fi
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+[ "$(id -u)" == 0 ] || exit 2
# skip if there's no gcc
if ! [ -x "$(command -v gcc)" ]; then
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 708a13f00635..5d5019988d61 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -15,6 +15,7 @@
skip_if_no_perf_probe || exit 2
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
. "$(dirname $0)"/lib/probe_vfs_getname.sh
@@ -24,9 +25,14 @@ trace_open_vfs_getname() {
grep -E " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +\"?${file}\"?, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
-
-add_probe_vfs_getname || skip_if_no_debuginfo
+add_probe_vfs_getname
err=$?
+
+if [ $err -eq 1 ] ; then
+ skip_if_no_debuginfo
+ err=$?
+fi
+
if [ $err -ne 0 ] ; then
exit $err
fi
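
The rewrite above replaces 'add_probe_vfs_getname || skip_if_no_debuginfo', where $? always reflected the right-hand command whenever the left one failed; a two-line sketch of the pitfall:

    # Sketch of the masked-status pitfall the explicit err handling avoids:
    false || true
    echo $?   # prints 0: the failure from 'false' is no longer visible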
diff --git a/tools/perf/tests/shell/trace_btf_enum.sh b/tools/perf/tests/shell/trace_btf_enum.sh
index 8d1e6bbeac90..f0b49f7fb57d 100755
--- a/tools/perf/tests/shell/trace_btf_enum.sh
+++ b/tools/perf/tests/shell/trace_btf_enum.sh
@@ -6,13 +6,14 @@ err=0
set -e
syscall="landlock_add_rule"
-non_syscall="timer:hrtimer_init,timer:hrtimer_start"
+non_syscall="timer:hrtimer_setup,timer:hrtimer_start"
TESTPROG="perf test -w landlock"
# shellcheck source=lib/probe.sh
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
check_vmlinux() {
echo "Checking if vmlinux exists"
diff --git a/tools/perf/tests/shell/trace_btf_general.sh b/tools/perf/tests/shell/trace_btf_general.sh
index e9ee727f3433..a25d8744695e 100755
--- a/tools/perf/tests/shell/trace_btf_general.sh
+++ b/tools/perf/tests/shell/trace_btf_general.sh
@@ -76,6 +76,7 @@ trace_config() {
skip_if_no_perf_trace || exit 2
check_vmlinux || exit 2
+[ "$(id -u)" = 0 ] || exit 2
trace_config
diff --git a/tools/perf/tests/shell/trace_exit_race.sh b/tools/perf/tests/shell/trace_exit_race.sh
index fbb0adc33a88..1e247693e756 100755
--- a/tools/perf/tests/shell/trace_exit_race.sh
+++ b/tools/perf/tests/shell/trace_exit_race.sh
@@ -10,6 +10,7 @@
. "$(dirname $0)"/lib/probe.sh
skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
if [ "$1" = "-v" ]; then
verbose="1"
diff --git a/tools/perf/tests/shell/trace_record_replay.sh b/tools/perf/tests/shell/trace_record_replay.sh
new file mode 100755
index 000000000000..6b4ed863c1ef
--- /dev/null
+++ b/tools/perf/tests/shell/trace_record_replay.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+# perf trace record and replay
+# SPDX-License-Identifier: GPL-2.0
+
+# Check that perf trace works with record and replay
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
+
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+perf trace record -o ${file} sleep 1 || exit 1
+if ! perf trace -i ${file} 2>&1 | grep nanosleep; then
+ echo "Failed: cannot find *nanosleep syscall"
+ exit 1
+fi
+
+rm -f ${file}
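
The grep above matches both nanosleep and clock_nanosleep, since either may service sleep(1) depending on the libc. A sketch of the same record/replay round-trip done by hand:

    # Sketch: record syscalls for a workload, then replay from the file.
    f=$(mktemp /tmp/trace.XXXXX)
    perf trace record -o "$f" -- sleep 1
    perf trace -i "$f" | grep nanosleep   # nanosleep or clock_nanosleep
    rm -f "$f"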
diff --git a/tools/perf/tests/shell/trace_summary.sh b/tools/perf/tests/shell/trace_summary.sh
new file mode 100755
index 000000000000..f9bb7f9388be
--- /dev/null
+++ b/tools/perf/tests/shell/trace_summary.sh
@@ -0,0 +1,77 @@
+#!/bin/sh
+# perf trace summary (exclusive)
+# SPDX-License-Identifier: GPL-2.0
+
+# Check that perf trace works with various summary mode
+
+# shellcheck source=lib/probe.sh
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_trace || exit 2
+[ "$(id -u)" = 0 ] || exit 2
+
+OUTPUT=$(mktemp /tmp/perf_trace_test.XXXXX)
+
+test_perf_trace() {
+ args=$1
+ workload="true"
+ search="^\s*(open|read|close).*[0-9]+%$"
+
+ echo "testing: perf trace ${args} -- ${workload}"
+ perf trace ${args} -- ${workload} >${OUTPUT} 2>&1
+ if [ $? -ne 0 ]; then
+ echo "Error: perf trace ${args} failed unexpectedly"
+ cat ${OUTPUT}
+ rm -f ${OUTPUT}
+ exit 1
+ fi
+
+ count=$(grep -E -c -m 3 "${search}" ${OUTPUT})
+ if [ "${count}" != "3" ]; then
+ echo "Error: cannot find enough pattern ${search} in the output"
+ cat ${OUTPUT}
+ rm -f ${OUTPUT}
+ exit 1
+ fi
+}
+
+# summary only for a process
+test_perf_trace "-s"
+
+# normal output with summary at the end
+test_perf_trace "-S"
+
+# summary only with an explicit summary mode
+test_perf_trace "-s --summary-mode=thread"
+
+# summary with normal output - total summary mode
+test_perf_trace "-S --summary-mode=total"
+
+# summary only for system wide - per-thread summary
+test_perf_trace "-as --summary-mode=thread --no-bpf-summary"
+
+# summary only for system wide - total summary mode
+test_perf_trace "-as --summary-mode=total --no-bpf-summary"
+
+if ! perf check feature -q bpf; then
+ echo "Skip --bpf-summary tests as perf built without libbpf"
+ rm -f ${OUTPUT}
+ exit 2
+fi
+
+# summary only for system wide - per-thread summary with BPF
+test_perf_trace "-as --summary-mode=thread --bpf-summary"
+
+# summary only for system wide - total summary mode with BPF
+test_perf_trace "-as --summary-mode=total --bpf-summary"
+
+# summary with normal output for system wide - total summary mode with BPF
+test_perf_trace "-aS --summary-mode=total --bpf-summary"
+
+# summary only for system wide - cgroup summary mode with BPF
+test_perf_trace "-as --summary-mode=cgroup --bpf-summary"
+
+# summary with normal output for system wide - cgroup summary mode with BPF
+test_perf_trace "-aS --summary-mode=cgroup --bpf-summary"
+
+rm -f ${OUTPUT}
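
The search regex anchors on a syscall name at line start and a trailing percentage (the stddev column of the summary). A runnable sketch with an illustrative line, where only the shape of the values matters:

    # Sketch: the kind of summary line the regex accepts; grep -E
    # exits 0 on the match.
    echo '   read      3     0   0.012   0.002   0.004   0.006   12.34%' |
        grep -E '^\s*(open|read|close).*[0-9]+%$'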