Diffstat (limited to 'tools/perf/tests/shell')
-rwxr-xr-x  tools/perf/tests/shell/buildid.sh                            |  12
-rwxr-xr-x  tools/perf/tests/shell/daemon.sh                             | 113
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py          |  13
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py         | 574
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation_rules.json | 398
-rw-r--r--  tools/perf/tests/shell/lib/stat_output.sh                    | 169
-rwxr-xr-x  tools/perf/tests/shell/lock_contention.sh                    |  70
-rwxr-xr-x  tools/perf/tests/shell/record+probe_libc_inet_pton.sh        |  10
-rwxr-xr-x  tools/perf/tests/shell/record+script_probe_vfs_getname.sh    |   4
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh                    | 178
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh                   |  15
-rwxr-xr-x  tools/perf/tests/shell/stat+shadow_stat.sh                   |   4
-rwxr-xr-x  tools/perf/tests/shell/stat+std_output.sh                    | 108
-rwxr-xr-x  tools/perf/tests/shell/stat.sh                               |  44
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh                   |   6
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pfm.sh                       |  51
-rwxr-xr-x  tools/perf/tests/shell/stat_metrics_values.sh                |  30
-rwxr-xr-x  tools/perf/tests/shell/test_arm_callgraph_fp.sh              |  11
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight.sh                 |   6
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe.sh                       |   2
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh                       |  12
-rwxr-xr-x  tools/perf/tests/shell/test_perf_data_converter_json.sh      |  72
-rwxr-xr-x  tools/perf/tests/shell/test_task_analyzer.sh                 | 104
23 files changed, 1694 insertions(+), 312 deletions(-)
diff --git a/tools/perf/tests/shell/buildid.sh b/tools/perf/tests/shell/buildid.sh
index 0ce22ea0a7f1..3383ca3399d4 100755
--- a/tools/perf/tests/shell/buildid.sh
+++ b/tools/perf/tests/shell/buildid.sh
@@ -83,12 +83,12 @@ check()
# in case of pe-file.exe file
echo $1 | grep ".exe"
if [ $? -eq 0 ]; then
- if [ -x $1 -a ! -x $file ]; then
+ if [ -x $1 ] && [ ! -x $file ]; then
echo "failed: file ${file} executable does not exist"
exit 1
fi
- if [ ! -x $file -a ! -e $file ]; then
+ if [ ! -x $file ] && [ ! -e $file ]; then
echo "failed: file ${file} does not exist"
exit 1
fi
@@ -136,10 +136,10 @@ test_record()
log_err=$(mktemp /tmp/perf.log.err.XXX)
perf="perf --buildid-dir ${build_id_dir}"
- echo "running: perf record $@"
- ${perf} record --buildid-all -o ${data} $@ 1>${log_out} 2>${log_err}
+ echo "running: perf record $*"
+ ${perf} record --buildid-all -o ${data} "$@" 1>${log_out} 2>${log_err}
if [ $? -ne 0 ]; then
- echo "failed: record $@"
+ echo "failed: record $*"
echo "see log: ${log_err}"
exit 1
fi
@@ -172,4 +172,4 @@ if [ ${run_pe} -eq 1 ]; then
rm -r ${wineprefix}
fi
-exit ${err}
+exit 0
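The two kinds of fixes in the buildid.sh hunks above are worth spelling out: quoting "$@" preserves argument boundaries where $* and an unquoted $@ do not, and POSIX recommends chaining separate [ ] tests with && instead of the obsolescent -a operator. A minimal, self-contained sketch (not part of the patch; file names are made up):

#!/bin/sh
demo() {
    echo "joined for display: $*"        # fine for log messages, as in "running: perf record $*"
    for arg in "$@"; do                  # "$@" keeps every argument intact, even with spaces
        echo "argument: [${arg}]"
    done
    if [ -e "$1" ] && [ -r "$1" ]; then  # portable replacement for [ -e $1 -a -r $1 ]
        echo "$1 exists and is readable"
    fi
}
demo "file with spaces.txt" /etc/hostname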
diff --git a/tools/perf/tests/shell/daemon.sh b/tools/perf/tests/shell/daemon.sh
index 45fc24af5b07..4c598cfc5afa 100755
--- a/tools/perf/tests/shell/daemon.sh
+++ b/tools/perf/tests/shell/daemon.sh
@@ -11,11 +11,16 @@ check_line_first()
local lock=$5
local up=$6
- local line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'`
- local line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'`
- local line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'`
- local line_lock=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'`
- local line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`
+ local line_name
+ line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'`
+ local line_base
+ line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'`
+ local line_output
+ line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'`
+ local line_lock
+ line_lock=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'`
+ local line_up
+ line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`
if [ "${name}" != "${line_name}" ]; then
echo "FAILED: wrong name"
@@ -54,13 +59,20 @@ check_line_other()
local ack=$7
local up=$8
- local line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'`
- local line_run=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'`
- local line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'`
- local line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'`
- local line_control=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`
- local line_ack=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $7 }'`
- local line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $8 }'`
+ local line_name
+ line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'`
+ local line_run
+ line_run=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'`
+ local line_base
+ line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'`
+ local line_output
+ line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'`
+ local line_control
+ line_control=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`
+ local line_ack
+ line_ack=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $7 }'`
+ local line_up
+ line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $8 }'`
if [ "${name}" != "${line_name}" ]; then
echo "FAILED: wrong name"
@@ -102,8 +114,10 @@ daemon_exit()
{
local config=$1
- local line=`perf daemon --config ${config} -x: | head -1`
- local pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+ local line
+ line=`perf daemon --config ${config} -x: | head -1`
+ local pid
+ pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
# Reset trap handler.
trap - SIGINT SIGTERM
@@ -123,7 +137,7 @@ daemon_start()
perf daemon start --config ${config}
# Clean up daemon if interrupted.
- trap "echo 'FAILED: Signal caught'; daemon_exit ${config}; exit 1" SIGINT SIGTERM
+ trap 'echo "FAILED: Signal caught"; daemon_exit "${config}"; exit 1' SIGINT SIGTERM
# wait for the session to ping
local state="FAIL"
@@ -144,8 +158,10 @@ test_list()
{
echo "test daemon list"
- local config=$(mktemp /tmp/perf.daemon.config.XXX)
- local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
cat <<EOF > ${config}
[daemon]
@@ -165,19 +181,22 @@ EOF
# check first line
# pid:daemon:base:base/output:base/lock
- local line=`perf daemon --config ${config} -x: | head -1`
+ local line
+ line=`perf daemon --config ${config} -x: | head -1`
check_line_first ${line} daemon ${base} ${base}/output ${base}/lock "0"
# check 1st session
# pid:size:-e cpu-clock:base/size:base/size/output:base/size/control:base/size/ack:0
- local line=`perf daemon --config ${config} -x: | head -2 | tail -1`
+ local line
+ line=`perf daemon --config ${config} -x: | head -2 | tail -1`
check_line_other "${line}" size "-e cpu-clock -m 1 sleep 10" ${base}/session-size \
${base}/session-size/output ${base}/session-size/control \
${base}/session-size/ack "0"
# check 2nd session
# pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
- local line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ local line
+ line=`perf daemon --config ${config} -x: | head -3 | tail -1`
check_line_other "${line}" time "-e task-clock -m 1 sleep 10" ${base}/session-time \
${base}/session-time/output ${base}/session-time/control \
${base}/session-time/ack "0"
@@ -193,8 +212,10 @@ test_reconfig()
{
echo "test daemon reconfig"
- local config=$(mktemp /tmp/perf.daemon.config.XXX)
- local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
# prepare config
cat <<EOF > ${config}
@@ -215,10 +236,12 @@ EOF
# check 2nd session
# pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
- local line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ local line
+ line=`perf daemon --config ${config} -x: | head -3 | tail -1`
check_line_other "${line}" time "-e task-clock -m 1 sleep 10" ${base}/session-time \
${base}/session-time/output ${base}/session-time/control ${base}/session-time/ack "0"
- local pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+ local pid
+ pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
# prepare new config
local config_new=${config}.new
@@ -249,7 +272,8 @@ EOF
# check reconfigured 2nd session
# pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
- local line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ local line
+ line=`perf daemon --config ${config} -x: | head -3 | tail -1`
check_line_other "${line}" time "-e cpu-clock -m 1 sleep 10" ${base}/session-time \
${base}/session-time/output ${base}/session-time/control ${base}/session-time/ack "0"
@@ -276,7 +300,8 @@ EOF
state=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`
done
- local one=`perf daemon --config ${config} -x: | wc -l`
+ local one
+ one=`perf daemon --config ${config} -x: | wc -l`
if [ ${one} -ne "1" ]; then
echo "FAILED: wrong list output"
@@ -312,8 +337,10 @@ test_stop()
{
echo "test daemon stop"
- local config=$(mktemp /tmp/perf.daemon.config.XXX)
- local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
# prepare config
cat <<EOF > ${config}
@@ -332,8 +359,12 @@ EOF
# start daemon
daemon_start ${config} size
- local pid_size=`perf daemon --config ${config} -x: | head -2 | tail -1 | awk 'BEGIN { FS = ":" } ; { print $1 }'`
- local pid_time=`perf daemon --config ${config} -x: | head -3 | tail -1 | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+ local pid_size
+ pid_size=`perf daemon --config ${config} -x: | head -2 | tail -1 |
+ awk 'BEGIN { FS = ":" } ; { print $1 }'`
+ local pid_time
+ pid_time=`perf daemon --config ${config} -x: | head -3 | tail -1 |
+ awk 'BEGIN { FS = ":" } ; { print $1 }'`
# check that sessions are running
if [ ! -d "/proc/${pid_size}" ]; then
@@ -364,8 +395,10 @@ test_signal()
{
echo "test daemon signal"
- local config=$(mktemp /tmp/perf.daemon.config.XXX)
- local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
# prepare config
cat <<EOF > ${config}
@@ -389,7 +422,7 @@ EOF
daemon_exit ${config}
# count is 2 perf.data for signals and 1 for perf record finished
- count=`ls ${base}/session-test/ | grep perf.data | wc -l`
+ count=`ls ${base}/session-test/*perf.data* | wc -l`
if [ ${count} -ne 3 ]; then
error=1
echo "FAILED: perf data no generated"
@@ -403,8 +436,10 @@ test_ping()
{
echo "test daemon ping"
- local config=$(mktemp /tmp/perf.daemon.config.XXX)
- local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
# prepare config
cat <<EOF > ${config}
@@ -426,7 +461,7 @@ EOF
size=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`
type=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
- if [ ${size} != "OK" -o ${type} != "OK" ]; then
+ if [ ${size} != "OK" ] || [ ${type} != "OK" ]; then
error=1
echo "FAILED: daemon ping failed"
fi
@@ -442,8 +477,10 @@ test_lock()
{
echo "test daemon lock"
- local config=$(mktemp /tmp/perf.daemon.config.XXX)
- local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
# prepare config
cat <<EOF > ${config}
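Two shell idioms drive most of the daemon.sh churn above, shown here in isolation (a sketch, not part of the patch): `local var=$(cmd)` always returns the exit status of `local` and so masks failures of `cmd` (ShellCheck SC2155), which is why declaration and assignment are split; and a single-quoted trap string defers expansion until the trap actually fires.

#!/bin/sh
config=/tmp/example.config
# Single quotes: ${config} is expanded when the signal arrives, not when trap is set.
trap 'echo "FAILED: Signal caught, cleaning ${config}"; exit 1' INT TERM

probe() {
    local line
    line=$(false)                 # $? now reflects the command substitution...
    if [ $? -ne 0 ]; then         # ...whereas 'local line=$(false)' would report success
        echo "substitution failed, as detected here"
    fi
}
probe
trap - INT TERM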
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index 61f3059ca54b..ea55d5ea1ced 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -14,6 +14,7 @@ ap.add_argument('--system-wide', action='store_true')
ap.add_argument('--event', action='store_true')
ap.add_argument('--per-core', action='store_true')
ap.add_argument('--per-thread', action='store_true')
+ap.add_argument('--per-cache', action='store_true')
ap.add_argument('--per-die', action='store_true')
ap.add_argument('--per-node', action='store_true')
ap.add_argument('--per-socket', action='store_true')
@@ -47,12 +48,14 @@ def check_json_output(expected_items):
'counter-value': lambda x: is_counter_value(x),
'cgroup': lambda x: True,
'cpu': lambda x: isint(x),
+ 'cache': lambda x: True,
'die': lambda x: True,
'event': lambda x: True,
'event-runtime': lambda x: isfloat(x),
'interval': lambda x: isfloat(x),
'metric-unit': lambda x: True,
'metric-value': lambda x: isfloat(x),
+ 'metricgroup': lambda x: True,
'node': lambda x: True,
'pcnt-running': lambda x: isfloat(x),
'socket': lambda x: True,
@@ -63,10 +66,12 @@ def check_json_output(expected_items):
for item in json.loads(input):
if expected_items != -1:
count = len(item)
- if count != expected_items and count >= 1 and count <= 4 and 'metric-value' in item:
+ if count != expected_items and count >= 1 and count <= 6 and 'metric-value' in item:
# Events that generate >1 metric may have isolated metric
- # values and possibly other prefixes like interval, core and
- # aggregate-number.
+ # values and possibly other prefixes like interval, core,
+ # aggregate-number, or event-runtime/pcnt-running from multiplexing.
+ pass
+ elif count != expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
pass
elif count != expected_items:
raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
@@ -83,7 +88,7 @@ try:
expected_items = 7
elif args.interval or args.per_thread or args.system_wide_no_aggr:
expected_items = 8
- elif args.per_core or args.per_socket or args.per_node or args.per_die:
+ elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cache:
expected_items = 9
else:
# If no option is specified, don't check the number of items.
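A hedged sketch of how a stat shell test could exercise the new --per-cache mode of this linter; the helper's --file option and the interpreter name are assumptions, not shown in this hunk:

#!/bin/sh
out=$(mktemp /tmp/__perf_test.stat_output.json.XXXXX)
perf stat -j --per-cache -a -o "${out}" true
# Assumption: the lint helper accepts --file; otherwise feed the output on stdin.
python3 tools/perf/tests/shell/lib/perf_json_output_lint.py --per-cache --file "${out}"
rm -f "${out}"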
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
new file mode 100644
index 000000000000..50a34a9cc040
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -0,0 +1,574 @@
+#SPDX-License-Identifier: GPL-2.0
+import re
+import csv
+import json
+import argparse
+from pathlib import Path
+import subprocess
+
+class Validator:
+ def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
+ self.rulefname = rulefname
+ self.reportfname = reportfname
+ self.rules = None
+ self.collectlist:str = metrics
+ self.metrics = self.__set_metrics(metrics)
+ self.skiplist = set()
+ self.tolerance = t
+
+ self.workloads = [x for x in workload.split(",") if x]
+ self.wlidx = 0 # idx of current workloads
+ self.allresults = dict() # metric results of all workload
+ self.allignoremetrics = dict() # metrics with no results or negative results
+ self.allfailtests = dict()
+ self.alltotalcnt = dict()
+ self.allpassedcnt = dict()
+ self.allerrlist = dict()
+
+ self.results = dict() # metric results of current workload
+ # vars for test pass/failure statistics
+ self.ignoremetrics= set() # metrics with no results or negative results, neg result counts as a failed test
+ self.failtests = dict()
+ self.totalcnt = 0
+ self.passedcnt = 0
+ # vars for errors
+ self.errlist = list()
+
+ # vars for Rule Generator
+ self.pctgmetrics = set() # Percentage rule
+
+ # vars for debug
+ self.datafname = datafname
+ self.debug = debug
+ self.fullrulefname = fullrulefname
+
+ def __set_metrics(self, metrics=''):
+ if metrics != '':
+ return set(metrics.split(","))
+ else:
+ return set()
+
+ def read_json(self, filename: str) -> dict:
+ try:
+ with open(Path(filename).resolve(), "r") as f:
+ data = json.loads(f.read())
+ except OSError as e:
+ print(f"Error when reading file {e}")
+ sys.exit()
+
+ return data
+
+ def json_dump(self, data, output_file):
+ parent = Path(output_file).parent
+ if not parent.exists():
+ parent.mkdir(parents=True)
+
+ with open(output_file, "w+") as output_file:
+ json.dump(data,
+ output_file,
+ ensure_ascii=True,
+ indent=4)
+
+ def get_results(self, idx:int = 0):
+ return self.results[idx]
+
+ def get_bounds(self, lb, ub, error, alias={}, ridx:int = 0) -> list:
+ """
+ Get bounds and tolerance from lb, ub, and error.
+ If lb is missing, use 0.0; if ub is missing, use float('inf'); if error is missing, use self.tolerance.
+
+ @param lb: str/float, lower bound
+ @param ub: str/float, upper bound
+ @param error: float/str, error tolerance
+ @returns: lower bound, return inf if the lower bound is a metric value and is not collected
+ upper bound, return -1 if the upper bound is a metric value and is not collected
+ tolerance, denormalized based on the upper bound value
+ """
+ # init ubv and lbv to invalid values
+ def get_bound_value (bound, initval, ridx):
+ val = initval
+ if isinstance(bound, int) or isinstance(bound, float):
+ val = bound
+ elif isinstance(bound, str):
+ if bound == '':
+ val = float("inf")
+ elif bound in alias:
+ vall = self.get_value(alias[ub], ridx)
+ if vall:
+ val = vall[0]
+ elif bound.replace('.', '1').isdigit():
+ val = float(bound)
+ else:
+ print("Wrong bound: {0}".format(bound))
+ else:
+ print("Wrong bound: {0}".format(bound))
+ return val
+
+ ubv = get_bound_value(ub, -1, ridx)
+ lbv = get_bound_value(lb, float('inf'), ridx)
+ t = get_bound_value(error, self.tolerance, ridx)
+
+ # denormalize error threshold
+ denormerr = t * ubv / 100 if ubv != 100 and ubv > 0 else t
+
+ return lbv, ubv, denormerr
+
+ def get_value(self, name:str, ridx:int = 0) -> list:
+ """
+ Get value of the metric from self.results.
+ If the result of this metric is not provided, the metric name will be added to self.ignoremetrics and self.errlist.
+ All future test(s) on this metric will fail.
+
+ @param name: name of the metric
+ @returns: list with value found in self.results; list is empty when value is not found.
+ """
+ results = []
+ data = self.results[ridx] if ridx in self.results else self.results[0]
+ if name not in self.ignoremetrics:
+ if name in data:
+ results.append(data[name])
+ elif name.replace('.', '1').isdigit():
+ results.append(float(name))
+ else:
+ self.ignoremetrics.add(name)
+ return results
+
+ def check_bound(self, val, lb, ub, err):
+ return True if val <= ub + err and val >= lb - err else False
+
+ # Positive Value Sanity check
+ def pos_val_test(self):
+ """
+ Check if metrics value are non-negative.
+ One metric is counted as one test.
+ Failure: when metric value is negative or not provided.
+ Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics.
+ """
+ negmetric = dict()
+ pcnt = 0
+ tcnt = 0
+ rerun = list()
+ for name, val in self.get_results().items():
+ if val < 0:
+ negmetric[name] = val
+ rerun.append(name)
+ else:
+ pcnt += 1
+ tcnt += 1
+ if len(rerun) > 0 and len(rerun) < 20:
+ second_results = dict()
+ self.second_test(rerun, second_results)
+ for name, val in second_results.items():
+ if name not in negmetric: continue
+ if val >= 0:
+ del negmetric[name]
+ pcnt += 1
+
+ self.failtests['PositiveValueTest']['Total Tests'] = tcnt
+ self.failtests['PositiveValueTest']['Passed Tests'] = pcnt
+ if len(negmetric.keys()):
+ self.ignoremetrics.update(negmetric.keys())
+ negmessage = ["{0}(={1:.4f})".format(name, val) for name, val in negmetric.items()]
+ self.failtests['PositiveValueTest']['Failed Tests'].append({'NegativeValue': negmessage})
+
+ return
+
+ def evaluate_formula(self, formula:str, alias:dict, ridx:int = 0):
+ """
+ Evaluate the value of formula.
+
+ @param formula: the formula to be evaluated
+ @param alias: the dict has alias to metric name mapping
+ @returns: value of the formula if successful; -1 if one or more metric values are not provided
+ """
+ stack = []
+ b = 0
+ errs = []
+ sign = "+"
+ f = str()
+
+ #TODO: support parenthesis?
+ for i in range(len(formula)):
+ if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'):
+ s = alias[formula[b:i]] if i+1 < len(formula) else alias[formula[b:]]
+ v = self.get_value(s, ridx)
+ if not v:
+ errs.append(s)
+ else:
+ f = f + "{0}(={1:.4f})".format(s, v[0])
+ if sign == "*":
+ stack[-1] = stack[-1] * v
+ elif sign == "/":
+ stack[-1] = stack[-1] / v
+ elif sign == '-':
+ stack.append(-v[0])
+ else:
+ stack.append(v[0])
+ if i + 1 < len(formula):
+ sign = formula[i]
+ f += sign
+ b = i + 1
+
+ if len(errs) > 0:
+ return -1, "Metric value missing: "+','.join(errs)
+
+ val = sum(stack)
+ return val, f
+
+ # Relationships Tests
+ def relationship_test(self, rule: dict):
+ """
+ Validate if the metrics follow the required relationship in the rule.
+ eg. lower_bound <= eval(formula)<= upper_bound
+ One rule is counted as one test.
+ Failure: when one or more metric results are not provided, or when the formula evaluates outside the upper/lower bounds.
+
+ @param rule: dict with metric name(+alias), formula, and required upper and lower bounds.
+ """
+ alias = dict()
+ for m in rule['Metrics']:
+ alias[m['Alias']] = m['Name']
+ lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
+ val, f = self.evaluate_formula(rule['Formula'], alias, ridx=rule['RuleIndex'])
+ if val == -1:
+ self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Description':f})
+ elif not self.check_bound(val, lbv, ubv, t):
+ lb = rule['RangeLower']
+ ub = rule['RangeUpper']
+ if isinstance(lb, str):
+ if lb in alias:
+ lb = alias[lb]
+ if isinstance(ub, str):
+ if ub in alias:
+ ub = alias[ub]
+ self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Formula':f,
+ 'RangeLower': lb, 'LowerBoundValue': self.get_value(lb),
+ 'RangeUpper': ub, 'UpperBoundValue':self.get_value(ub),
+ 'ErrorThreshold': t, 'CollectedValue': val})
+ else:
+ self.passedcnt += 1
+ self.failtests['RelationshipTest']['Passed Tests'] += 1
+ self.totalcnt += 1
+ self.failtests['RelationshipTest']['Total Tests'] += 1
+
+ return
+
+
+ # Single Metric Test
+ def single_test(self, rule:dict):
+ """
+ Validate if the metrics are in the required value range.
+ eg. lower_bound <= metrics_value <= upper_bound
+ One metric is counted as one test in this type of test.
+ One rule may include one or more metrics.
+ Failure: when the metric value is not provided or the value is outside the bounds.
+ This test updates self.totalcnt and records failed tests in self.failtests['SingleMetricTest'].
+
+ @param rule: dict with metrics to validate and the value range requirement
+ """
+ lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
+ metrics = rule['Metrics']
+ passcnt = 0
+ totalcnt = 0
+ faillist = list()
+ failures = dict()
+ rerun = list()
+ for m in metrics:
+ totalcnt += 1
+ result = self.get_value(m['Name'])
+ if len(result) > 0 and self.check_bound(result[0], lbv, ubv, t) or m['Name'] in self.skiplist:
+ passcnt += 1
+ else:
+ failures[m['Name']] = result
+ rerun.append(m['Name'])
+
+ if len(rerun) > 0 and len(rerun) < 20:
+ second_results = dict()
+ self.second_test(rerun, second_results)
+ for name, val in second_results.items():
+ if name not in failures: continue
+ if self.check_bound(val, lbv, ubv, t):
+ passcnt += 1
+ del failures[name]
+ else:
+ failures[name] = val
+ self.results[0][name] = val
+
+ self.totalcnt += totalcnt
+ self.passedcnt += passcnt
+ self.failtests['SingleMetricTest']['Total Tests'] += totalcnt
+ self.failtests['SingleMetricTest']['Passed Tests'] += passcnt
+ if len(failures.keys()) != 0:
+ faillist = [{'MetricName':name, 'CollectedValue':val} for name, val in failures.items()]
+ self.failtests['SingleMetricTest']['Failed Tests'].append({'RuleIndex':rule['RuleIndex'],
+ 'RangeLower': rule['RangeLower'],
+ 'RangeUpper': rule['RangeUpper'],
+ 'ErrorThreshold':rule['ErrorThreshold'],
+ 'Failure':faillist})
+
+ return
+
+ def create_report(self):
+ """
+ Create final report and write into a JSON file.
+ """
+ alldata = list()
+ for i in range(0, len(self.workloads)):
+ reportstas = {"Total Rule Count": self.alltotalcnt[i], "Passed Rule Count": self.allpassedcnt[i]}
+ data = {"Metric Validation Statistics": reportstas, "Tests in Category": self.allfailtests[i],
+ "Errors":self.allerrlist[i]}
+ alldata.append({"Workload": self.workloads[i], "Report": data})
+
+ json_str = json.dumps(alldata, indent=4)
+ print("Test validation finished. Final report: ")
+ print(json_str)
+
+ if self.debug:
+ allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} for i in range(0, len(self.workloads))]
+ self.json_dump(allres, self.datafname)
+
+ def check_rule(self, testtype, metric_list):
+ """
+ Check if the rule uses metric(s) that do not exist on the current platform.
+
+ @param metric_list: list of metrics from the rule.
+ @return: False when at least one metric used by the rule is not found in the metric file (the rule will be skipped).
+ True when all metrics used in the rule are found in the metric file.
+ """
+ if testtype == "RelationshipTest":
+ for m in metric_list:
+ if m['Name'] not in self.metrics:
+ return False
+ return True
+
+ # Start of Collector and Converter
+ def convert(self, data: list, metricvalues:dict):
+ """
+ Convert collected metric data from the -j output to dict of {metric_name:value}.
+ """
+ for json_string in data:
+ try:
+ result =json.loads(json_string)
+ if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "":
+ name = result["metric-unit"].split(" ")[1] if len(result["metric-unit"].split(" ")) > 1 \
+ else result["metric-unit"]
+ metricvalues[name.lower()] = float(result["metric-value"])
+ except ValueError as error:
+ continue
+ return
+
+ def _run_perf(self, metric, workload: str):
+ tool = 'perf'
+ command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
+ wl = workload.split()
+ command.extend(wl)
+ print(" ".join(command))
+ cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
+ data = [x+'}' for x in cmd.stderr.split('}\n') if x]
+ return data
+
+
+ def collect_perf(self, workload: str):
+ """
+ Collect metric data with "perf stat -M" on given workload with -a and -j.
+ """
+ self.results = dict()
+ print(f"Starting perf collection")
+ print(f"Long workload: {workload}")
+ collectlist = dict()
+ if self.collectlist != "":
+ collectlist[0] = {x for x in self.collectlist.split(",")}
+ else:
+ collectlist[0] = set(list(self.metrics))
+ # Create metric set for relationship rules
+ for rule in self.rules:
+ if rule["TestType"] == "RelationshipTest":
+ metrics = [m["Name"] for m in rule["Metrics"]]
+ if not any(m not in collectlist[0] for m in metrics):
+ collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]
+
+ for idx, metrics in collectlist.items():
+ if idx == 0: wl = "true"
+ else: wl = workload
+ for metric in metrics:
+ data = self._run_perf(metric, wl)
+ if idx not in self.results: self.results[idx] = dict()
+ self.convert(data, self.results[idx])
+ return
+
+ def second_test(self, collectlist, second_results):
+ workload = self.workloads[self.wlidx]
+ for metric in collectlist:
+ data = self._run_perf(metric, workload)
+ self.convert(data, second_results)
+
+ # End of Collector and Converter
+
+ # Start of Rule Generator
+ def parse_perf_metrics(self):
+ """
+ Read and parse perf metric file:
+ 1) find metrics with '1%' or '100%' as ScaleUnit for Percent check
+ 2) create metric name list
+ """
+ command = ['perf', 'list', '-j', '--details', 'metrics']
+ cmd = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
+ try:
+ data = json.loads(cmd.stdout)
+ for m in data:
+ if 'MetricName' not in m:
+ print("Warning: no metric name")
+ continue
+ name = m['MetricName'].lower()
+ self.metrics.add(name)
+ if 'ScaleUnit' in m and (m['ScaleUnit'] == '1%' or m['ScaleUnit'] == '100%'):
+ self.pctgmetrics.add(name.lower())
+ except ValueError as error:
+ print(f"Error when parsing metric data")
+ sys.exit()
+
+ return
+
+ def remove_unsupported_rules(self, rules):
+ new_rules = []
+ for rule in rules:
+ add_rule = True
+ for m in rule["Metrics"]:
+ if m["Name"] in self.skiplist or m["Name"] not in self.metrics:
+ add_rule = False
+ break
+ if add_rule:
+ new_rules.append(rule)
+ return new_rules
+
+ def create_rules(self):
+ """
+ Create full rules which includes:
+ 1) All the rules from the "relationship rules" file
+ 2) SingleMetric rule for all the 'percent' metrics
+
+ Reindex all the rules to avoid repeated RuleIndex
+ """
+ data = self.read_json(self.rulefname)
+ rules = data['RelationshipRules']
+ self.skiplist = set([name.lower() for name in data['SkipList']])
+ self.rules = self.remove_unsupported_rules(rules)
+ pctgrule = {'RuleIndex':0,
+ 'TestType':'SingleMetricTest',
+ 'RangeLower':'0',
+ 'RangeUpper': '100',
+ 'ErrorThreshold': self.tolerance,
+ 'Description':'Metrics in percent unit have value within [0, 100]',
+ 'Metrics': [{'Name': m.lower()} for m in self.pctgmetrics]}
+ self.rules.append(pctgrule)
+
+ # Re-index all rules to avoid repeated RuleIndex
+ idx = 1
+ for r in self.rules:
+ r['RuleIndex'] = idx
+ idx += 1
+
+ if self.debug:
+ #TODO: need to test and generate file name correctly
+ data = {'RelationshipRules':self.rules, 'SupportedMetrics': [{"MetricName": name} for name in self.metrics]}
+ self.json_dump(data, self.fullrulefname)
+
+ return
+ # End of Rule Generator
+
+ def _storewldata(self, key):
+ '''
+ Store all the data of one workload into the corresponding data structure for all workloads.
+ @param key: key to the dictionaries (index of self.workloads).
+ '''
+ self.allresults[key] = self.results
+ self.allignoremetrics[key] = self.ignoremetrics
+ self.allfailtests[key] = self.failtests
+ self.alltotalcnt[key] = self.totalcnt
+ self.allpassedcnt[key] = self.passedcnt
+ self.allerrlist[key] = self.errlist
+
+ #Initialize data structures before data validation of each workload
+ def _init_data(self):
+
+ testtypes = ['PositiveValueTest', 'RelationshipTest', 'SingleMetricTest']
+ self.results = dict()
+ self.ignoremetrics= set()
+ self.errlist = list()
+ self.failtests = {k:{'Total Tests':0, 'Passed Tests':0, 'Failed Tests':[]} for k in testtypes}
+ self.totalcnt = 0
+ self.passedcnt = 0
+
+ def test(self):
+ '''
+ The real entry point of the test framework.
+ This function loads the validation rule JSON file and Standard Metric file to create rules for
+ testing and namemap dictionaries.
+ It also reads in result JSON file for testing.
+
+ In the test process, it passes through each rule and launches the correct test function based on the
+ 'TestType' field of the rule.
+
+ The final report is written into a JSON file.
+ '''
+ if not self.collectlist:
+ self.parse_perf_metrics()
+ self.create_rules()
+ for i in range(0, len(self.workloads)):
+ self.wlidx = i
+ self._init_data()
+ self.collect_perf(self.workloads[i])
+ # Run positive value test
+ self.pos_val_test()
+ for r in self.rules:
+ # skip rules that use metrics that do not exist on this platform
+ testtype = r['TestType']
+ if not self.check_rule(testtype, r['Metrics']):
+ continue
+ if testtype == 'RelationshipTest':
+ self.relationship_test(r)
+ elif testtype == 'SingleMetricTest':
+ self.single_test(r)
+ else:
+ print("Unsupported Test Type: ", testtype)
+ self.errlist.append("Unsupported Test Type from rule: " + r['RuleIndex'])
+ self._storewldata(i)
+ print("Workload: ", self.workloads[i])
+ print("Total metrics collected: ", self.failtests['PositiveValueTest']['Total Tests'])
+ print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests'])
+ print("Total Test Count: ", self.totalcnt)
+ print("Passed Test Count: ", self.passedcnt)
+
+ self.create_report()
+ return sum(self.alltotalcnt.values()) != sum(self.allpassedcnt.values())
+# End of Class Validator
+
+
+def main() -> None:
+ parser = argparse.ArgumentParser(description="Launch metric value validation")
+
+ parser.add_argument("-rule", help="Base validation rule file", required=True)
+ parser.add_argument("-output_dir", help="Path for validator output file, report file", required=True)
+ parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_true", default=False)
+ parser.add_argument("-wl", help="Workload to run while data collection", default="true")
+ parser.add_argument("-m", help="Metric list to validate", default="")
+ args = parser.parse_args()
+ outpath = Path(args.output_dir)
+ reportf = Path.joinpath(outpath, 'perf_report.json')
+ fullrule = Path.joinpath(outpath, 'full_rule.json')
+ datafile = Path.joinpath(outpath, 'perf_data.json')
+
+ validator = Validator(args.rule, reportf, debug=args.debug,
+ datafname=datafile, fullrulefname=fullrule, workload=args.wl,
+ metrics=args.m)
+ ret = validator.test()
+
+ return ret
+
+
+if __name__ == "__main__":
+ import sys
+ sys.exit(main())
+
+
+
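For reference, a hedged sketch of driving the validator by hand with the argparse options defined above (-rule, -output_dir, -wl, -m, -debug); the paths and the trivial workload are placeholders:

#!/bin/sh
rules=tools/perf/tests/shell/lib/perf_metric_validation_rules.json
out=$(mktemp -d /tmp/__perf_metric_validation.XXXXX)
python3 tools/perf/tests/shell/lib/perf_metric_validation.py \
    -rule "${rules}" -output_dir "${out}" -wl "sleep 1"
# Exit status is non-zero when the total rule count differs from the passed count.
echo "validator exit status: $?"
rm -rf "${out}"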
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
new file mode 100644
index 000000000000..eb6f59e018b7
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
@@ -0,0 +1,398 @@
+{
+ "SkipList": [
+ "tsx_aborted_cycles",
+ "tsx_transactional_cycles",
+ "C2_Pkg_Residency",
+ "C6_Pkg_Residency",
+ "C1_Core_Residency",
+ "C6_Core_Residency",
+ "tma_false_sharing",
+ "tma_remote_cache",
+ "tma_contested_accesses"
+ ],
+ "RelationshipRules": [
+ {
+ "RuleIndex": 1,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Intel(R) Optane(TM) Persistent Memory(PMEM) bandwidth total includes Intel(R) Optane(TM) Persistent Memory(PMEM) read bandwidth and Intel(R) Optane(TM) Persistent Memory(PMEM) write bandwidth",
+ "Metrics": [
+ {
+ "Name": "pmem_memory_bandwidth_read",
+ "Alias": "a"
+ },
+ {
+ "Name": "pmem_memory_bandwidth_write",
+ "Alias": "b"
+ },
+ {
+ "Name": "pmem_memory_bandwidth_total",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 2,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "DDR memory bandwidth total includes DDR memory read bandwidth and DDR memory write bandwidth",
+ "Metrics": [
+ {
+ "Name": "memory_bandwidth_read",
+ "Alias": "a"
+ },
+ {
+ "Name": "memory_bandwidth_write",
+ "Alias": "b"
+ },
+ {
+ "Name": "memory_bandwidth_total",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 3,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "100",
+ "RangeUpper": "100",
+ "ErrorThreshold": 5.0,
+ "Description": "Total memory read accesses includes memory reads from last level cache (LLC) addressed to local DRAM and memory reads from the last level cache (LLC) addressed to remote DRAM.",
+ "Metrics": [
+ {
+ "Name": "numa_reads_addressed_to_local_dram",
+ "Alias": "a"
+ },
+ {
+ "Name": "numa_reads_addressed_to_remote_dram",
+ "Alias": "b"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 4,
+ "Formula": "a",
+ "TestType": "SingleMetricTest",
+ "RangeLower": "0.125",
+ "RangeUpper": "",
+ "ErrorThreshold": "",
+ "Description": "",
+ "Metrics": [
+ {
+ "Name": "cpi",
+ "Alias": "a"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 5,
+ "Formula": "",
+ "TestType": "SingleMetricTest",
+ "RangeLower": "0",
+ "RangeUpper": "1",
+ "ErrorThreshold": 5.0,
+ "Description": "Ratio values should be within value range [0,1)",
+ "Metrics": [
+ {
+ "Name": "loads_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "stores_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "l1d_mpi",
+ "Alias": ""
+ },
+ {
+ "Name": "l1d_demand_data_read_hits_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "l2_demand_data_read_hits_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "l2_mpi",
+ "Alias": ""
+ },
+ {
+ "Name": "l2_demand_data_read_mpi",
+ "Alias": ""
+ },
+ {
+ "Name": "l2_demand_code_mpi",
+ "Alias": ""
+ }
+ ]
+ },
+ {
+ "RuleIndex": 6,
+ "Formula": "a+b+c+d",
+ "TestType": "RelationshipTest",
+ "RangeLower": "100",
+ "RangeUpper": "100",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of TMA level 1 metrics should be 100%",
+ "Metrics": [
+ {
+ "Name": "tma_frontend_bound",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_bad_speculation",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_backend_bound",
+ "Alias": "c"
+ },
+ {
+ "Name": "tma_retiring",
+ "Alias": "d"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 7,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of the level 2 children should equal level 1 parent",
+ "Metrics": [
+ {
+ "Name": "tma_fetch_latency",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_fetch_bandwidth",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_frontend_bound",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 8,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of the level 2 children should equal level 1 parent",
+ "Metrics": [
+ {
+ "Name": "tma_branch_mispredicts",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_machine_clears",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_bad_speculation",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 9,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of the level 2 children should equal level 1 parent",
+ "Metrics": [
+ {
+ "Name": "tma_memory_bound",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_core_bound",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_backend_bound",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 10,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of the level 2 children should equal level 1 parent",
+ "Metrics": [
+ {
+ "Name": "tma_light_operations",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_heavy_operations",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_retiring",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 11,
+ "Formula": "a+b+c",
+ "TestType": "RelationshipTest",
+ "RangeLower": "100",
+ "RangeUpper": "100",
+ "ErrorThreshold": 5.0,
+ "Description": "The all_requests includes the memory_page_empty, memory_page_misses, and memory_page_hits equals.",
+ "Metrics": [
+ {
+ "Name": "memory_page_empty_vs_all_requests",
+ "Alias": "a"
+ },
+ {
+ "Name": "memory_page_misses_vs_all_requests",
+ "Alias": "b"
+ },
+ {
+ "Name": "memory_page_hits_vs_all_requests",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 12,
+ "Formula": "a-b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "0",
+ "RangeUpper": "",
+ "ErrorThreshold": 5.0,
+ "Description": "CPU utilization in kernel mode should always be <= cpu utilization",
+ "Metrics": [
+ {
+ "Name": "cpu_utilization",
+ "Alias": "a"
+ },
+ {
+ "Name": "cpu_utilization_in_kernel_mode",
+ "Alias": "b"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 13,
+ "Formula": "a-b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "0",
+ "RangeUpper": "",
+ "ErrorThreshold": 5.0,
+ "Description": "Total L2 misses per instruction should be >= L2 demand data read misses per instruction",
+ "Metrics": [
+ {
+ "Name": "l2_mpi",
+ "Alias": "a"
+ },
+ {
+ "Name": "l2_demand_data_read_mpi",
+ "Alias": "b"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 14,
+ "Formula": "a-b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "0",
+ "RangeUpper": "",
+ "ErrorThreshold": 5.0,
+ "Description": "Total L2 misses per instruction should be >= L2 demand code misses per instruction",
+ "Metrics": [
+ {
+ "Name": "l2_mpi",
+ "Alias": "a"
+ },
+ {
+ "Name": "l2_demand_code_mpi",
+ "Alias": "b"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 15,
+ "Formula": "b+c+d",
+ "TestType": "RelationshipTest",
+ "RangeLower": "a",
+ "RangeUpper": "a",
+ "ErrorThreshold": 5.0,
+ "Description": "L3 data read, rfo, code misses per instruction equals total L3 misses per instruction.",
+ "Metrics": [
+ {
+ "Name": "llc_mpi",
+ "Alias": "a"
+ },
+ {
+ "Name": "llc_data_read_mpi_demand_plus_prefetch",
+ "Alias": "b"
+ },
+ {
+ "Name": "llc_rfo_read_mpi_demand_plus_prefetch",
+ "Alias": "c"
+ },
+ {
+ "Name": "llc_code_read_mpi_demand_plus_prefetch",
+ "Alias": "d"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 16,
+ "Formula": "a",
+ "TestType": "SingleMetricTest",
+ "RangeLower": "0",
+ "RangeUpper": "8",
+ "ErrorThreshold": 0.0,
+ "Description": "Setting generous range for allowable frequencies",
+ "Metrics": [
+ {
+ "Name": "uncore_freq",
+ "Alias": "a"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 17,
+ "Formula": "a",
+ "TestType": "SingleMetricTest",
+ "RangeLower": "0",
+ "RangeUpper": "8",
+ "ErrorThreshold": 0.0,
+ "Description": "Setting generous range for allowable frequencies",
+ "Metrics": [
+ {
+ "Name": "cpu_operating_frequency",
+ "Alias": "a"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
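To make the RelationshipTest semantics concrete, here is a tiny worked example (sample values invented) of what rule 1 asserts once get_bounds() denormalizes the 5.0 ErrorThreshold against the upper bound:

#!/bin/sh
awk 'BEGIN {
    a = 3.10; b = 1.05; c = 4.12           # read, write, total bandwidth (made-up GB/s)
    tol = c * 5.0 / 100                    # denormalized threshold: 5% of the bound value
    ok = (a + b >= c - tol) && (a + b <= c + tol)
    printf "a+b=%.2f  c=%.2f  tolerance=%.3f  ->  %s\n", a + b, c, tol, (ok ? "PASS" : "FAIL")
}'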
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
new file mode 100644
index 000000000000..698343f0ecf9
--- /dev/null
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -0,0 +1,169 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
+
+# $1 name $2 extra_opt
+check_no_args()
+{
+ echo -n "Checking $1 output: no args "
+ perf stat $2 true
+ commachecker --no-args
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking $1 output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -a $2 true
+ commachecker --system-wide
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking $1 output: system wide no aggregation "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -A -a --no-merge $2 true
+ commachecker --system-wide-no-aggr
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking $1 output: interval "
+ perf stat -I 1000 $2 true
+ commachecker --interval
+ echo "[Success]"
+}
+
+check_event()
+{
+ echo -n "Checking $1 output: event "
+ perf stat -e cpu-clock $2 true
+ commachecker --event
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking $1 output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-core -a $2 true
+ commachecker --per-core
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking $1 output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-thread -a $2 true
+ commachecker --per-thread
+ echo "[Success]"
+}
+
+check_per_cache_instance()
+{
+ echo -n "Checking $1 output: per cache instance "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-cache -a $2 true
+ commachecker --per-cache
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking $1 output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-die -a $2 true
+ commachecker --per-die
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking $1 output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-node -a $2 true
+ commachecker --per-node
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking $1 output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-socket -a $2 true
+ commachecker --per-socket
+ echo "[Success]"
+}
+
+# The perf stat options for per-socket, per-core, per-die
+# and -A (no_aggr mode) use the information fetched from this
+# directory: "/sys/devices/system/cpu/cpu*/topology". For
+# example, the socket value is fetched from the "physical_package_id"
+# file in the topology directory.
+# Reference: cpu__get_topology_int in util/cpumap.c
+# If the platform doesn't expose topology information, values
+# will be set to -1. For example, in the case of the pSeries platform
+# on powerpc, the value of "physical_package_id" is restricted
+# and set to -1. The check here validates the socket-id read from
+# the topology file before proceeding further.
+
+FILE_LOC="/sys/devices/system/cpu/cpu*/topology/"
+FILE_NAME="physical_package_id"
+
+function check_for_topology()
+{
+ if ! ParanoidAndNotRoot 0
+ then
+ socket_file=`ls $FILE_LOC/$FILE_NAME | head -n 1`
+ [ -z $socket_file ] && {
+ echo 0
+ return
+ }
+ socket_id=`cat $socket_file`
+ [ $socket_id == -1 ] && {
+ echo 1
+ return
+ }
+ fi
+ echo 0
+}
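A sketch of the calling convention this library expects, mirroring the stat+csv_output.sh conversion later in this patch; the caller still has to define its own commachecker before using the helpers (the stub below is only a stand-in):

#!/bin/sh
commachecker() { :; }   # stand-in; the real tests parse "${stat_output}" here
. "$(dirname "$0")/lib/stat_output.sh"

stat_output=$(mktemp /tmp/__perf_test.stat_output.csv.XXXXX)
perf_cmd="-x, -o ${stat_output}"

skip_test=$(check_for_topology)
check_no_args "CSV" "$perf_cmd"
if [ "$skip_test" -ne 1 ]; then
    check_per_cache_instance "CSV" "$perf_cmd"
fi
rm -f "${stat_output}"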
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
index be5fcafb26aa..f2cc187b6186 100755
--- a/tools/perf/tests/shell/lock_contention.sh
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -11,14 +11,14 @@ result=$(mktemp /tmp/__perf_test.result.XXXXX)
cleanup() {
rm -f ${perfdata}
rm -f ${result}
- trap - exit term int
+ trap - EXIT TERM INT
}
trap_cleanup() {
cleanup
exit ${err}
}
-trap trap_cleanup exit term int
+trap trap_cleanup EXIT TERM INT
check() {
if [ `id -u` != 0 ]; then
@@ -40,8 +40,8 @@ test_record()
perf lock record -o ${perfdata} -- perf bench sched messaging > /dev/null 2>&1
# the output goes to the stderr and we expect only 1 output (-E 1)
perf lock contention -i ${perfdata} -E 1 -q 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l)
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
exit
fi
@@ -58,8 +58,8 @@ test_bpf()
# the perf lock contention output goes to the stderr
perf lock con -a -b -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] BPF result count is not 1:" $(cat "${result}" | wc -l)
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
exit
fi
@@ -70,8 +70,8 @@ test_record_concurrent()
echo "Testing perf lock record and perf lock contention at the same time"
perf lock record -o- -- perf bench sched messaging 2> /dev/null | \
perf lock contention -i- -E 1 -q 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l)
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
exit
fi
@@ -81,8 +81,8 @@ test_aggr_task()
{
echo "Testing perf lock contention --threads"
perf lock contention -i ${perfdata} -t -E 1 -q 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l)
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
exit
fi
@@ -93,8 +93,8 @@ test_aggr_task()
# the perf lock contention output goes to the stderr
perf lock con -a -b -t -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] BPF result count is not 1:" $(cat "${result}" | wc -l)
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
exit
fi
@@ -104,8 +104,8 @@ test_aggr_addr()
{
echo "Testing perf lock contention --lock-addr"
perf lock contention -i ${perfdata} -l -E 1 -q 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l)
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
exit
fi
@@ -116,8 +116,8 @@ test_aggr_addr()
# the perf lock contention output goes to the stderr
perf lock con -a -b -l -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] BPF result count is not 1:" $(cat "${result}" | wc -l)
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
err=1
exit
fi
@@ -127,8 +127,8 @@ test_type_filter()
{
echo "Testing perf lock contention --type-filter (w/ spinlock)"
perf lock contention -i ${perfdata} -Y spinlock -q 2> ${result}
- if [ $(grep -c -v spinlock "${result}") != "0" ]; then
- echo "[Fail] Recorded result should not have non-spinlocks:" $(cat "${result}")
+ if [ "$(grep -c -v spinlock "${result}")" != "0" ]; then
+ echo "[Fail] Recorded result should not have non-spinlocks:" "$(cat "${result}")"
err=1
exit
fi
@@ -138,8 +138,8 @@ test_type_filter()
fi
perf lock con -a -b -Y spinlock -q -- perf bench sched messaging > /dev/null 2> ${result}
- if [ $(grep -c -v spinlock "${result}") != "0" ]; then
- echo "[Fail] BPF result should not have non-spinlocks:" $(cat "${result}")
+ if [ "$(grep -c -v spinlock "${result}")" != "0" ]; then
+ echo "[Fail] BPF result should not have non-spinlocks:" "$(cat "${result}")"
err=1
exit
fi
@@ -149,7 +149,7 @@ test_lock_filter()
{
echo "Testing perf lock contention --lock-filter (w/ tasklist_lock)"
perf lock contention -i ${perfdata} -l -q 2> ${result}
- if [ $(grep -c tasklist_lock "${result}") != "1" ]; then
+ if [ "$(grep -c tasklist_lock "${result}")" != "1" ]; then
echo "[Skip] Could not find 'tasklist_lock'"
return
fi
@@ -159,8 +159,8 @@ test_lock_filter()
# find out the type of tasklist_lock
local type=$(head -1 "${result}" | awk '{ print $8 }' | sed -e 's/:.*//')
- if [ $(grep -c -v "${type}" "${result}") != "0" ]; then
- echo "[Fail] Recorded result should not have non-${type} locks:" $(cat "${result}")
+ if [ "$(grep -c -v "${type}" "${result}")" != "0" ]; then
+ echo "[Fail] Recorded result should not have non-${type} locks:" "$(cat "${result}")"
err=1
exit
fi
@@ -170,8 +170,8 @@ test_lock_filter()
fi
perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging > /dev/null 2> ${result}
- if [ $(grep -c -v "${type}" "${result}") != "0" ]; then
- echo "[Fail] BPF result should not have non-${type} locks:" $(cat "${result}")
+ if [ "$(grep -c -v "${type}" "${result}")" != "0" ]; then
+ echo "[Fail] BPF result should not have non-${type} locks:" "$(cat "${result}")"
err=1
exit
fi
@@ -181,14 +181,14 @@ test_stack_filter()
{
echo "Testing perf lock contention --callstack-filter (w/ unix_stream)"
perf lock contention -i ${perfdata} -v -q 2> ${result}
- if [ $(grep -c unix_stream "${result}") == "0" ]; then
+ if [ "$(grep -c unix_stream "${result}")" = "0" ]; then
echo "[Skip] Could not find 'unix_stream'"
return
fi
perf lock contention -i ${perfdata} -E 1 -S unix_stream -q 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] Recorded result should have a lock from unix_stream:" $(cat "${result}")
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result should have a lock from unix_stream:" "$(cat "${result}")"
err=1
exit
fi
@@ -198,8 +198,8 @@ test_stack_filter()
fi
perf lock con -a -b -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] BPF result should have a lock from unix_stream:" $(cat "${result}")
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result should have a lock from unix_stream:" "$(cat "${result}")"
err=1
exit
fi
@@ -209,14 +209,14 @@ test_aggr_task_stack_filter()
{
echo "Testing perf lock contention --callstack-filter with task aggregation"
perf lock contention -i ${perfdata} -v -q 2> ${result}
- if [ $(grep -c unix_stream "${result}") == "0" ]; then
+ if [ "$(grep -c unix_stream "${result}")" = "0" ]; then
echo "[Skip] Could not find 'unix_stream'"
return
fi
perf lock contention -i ${perfdata} -t -E 1 -S unix_stream -q 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] Recorded result should have a task from unix_stream:" $(cat "${result}")
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result should have a task from unix_stream:" "$(cat "${result}")"
err=1
exit
fi
@@ -226,8 +226,8 @@ test_aggr_task_stack_filter()
fi
perf lock con -a -b -t -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
- if [ $(cat "${result}" | wc -l) != "1" ]; then
- echo "[Fail] BPF result should have a task from unix_stream:" $(cat "${result}")
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result should have a task from unix_stream:" "$(cat "${result}")"
err=1
exit
fi
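The quoting added throughout lock_contention.sh guards against empty command substitutions inside [ ]; a self-contained sketch of the failure mode being closed:

#!/bin/sh
result=$(mktemp /tmp/__demo.XXXXX)                # empty file, like an empty perf lock report
if [ "$(cat "${result}")" != "expected" ]; then   # quoted: an empty expansion still compares
    echo "handled cleanly: output is empty, not equal to 'expected'"
fi
# Unquoted, the same test would collapse to [ != "expected" ] and error out
# with "unary operator expected" instead of taking the failure branch.
rm -f "${result}"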
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index bbb5b3d185fa..89214a6d9951 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -10,11 +10,11 @@
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
-. $(dirname $0)/lib/probe.sh
-. $(dirname $0)/lib/probe_vfs_getname.sh
+. "$(dirname "$0")/lib/probe.sh"
+. "$(dirname "$0")/lib/probe_vfs_getname.sh"
libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
-nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
+nm -Dg $libc 2>/dev/null | grep -F -q inet_pton || exit 254
event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?'
@@ -23,7 +23,7 @@ add_libc_inet_pton_event() {
event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))")
- if [ $? -ne 0 -o -z "$event_name" ] ; then
+ if [ $? -ne 0 ] || [ -z "$event_name" ] ; then
printf "FAIL: could not add event\n"
return 1
fi
@@ -94,7 +94,7 @@ delete_libc_inet_pton_event() {
}
# Check for IPv6 interface existence
-ip a sh lo | fgrep -q inet6 || exit 2
+ip a sh lo | grep -F -q inet6 || exit 2
skip_if_no_perf_probe && \
add_libc_inet_pton_event && \
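Two portability points from the hunks above, in a standalone sketch: fgrep is a deprecated alias for grep -F, and the XSI -o operator inside [ ] becomes two separate tests joined with ||.

#!/bin/sh
if printf 'probe_libc:inet_pton\n' | grep -F -q inet_pton; then
    echo "fixed-string match works without fgrep"
fi

status=1
event_name=""
if [ ${status} -ne 0 ] || [ -z "${event_name}" ]; then   # replaces [ $? -ne 0 -o -z "$event_name" ]
    echo "either the probe failed or no event name was captured"
fi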
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index 1341437e1bd9..7f664f1889d9 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -9,11 +9,11 @@
# SPDX-License-Identifier: GPL-2.0
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
-. $(dirname $0)/lib/probe.sh
+. "$(dirname "$0")/lib/probe.sh"
skip_if_no_perf_probe || exit 2
-. $(dirname $0)/lib/probe_vfs_getname.sh
+. "$(dirname "$0")/lib/probe_vfs_getname.sh"
record_open_file() {
echo "Recording open file:"
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index fb78b6251a4e..34a0701fee05 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -6,7 +6,8 @@
set -e
-skip_test=0
+. $(dirname $0)/lib/stat_output.sh
+
csv_sep=@
stat_output=$(mktemp /tmp/__perf_test.stat_output.csv.XXXXX)
@@ -35,11 +36,12 @@ function commachecker()
;; "--interval") exp=7
;; "--per-thread") exp=7
;; "--system-wide-no-aggr") exp=7
- [ $(uname -m) = "s390x" ] && exp='^[6-7]$'
+ [ "$(uname -m)" = "s390x" ] && exp='^[6-7]$'
;; "--per-core") exp=8
;; "--per-socket") exp=8
;; "--per-node") exp=8
;; "--per-die") exp=8
+ ;; "--per-cache") exp=8
esac
while read line
@@ -62,168 +64,22 @@ function commachecker()
return 0
}
-# Return true if perf_event_paranoid is > $1 and not running as root.
-function ParanoidAndNotRoot()
-{
- [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
-}
-
-check_no_args()
-{
- echo -n "Checking CSV output: no args "
- perf stat -x$csv_sep -o "${stat_output}" true
- commachecker --no-args
- echo "[Success]"
-}
-
-check_system_wide()
-{
- echo -n "Checking CSV output: system wide "
- if ParanoidAndNotRoot 0
- then
- echo "[Skip] paranoid and not root"
- return
- fi
- perf stat -x$csv_sep -a -o "${stat_output}" true
- commachecker --system-wide
- echo "[Success]"
-}
-
-check_system_wide_no_aggr()
-{
- echo -n "Checking CSV output: system wide no aggregation "
- if ParanoidAndNotRoot 0
- then
- echo "[Skip] paranoid and not root"
- return
- fi
- perf stat -x$csv_sep -A -a --no-merge -o "${stat_output}" true
- commachecker --system-wide-no-aggr
- echo "[Success]"
-}
-
-check_interval()
-{
- echo -n "Checking CSV output: interval "
- perf stat -x$csv_sep -I 1000 -o "${stat_output}" true
- commachecker --interval
- echo "[Success]"
-}
-
-
-check_event()
-{
- echo -n "Checking CSV output: event "
- perf stat -x$csv_sep -e cpu-clock -o "${stat_output}" true
- commachecker --event
- echo "[Success]"
-}
-
-check_per_core()
-{
- echo -n "Checking CSV output: per core "
- if ParanoidAndNotRoot 0
- then
- echo "[Skip] paranoid and not root"
- return
- fi
- perf stat -x$csv_sep --per-core -a -o "${stat_output}" true
- commachecker --per-core
- echo "[Success]"
-}
-
-check_per_thread()
-{
- echo -n "Checking CSV output: per thread "
- if ParanoidAndNotRoot 0
- then
- echo "[Skip] paranoid and not root"
- return
- fi
- perf stat -x$csv_sep --per-thread -a -o "${stat_output}" true
- commachecker --per-thread
- echo "[Success]"
-}
-
-check_per_die()
-{
- echo -n "Checking CSV output: per die "
- if ParanoidAndNotRoot 0
- then
- echo "[Skip] paranoid and not root"
- return
- fi
- perf stat -x$csv_sep --per-die -a -o "${stat_output}" true
- commachecker --per-die
- echo "[Success]"
-}
-
-check_per_node()
-{
- echo -n "Checking CSV output: per node "
- if ParanoidAndNotRoot 0
- then
- echo "[Skip] paranoid and not root"
- return
- fi
- perf stat -x$csv_sep --per-node -a -o "${stat_output}" true
- commachecker --per-node
- echo "[Success]"
-}
-
-check_per_socket()
-{
- echo -n "Checking CSV output: per socket "
- if ParanoidAndNotRoot 0
- then
- echo "[Skip] paranoid and not root"
- return
- fi
- perf stat -x$csv_sep --per-socket -a -o "${stat_output}" true
- commachecker --per-socket
- echo "[Success]"
-}
-
-# The perf stat options for per-socket, per-core, per-die
-# and -A ( no_aggr mode ) uses the info fetched from this
-# directory: "/sys/devices/system/cpu/cpu*/topology". For
-# example, socket value is fetched from "physical_package_id"
-# file in topology directory.
-# Reference: cpu__get_topology_int in util/cpumap.c
-# If the platform doesn't expose topology information, values
-# will be set to -1. For example, incase of pSeries platform
-# of powerpc, value for "physical_package_id" is restricted
-# and set to -1. Check here validates the socket-id read from
-# topology file before proceeding further
-
-FILE_LOC="/sys/devices/system/cpu/cpu*/topology/"
-FILE_NAME="physical_package_id"
-
-check_for_topology()
-{
- if ! ParanoidAndNotRoot 0
- then
- socket_file=`ls $FILE_LOC/$FILE_NAME | head -n 1`
- [ -z $socket_file ] && return 0
- socket_id=`cat $socket_file`
- [ $socket_id == -1 ] && skip_test=1
- return 0
- fi
-}
+perf_cmd="-x$csv_sep -o ${stat_output}"
-check_for_topology
-check_no_args
-check_system_wide
-check_interval
-check_event
-check_per_thread
-check_per_node
+skip_test=$(check_for_topology)
+check_no_args "CSV" "$perf_cmd"
+check_system_wide "CSV" "$perf_cmd"
+check_interval "CSV" "$perf_cmd"
+check_event "CSV" "$perf_cmd"
+check_per_thread "CSV" "$perf_cmd"
+check_per_node "CSV" "$perf_cmd"
if [ $skip_test -ne 1 ]
then
- check_system_wide_no_aggr
- check_per_core
- check_per_die
- check_per_socket
+ check_system_wide_no_aggr "CSV" "$perf_cmd"
+ check_per_core "CSV" "$perf_cmd"
+ check_per_cache_instance "CSV" "$perf_cmd"
+ check_per_die "CSV" "$perf_cmd"
+ check_per_socket "CSV" "$perf_cmd"
else
echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
fi
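
The per-format helpers removed above move into the shared lib/stat_output.sh listed in the diffstat; its exact contents are not shown here, but judging from the new call sites a helper of roughly this shape would cover the CSV case. A sketch under that assumption, not the real library code:

    # Assumed shape of a shared helper: $1 is a label used in messages
    # ("CSV", "STD", ...), $2 carries the per-format perf stat flags the
    # calling script assembled in $perf_cmd.
    check_no_args()
    {
        echo -n "Checking $1 output: no args "
        # shellcheck disable=SC2086
        perf stat $2 true
        commachecker --no-args
        echo "[Success]"
    }
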
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index f3e4967cc72e..196e22672c50 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -40,7 +40,7 @@ trap trap_cleanup EXIT TERM INT
# Return true if perf_event_paranoid is > $1 and not running as root.
function ParanoidAndNotRoot()
{
- [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
}
check_no_args()
@@ -120,6 +120,18 @@ check_per_thread()
echo "[Success]"
}
+check_per_cache_instance()
+{
+ echo -n "Checking json output: per cache_instance "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-cache -a true 2>&1 | $PYTHON $pythonchecker --per-cache
+ echo "[Success]"
+}
+
check_per_die()
{
echo -n "Checking json output: per die "
@@ -197,6 +209,7 @@ if [ $skip_test -ne 1 ]
then
check_system_wide_no_aggr
check_per_core
+ check_per_cache_instance
check_per_die
check_per_socket
else
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
index e6e35fc6c882..0e9cba84e757 100755
--- a/tools/perf/tests/shell/stat+shadow_stat.sh
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -33,7 +33,7 @@ test_global_aggr()
fi
# use printf for rounding and a leading zero
- res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+ res=`printf "%.2f" "$(echo "scale=6; $num / $cyc" | bc -q)"`
if [ "$ipc" != "$res" ]; then
echo "IPC is different: $res != $ipc ($num / $cyc)"
exit 1
@@ -67,7 +67,7 @@ test_no_aggr()
fi
# use printf for rounding and a leading zero
- res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+ res=`printf "%.2f" "$(echo "scale=6; $num / $cyc" | bc -q)"`
if [ "$ipc" != "$res" ]; then
echo "IPC is different for $cpu: $res != $ipc ($num / $cyc)"
exit 1
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
new file mode 100755
index 000000000000..f972b31fa0c2
--- /dev/null
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# perf stat STD output linter
+# SPDX-License-Identifier: GPL-2.0
+# Tests various perf stat STD output commands for
+# default event and metricgroup
+
+set -e
+
+. $(dirname $0)/lib/stat_output.sh
+
+stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
+
+event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
+event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
+skip_metric=("stalled cycles per insn" "tma_")
+
+cleanup() {
+ rm -f "${stat_output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+function commachecker()
+{
+ local -i cnt=0
+ local prefix=1
+
+ case "$1"
+ in "--interval") prefix=2
+ ;; "--per-thread") prefix=2
+ ;; "--system-wide-no-aggr") prefix=2
+ ;; "--per-core") prefix=3
+ ;; "--per-socket") prefix=3
+ ;; "--per-node") prefix=3
+ ;; "--per-die") prefix=3
+ ;; "--per-cache") prefix=3
+ esac
+
+ while read line
+ do
+ # Ignore initial "started on" comment.
+ x=${line:0:1}
+ [ "$x" = "#" ] && continue
+ # Ignore initial blank line.
+ [ "$line" = "" ] && continue
+ # Ignore "Performance counter stats"
+ x=${line:0:25}
+ [ "$x" = "Performance counter stats" ] && continue
+ # Ignore "seconds time elapsed" and break
+ [[ "$line" == *"time elapsed"* ]] && break
+
+ main_body=$(echo $line | cut -d' ' -f$prefix-)
+ x=${main_body%#*}
+ [ "$x" = "" ] && continue
+
+ # Skip metrics without event name
+ y=${main_body#*#}
+ for i in "${!skip_metric[@]}"; do
+ [[ "$y" == *"${skip_metric[$i]}"* ]] && break
+ done
+ [[ "$y" == *"${skip_metric[$i]}"* ]] && continue
+
+ # Check default event
+ for i in "${!event_name[@]}"; do
+ [[ "$x" == *"${event_name[$i]}"* ]] && break
+ done
+
+ [[ ! "$x" == *"${event_name[$i]}"* ]] && {
+ echo "Unknown event name in $line" 1>&2
+ exit 1;
+ }
+
+ # Check event metric if it exists
+ [[ ! "$main_body" == *"#"* ]] && continue
+ [[ ! "$main_body" == *"${event_metric[$i]}"* ]] && {
+ echo "wrong event metric. expected ${event_metric[$i]} in $line" 1>&2
+ exit 1;
+ }
+ done < "${stat_output}"
+ return 0
+}
+
+perf_cmd="-o ${stat_output}"
+
+skip_test=$(check_for_topology)
+check_no_args "STD" "$perf_cmd"
+check_system_wide "STD" "$perf_cmd"
+check_interval "STD" "$perf_cmd"
+check_per_thread "STD" "$perf_cmd"
+check_per_node "STD" "$perf_cmd"
+if [ $skip_test -ne 1 ]
+then
+ check_system_wide_no_aggr "STD" "$perf_cmd"
+ check_per_core "STD" "$perf_cmd"
+ check_per_cache_instance "STD" "$perf_cmd"
+ check_per_die "STD" "$perf_cmd"
+ check_per_socket "STD" "$perf_cmd"
+else
+ echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
+fi
+cleanup
+exit 0
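
To make the prefix and cut handling in commachecker above concrete: in --per-core mode the first two columns are the aggregation id and the CPU count, so prefix=3 strips them and leaves "value event # metric". The counter values below are fabricated for illustration:

    # Unquoted $line lets word splitting collapse the runs of spaces, so the
    # space-delimited field numbers used by cut line up.
    line="S0-D0-C0        2        1234567      cycles     #    3.50 GHz"
    echo $line | cut -d' ' -f3-    # -> 1234567 cycles # 3.50 GHz
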
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
index b154fbb15d54..3f1e67795490 100755
--- a/tools/perf/tests/shell/stat.sh
+++ b/tools/perf/tests/shell/stat.sh
@@ -103,10 +103,54 @@ test_topdown_weak_groups() {
echo "Topdown weak groups test [Success]"
}
+test_cputype() {
+ # Test --cputype argument.
+ echo "cputype test"
+
+ # Bogus PMU should fail.
+ if perf stat --cputype="123" -e instructions true > /dev/null 2>&1
+ then
+ echo "cputype test [Bogus PMU didn't fail]"
+ err=1
+ return
+ fi
+
+ # Find a known PMU for cputype.
+ pmu=""
+ for i in cpu cpu_atom armv8_pmuv3_0
+ do
+ if test -d "/sys/devices/$i"
+ then
+ pmu="$i"
+ break
+ fi
+ if perf stat -e "$i/instructions/" true > /dev/null 2>&1
+ then
+ pmu="$i"
+ break
+ fi
+ done
+ if test "x$pmu" = "x"
+ then
+ echo "cputype test [Skipped known PMU not found]"
+ return
+ fi
+
+ # Test running with cputype produces output.
+ if ! perf stat --cputype="$pmu" -e instructions true 2>&1 | grep -E -q "instructions"
+ then
+ echo "cputype test [Failed count missed with given filter]"
+ err=1
+ return
+ fi
+ echo "cputype test [Success]"
+}
+
test_default_stat
test_stat_record_report
test_stat_record_script
test_stat_repeat_weak_groups
test_topdown_groups
test_topdown_weak_groups
+test_cputype
exit $err
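
The loop in test_cputype probes a short list of known core PMU names; on a given machine the PMUs the kernel actually registers can also be listed directly from sysfs, for example:

    # Core PMUs show up here as 'cpu' (x86), 'cpu_core'/'cpu_atom' (hybrid x86)
    # or 'armv8_pmuv3_0' (arm64), alongside software and uncore PMUs.
    ls /sys/bus/event_source/devices/
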
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
index 22e9cb294b40..54774525e18a 100755
--- a/tools/perf/tests/shell/stat_all_metrics.sh
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -6,20 +6,20 @@ err=0
for m in $(perf list --raw-dump metrics); do
echo "Testing $m"
result=$(perf stat -M "$m" true 2>&1)
- if [[ "$result" =~ "${m:0:50}" ]] || [[ "$result" =~ "<not supported>" ]]
+ if [[ "$result" =~ ${m:0:50} ]] || [[ "$result" =~ "<not supported>" ]]
then
continue
fi
# Failed so try system wide.
result=$(perf stat -M "$m" -a sleep 0.01 2>&1)
- if [[ "$result" =~ "${m:0:50}" ]]
+ if [[ "$result" =~ ${m:0:50} ]]
then
continue
fi
# Failed again, possibly the workload was too small so retry with something
# longer.
result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
- if [[ "$result" =~ "${m:0:50}" ]]
+ if [[ "$result" =~ ${m:0:50} ]]
then
continue
fi
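
The dropped quotes change the semantics of the match: in bash, a quoted right-hand side of =~ is compared as a literal string, while an unquoted one is treated as an extended regular expression. A small standalone bash illustration with a fabricated metric name:

    out="5.1%  tma_frontend_bound"
    [[ "$out" =~ "tma_.*" ]] && echo "literal"   # no output: "tma_.*" is plain text
    [[ "$out" =~ tma_.* ]]   && echo "regex"     # prints "regex": pattern is an ERE
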
diff --git a/tools/perf/tests/shell/stat_all_pfm.sh b/tools/perf/tests/shell/stat_all_pfm.sh
new file mode 100755
index 000000000000..4d004f777a6e
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_pfm.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+# perf all libpfm4 events test
+# SPDX-License-Identifier: GPL-2.0
+
+if perf version --build-options | grep HAVE_LIBPFM | grep -q OFF
+then
+ echo "Skipping, no libpfm4 support"
+ exit 2
+fi
+
+err=0
+for p in $(perf list --raw-dump pfm)
+do
+ if echo "$p" | grep -q unc_
+ then
+ echo "Skipping uncore event '$p' that may require additional options."
+ continue
+ fi
+ echo "Testing $p"
+ result=$(perf stat --pfm-events "$p" true 2>&1)
+ x=$?
+ if echo "$result" | grep -q "failed to parse event $p : invalid or missing unit mask"
+ then
+ continue
+ fi
+ if test "$x" -ne "0"
+ then
+ echo "Unexpected exit code '$x'"
+ err=1
+ fi
+ if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>"
+ then
+ # We failed to see the event and it is supported. Possibly the workload was
+ # too small so retry with something longer.
+ result=$(perf stat --pfm-events "$p" perf bench internals synthesize 2>&1)
+ x=$?
+ if test "$x" -ne "0"
+ then
+ echo "Unexpected exit code '$x'"
+ err=1
+ fi
+ if ! echo "$result" | grep -q "$p"
+ then
+ echo "Event '$p' not printed in:"
+ echo "$result"
+ err=1
+ fi
+ fi
+done
+
+exit "$err"
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
new file mode 100755
index 000000000000..ad94c936de7e
--- /dev/null
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# perf metrics value validation
+# SPDX-License-Identifier: GPL-2.0
+if [ "x$PYTHON" == "x" ]
+then
+ if which python3 > /dev/null
+ then
+ PYTHON=python3
+ else
+ echo Skipping test, python3 not detected, please set environment variable PYTHON.
+ exit 2
+ fi
+fi
+
+grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
+
+pythonvalidator=$(dirname $0)/lib/perf_metric_validation.py
+rulefile=$(dirname $0)/lib/perf_metric_validation_rules.json
+tmpdir=$(mktemp -d /tmp/__perf_test.program.XXXXX)
+workload="perf bench futex hash -r 2 -s"
+
+# Add -debug, save data file and full rule file
+echo "Launch python validation script $pythonvalidator"
+echo "Output will be stored in: $tmpdir"
+$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
+ret=$?
+rm -rf $tmpdir
+
+exit $ret
+
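
The "# Add -debug" comment above implies the validator accepts a -debug option that preserves its data file and the full rule set; assuming that option exists as described (not verified here), a debugging run could look like:

    # Hypothetical: keep the validator's intermediate output under $tmpdir,
    # relying on the -debug switch mentioned in the comment above.
    $PYTHON "$pythonvalidator" -rule "$rulefile" -output_dir "$tmpdir" \
            -wl "perf bench futex hash -r 2 -s" -debug
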
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index e61d8deaa0c4..66dfdfdad553 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -9,13 +9,14 @@ TEST_PROGRAM="perf test -w leafloop"
cleanup_files()
{
- rm -f $PERF_DATA
+ rm -f "$PERF_DATA"
}
-trap cleanup_files exit term int
+trap cleanup_files EXIT TERM INT
# Add a 1 second delay to skip samples that are not in the leaf() function
-perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
+# shellcheck disable=SC2086
+perf record -o "$PERF_DATA" --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
PID=$!
echo " + Recording (PID=$PID)..."
@@ -33,8 +34,8 @@ wait $PID
# 76c leafloop
# ...
-perf script -i $PERF_DATA -F comm,ip,sym | head -n4
-perf script -i $PERF_DATA -F comm,ip,sym | head -n4 | \
+perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
+perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 | \
awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||
sym[1] != "parent" ||
sym[2] != "leafloop") exit 1 }'
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 482009e17bda..f1bf5621160f 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -28,11 +28,11 @@ cleanup_files()
rm -f ${perfdata}
rm -f ${file}
rm -f "${perfdata}.old"
- trap - exit term int
+ trap - EXIT TERM INT
exit $glb_err
}
-trap cleanup_files exit term int
+trap cleanup_files EXIT TERM INT
record_touch_file() {
echo "Recording trace (only user mode) with path: CPU$2 => $1"
@@ -89,7 +89,7 @@ is_device_sink() {
# cannot support perf PMU.
echo "$1" | grep -E -q -v "tpiu"
- if [ $? -eq 0 -a -e "$1/enable_sink" ]; then
+ if [ $? -eq 0 ] && [ -e "$1/enable_sink" ]; then
pmu_dev="/sys/bus/event_source/devices/cs_etm/sinks/$2"
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
index aa094d71f5b4..03d5c7d12ee5 100755
--- a/tools/perf/tests/shell/test_arm_spe.sh
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -27,7 +27,7 @@ cleanup_files()
exit $glb_err
}
-trap cleanup_files exit term int
+trap cleanup_files EXIT TERM INT
arm_spe_report() {
if [ $2 = 0 ]; then
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
index 1c49d8293003..09908d71c994 100755
--- a/tools/perf/tests/shell/test_brstack.sh
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -18,7 +18,7 @@ cleanup() {
rm -rf $TMPDIR
}
-trap cleanup exit term int
+trap cleanup EXIT TERM INT
test_user_branches() {
echo "Testing user branch stack sampling"
@@ -47,17 +47,17 @@ test_user_branches() {
# first argument <arg0> is the argument passed to "--branch-stack <arg0>,save_type,u"
# second argument are the expected branch types for the given filter
test_filter() {
- local filter=$1
- local expect=$2
+ test_filter_filter=$1
+ test_filter_expect=$2
- echo "Testing branch stack filtering permutation ($filter,$expect)"
+ echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
- perf record -o $TMPDIR/perf.data --branch-filter $filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
+ perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script
# fail if we find any branch type that doesn't match any of the expected ones
# also consider UNKNOWN branch types (-)
- if grep -E -vm1 "^[^ ]*/($expect|-|( *))/.*$" $TMPDIR/perf.script; then
+ if grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" $TMPDIR/perf.script; then
return 1
fi
}
diff --git a/tools/perf/tests/shell/test_perf_data_converter_json.sh b/tools/perf/tests/shell/test_perf_data_converter_json.sh
new file mode 100755
index 000000000000..72ac6c83231c
--- /dev/null
+++ b/tools/perf/tests/shell/test_perf_data_converter_json.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+# 'perf data convert --to-json' command test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+
+if [ "$PYTHON" = "" ] ; then
+ if which python3 > /dev/null ; then
+ PYTHON=python3
+ elif which python > /dev/null ; then
+ PYTHON=python
+ else
+ echo Skipping test, python not detected, please set environment variable PYTHON.
+ exit 2
+ fi
+fi
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+result=$(mktemp /tmp/__perf_test.output.json.XXXXX)
+
+cleanup()
+{
+ rm -f "${perfdata}"
+ rm -f "${result}"
+ trap - exit term int
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit ${err}
+}
+trap trap_cleanup exit term int
+
+test_json_converter_command()
+{
+ echo "Testing Perf Data Convertion Command to JSON"
+ perf record -o "$perfdata" -F 99 -g -- perf test -w noploop > /dev/null 2>&1
+ perf data convert --to-json "$result" --force -i "$perfdata" >/dev/null 2>&1
+ if [ "$(wc -l < "${result}")" -gt 0 ] ; then
+ echo "Perf Data Converter Command to JSON [SUCCESS]"
+ else
+ echo "Perf Data Converter Command to JSON [FAILED]"
+ err=1
+ exit
+ fi
+}
+
+validate_json_format()
+{
+ echo "Validating Perf Data Converted JSON file"
+ if [ -f "$result" ] ; then
+ if $PYTHON -c "import json; json.load(open('$result'))" >/dev/null 2>&1 ; then
+ echo "The file contains valid JSON format [SUCCESS]"
+ else
+ echo "The file does not contain valid JSON format [FAILED]"
+ err=1
+ exit
+ fi
+ else
+ echo "File not found [FAILED]"
+ err=2
+ exit
+ fi
+}
+
+test_json_converter_command
+validate_json_format
+
+exit ${err}
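
The same conversion and validity check can be reproduced by hand outside the test harness; the file names below are placeholders:

    perf record -o example.perf.data -- true                 # record a tiny profile
    perf data convert --to-json example.json -i example.perf.data
    python3 -m json.tool example.json > /dev/null \
            && echo "example.json is valid JSON"
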
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
index a98e4ab66040..0095abbe20ca 100755
--- a/tools/perf/tests/shell/test_task_analyzer.sh
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -5,12 +5,18 @@
tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
err=0
+# set PERF_EXEC_PATH to find scripts in the source directory
+perfdir=$(dirname "$0")/../..
+if [ -e "$perfdir/scripts/python/Perf-Trace-Util" ]; then
+ export PERF_EXEC_PATH=$perfdir
+fi
+
cleanup() {
rm -f perf.data
rm -f perf.data.old
rm -f csv
rm -f csvsummary
- rm -rf $tmpdir
+ rm -rf "$tmpdir"
trap - exit term int
}
@@ -21,7 +27,7 @@ trap_cleanup() {
trap trap_cleanup exit term int
report() {
- if [ $1 = 0 ]; then
+ if [ "$1" = 0 ]; then
echo "PASS: \"$2\""
else
echo "FAIL: \"$2\" Error message: \"$3\""
@@ -31,109 +37,127 @@ report() {
check_exec_0() {
if [ $? != 0 ]; then
- report 1 "invokation of ${$1} command failed"
+ report 1 "invocation of $1 command failed"
fi
}
find_str_or_fail() {
- grep -q "$1" $2
- if [ $? != 0 ]; then
- report 1 $3 "Failed to find required string:'${1}'."
+ grep -q "$1" "$2"
+ if [ "$?" != 0 ]; then
+ report 1 "$3" "Failed to find required string:'${1}'."
else
- report 0 $3
+ report 0 "$3"
fi
}
+# check if perf is compiled with libtraceevent support
+skip_no_probe_record_support() {
+ perf record -e "sched:sched_switch" -a -- sleep 1 2>&1 | grep "libtraceevent is necessary for tracepoint support" && return 2
+ return 0
+}
+
prepare_perf_data() {
# 1s should be sufficient to catch at least some switches
perf record -e sched:sched_switch -a -- sleep 1 > /dev/null 2>&1
+ # check if perf data file got created in above step.
+ if [ ! -e "perf.data" ]; then
+ printf "FAIL: perf record failed to create \"perf.data\" \n"
+ return 1
+ fi
}
# check standard invocation with no arguments
test_basic() {
out="$tmpdir/perf.out"
- perf script report task-analyzer > $out
- check_exec_0 "perf"
- find_str_or_fail "Comm" $out ${FUNCNAME[0]}
+ perf script report task-analyzer > "$out"
+ check_exec_0 "perf script report task-analyzer"
+ find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"
}
test_ns_rename(){
out="$tmpdir/perf.out"
- perf script report task-analyzer --ns --rename-comms-by-tids 0:random > $out
- check_exec_0 "perf"
- find_str_or_fail "Comm" $out ${FUNCNAME[0]}
+ perf script report task-analyzer --ns --rename-comms-by-tids 0:random > "$out"
+ check_exec_0 "perf script report task-analyzer --ns --rename-comms-by-tids 0:random"
+ find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"
}
test_ms_filtertasks_highlight(){
out="$tmpdir/perf.out"
perf script report task-analyzer --ms --filter-tasks perf --highlight-tasks perf \
- > $out
- check_exec_0 "perf"
- find_str_or_fail "Comm" $out ${FUNCNAME[0]}
+ > "$out"
+ check_exec_0 "perf script report task-analyzer --ms --filter-tasks perf --highlight-tasks perf"
+ find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"
}
test_extended_times_timelimit_limittasks() {
out="$tmpdir/perf.out"
perf script report task-analyzer --extended-times --time-limit :99999 \
- --limit-to-tasks perf > $out
- check_exec_0 "perf"
- find_str_or_fail "Out-Out" $out ${FUNCNAME[0]}
+ --limit-to-tasks perf > "$out"
+ check_exec_0 "perf script report task-analyzer --extended-times --time-limit :99999 --limit-to-tasks perf"
+ find_str_or_fail "Out-Out" "$out" "${FUNCNAME[0]}"
}
test_summary() {
out="$tmpdir/perf.out"
- perf script report task-analyzer --summary > $out
- check_exec_0 "perf"
- find_str_or_fail "Summary" $out ${FUNCNAME[0]}
+ perf script report task-analyzer --summary > "$out"
+ check_exec_0 "perf script report task-analyzer --summary"
+ find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"
}
test_summaryextended() {
out="$tmpdir/perf.out"
- perf script report task-analyzer --summary-extended > $out
- check_exec_0 "perf"
- find_str_or_fail "Inter Task Times" $out ${FUNCNAME[0]}
+ perf script report task-analyzer --summary-extended > "$out"
+ check_exec_0 "perf script report task-analyzer --summary-extended"
+ find_str_or_fail "Inter Task Times" "$out" "${FUNCNAME[0]}"
}
test_summaryonly() {
out="$tmpdir/perf.out"
- perf script report task-analyzer --summary-only > $out
- check_exec_0 "perf"
- find_str_or_fail "Summary" $out ${FUNCNAME[0]}
+ perf script report task-analyzer --summary-only > "$out"
+ check_exec_0 "perf script report task-analyzer --summary-only"
+ find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"
}
test_extended_times_summary_ns() {
out="$tmpdir/perf.out"
- perf script report task-analyzer --extended-times --summary --ns > $out
- check_exec_0 "perf"
- find_str_or_fail "Out-Out" $out ${FUNCNAME[0]}
- find_str_or_fail "Summary" $out ${FUNCNAME[0]}
+ perf script report task-analyzer --extended-times --summary --ns > "$out"
+ check_exec_0 "perf script report task-analyzer --extended-times --summary --ns"
+ find_str_or_fail "Out-Out" "$out" "${FUNCNAME[0]}"
+ find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"
}
test_csv() {
perf script report task-analyzer --csv csv > /dev/null
- check_exec_0 "perf"
- find_str_or_fail "Comm;" csv ${FUNCNAME[0]}
+ check_exec_0 "perf script report task-analyzer --csv csv"
+ find_str_or_fail "Comm;" csv "${FUNCNAME[0]}"
}
test_csv_extended_times() {
perf script report task-analyzer --csv csv --extended-times > /dev/null
- check_exec_0 "perf"
- find_str_or_fail "Out-Out;" csv ${FUNCNAME[0]}
+ check_exec_0 "perf script report task-analyzer --csv csv --extended-times"
+ find_str_or_fail "Out-Out;" csv "${FUNCNAME[0]}"
}
test_csvsummary() {
perf script report task-analyzer --csv-summary csvsummary > /dev/null
- check_exec_0 "perf"
- find_str_or_fail "Comm;" csvsummary ${FUNCNAME[0]}
+ check_exec_0 "perf script report task-analyzer --csv-summary csvsummary"
+ find_str_or_fail "Comm;" csvsummary "${FUNCNAME[0]}"
}
test_csvsummary_extended() {
perf script report task-analyzer --csv-summary csvsummary --summary-extended \
>/dev/null
- check_exec_0 "perf"
- find_str_or_fail "Out-Out;" csvsummary ${FUNCNAME[0]}
+ check_exec_0 "perf script report task-analyzer --csv-summary csvsummary --summary-extended"
+ find_str_or_fail "Out-Out;" csvsummary "${FUNCNAME[0]}"
}
+skip_no_probe_record_support
+err=$?
+if [ $err -ne 0 ]; then
+ echo "WARN: Skipping tests. No libtraceevent support"
+ cleanup
+ exit $err
+fi
prepare_perf_data
test_basic
test_ns_rename