Diffstat (limited to 'tools/perf/tests/shell/lib/perf_metric_validation.py')
-rw-r--r-- | tools/perf/tests/shell/lib/perf_metric_validation.py | 22
1 file changed, 17 insertions, 5 deletions
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index a2d235252183..dea8ef1977bf 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -35,7 +35,8 @@ class TestError:


 class Validator:
-    def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
+    def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='',
+                 workload='true', metrics='', cputype='cpu'):
         self.rulefname = rulefname
         self.reportfname = reportfname
         self.rules = None
@@ -43,6 +44,7 @@ class Validator:
         self.metrics = self.__set_metrics(metrics)
         self.skiplist = set()
         self.tolerance = t
+        self.cputype = cputype

         self.workloads = [x for x in workload.split(",") if x]
         self.wlidx = 0  # idx of current workloads
@@ -95,7 +97,7 @@ class Validator:
                      indent=4)

     def get_results(self, idx: int = 0):
-        return self.results[idx]
+        return self.results.get(idx)

     def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
         """
@@ -173,7 +175,10 @@ class Validator:
         pcnt = 0
         tcnt = 0
         rerun = list()
-        for name, val in self.get_results().items():
+        results = self.get_results()
+        if not results:
+            return
+        for name, val in results.items():
             if val < 0:
                 negmetric[name] = val
                 rerun.append(name)
@@ -374,7 +379,7 @@ class Validator:

     def _run_perf(self, metric, workload: str):
         tool = 'perf'
-        command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
+        command = [tool, 'stat', '--cputype', self.cputype, '-j', '-M', f"{metric}", "-a"]
         wl = workload.split()
         command.extend(wl)
         print(" ".join(command))
@@ -440,6 +445,8 @@ class Validator:
             if 'MetricName' not in m:
                 print("Warning: no metric name")
                 continue
+            if 'Unit' in m and m['Unit'] != self.cputype:
+                continue
             name = m['MetricName'].lower()
             self.metrics.add(name)
             if 'ScaleUnit' in m and (m['ScaleUnit'] == '1%' or m['ScaleUnit'] == '100%'):
@@ -532,6 +539,9 @@ class Validator:
         '''
         if not self.collectlist:
             self.parse_perf_metrics()
+        if not self.metrics:
+            print("No metric found for testing")
+            return 0
         self.create_rules()
         for i in range(0, len(self.workloads)):
             self.wlidx = i
@@ -572,6 +582,8 @@ def main() -> None:
     parser.add_argument(
         "-wl", help="Workload to run while data collection", default="true")
     parser.add_argument("-m", help="Metric list to validate", default="")
+    parser.add_argument("-cputype", help="Only test metrics for the given CPU/PMU type",
+                        default="cpu")
     args = parser.parse_args()
     outpath = Path(args.output_dir)
     reportf = Path.joinpath(outpath, 'perf_report.json')
@@ -580,7 +592,7 @@ def main() -> None:
     validator = Validator(args.rule, reportf, debug=args.debug,
                           datafname=datafile, fullrulefname=fullrule,
                           workload=args.wl,
-                          metrics=args.m)
+                          metrics=args.m, cputype=args.cputype)
     ret = validator.test()
     return ret
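
A minimal, self-contained Python sketch of the two behaviours this patch adds: skipping metrics whose "Unit" does not match the requested PMU type, and passing --cputype through to perf stat. This is illustration only, not code from the patch; the metric entries and the 'cpu_core' value below are invented examples, and the real script builds its metric list from perf's JSON metric descriptions.

    # Standalone sketch: mimics the new 'Unit' filter added to parse_perf_metrics()
    # and the '--cputype' argument now inserted by _run_perf().
    cputype = 'cpu_core'                      # invented example of a hybrid PMU type

    metric_descriptions = [                   # stand-in for perf's JSON metric list
        {'MetricName': 'tma_frontend_bound', 'Unit': 'cpu_core'},
        {'MetricName': 'tma_backend_bound', 'Unit': 'cpu_atom'},  # skipped: wrong PMU type
        {'MetricName': 'ipc'},                # no 'Unit' key: kept for any cputype
    ]

    # Keep only metrics without a 'Unit' or whose 'Unit' matches the requested type.
    metrics = {m['MetricName'].lower() for m in metric_descriptions
               if 'MetricName' in m and ('Unit' not in m or m['Unit'] == cputype)}

    # Shape of the perf invocation now built per metric, with the default 'true' workload.
    command = ['perf', 'stat', '--cputype', cputype, '-j', '-M', ','.join(sorted(metrics)), '-a', 'true']
    print(' '.join(command))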