Diffstat (limited to 'tools/testing/kunit/kunit_parser.py')
 tools/testing/kunit/kunit_parser.py | 466 ++++++++++++++++--------------
 1 file changed, 251 insertions(+), 215 deletions(-)
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 05ff334761dd..333cd3a4a56b 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -10,14 +10,16 @@
 # Author: Rae Moar <rmoar@google.com>
 
 from __future__ import annotations
+from dataclasses import dataclass
 import re
+import textwrap
 
-import datetime
 from enum import Enum, auto
-from functools import reduce
 from typing import Iterable, Iterator, List, Optional, Tuple
 
-class Test(object):
+from kunit_printer import Printer, stdout
+
+class Test:
 	"""
 	A class to represent a test parsed from KTAP results. All KTAP
 	results within a test log are stored in a main Test object as
@@ -45,19 +47,21 @@ class Test(object):
 
 	def __str__(self) -> str:
 		"""Returns string representation of a Test class object."""
-		return ('Test(' + str(self.status) + ', ' + self.name +
-			', ' + str(self.expected_count) + ', ' +
-			str(self.subtests) + ', ' + str(self.log) + ', ' +
-			str(self.counts) + ')')
+		return (f'Test({self.status}, {self.name}, {self.expected_count}, '
+			f'{self.subtests}, {self.log}, {self.counts})')
 
 	def __repr__(self) -> str:
 		"""Returns string representation of a Test class object."""
 		return str(self)
 
-	def add_error(self, error_message: str) -> None:
+	def add_error(self, printer: Printer, error_message: str) -> None:
 		"""Records an error that occurred while parsing this test."""
 		self.counts.errors += 1
-		print_error('Test ' + self.name + ': ' + error_message)
+		printer.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
+
+	def ok_status(self) -> bool:
+		"""Returns true if the status was ok, i.e. passed or skipped."""
+		return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)
 
 class TestStatus(Enum):
 	"""An enumeration class to represent the status of a test."""
@@ -68,36 +72,25 @@ class TestStatus(Enum):
 	NO_TESTS = auto()
 	FAILURE_TO_PARSE_TESTS = auto()
 
+@dataclass
 class TestCounts:
 	"""
 	Tracks the counts of statuses of all test cases and any errors within
 	a Test.
-
-	Attributes:
-	passed : int - the number of tests that have passed
-	failed : int - the number of tests that have failed
-	crashed : int - the number of tests that have crashed
-	skipped : int - the number of tests that have skipped
-	errors : int - the number of errors in the test and subtests
-	"""
-	def __init__(self):
-		"""Creates TestCounts object with counts of all test
-		statuses and test errors set to 0.
-		"""
-		self.passed = 0
-		self.failed = 0
-		self.crashed = 0
-		self.skipped = 0
-		self.errors = 0
+	"""
+	passed: int = 0
+	failed: int = 0
+	crashed: int = 0
+	skipped: int = 0
+	errors: int = 0
 
 	def __str__(self) -> str:
-		"""Returns the string representation of a TestCounts object.
-		"""
-		return ('Passed: ' + str(self.passed) +
-			', Failed: ' + str(self.failed) +
-			', Crashed: ' + str(self.crashed) +
-			', Skipped: ' + str(self.skipped) +
-			', Errors: ' + str(self.errors))
+		"""Returns the string representation of a TestCounts object."""
+		statuses = [('passed', self.passed), ('failed', self.failed),
+			('crashed', self.crashed), ('skipped', self.skipped),
+			('errors', self.errors)]
+		return f'Ran {self.total()} tests: ' + \
+			', '.join(f'{s}: {n}' for s, n in statuses if n > 0)
 
 	def total(self) -> int:
 		"""Returns the total number of test cases within a test
@@ -128,31 +121,19 @@ class TestCounts:
 		"""
 		if self.total() == 0:
 			return TestStatus.NO_TESTS
-		elif self.crashed:
-			# If one of the subtests crash, the expected status
-			# of the Test is crashed.
+		if self.crashed:
+			# Crashes should take priority.
 			return TestStatus.TEST_CRASHED
-		elif self.failed:
-			# Otherwise if one of the subtests fail, the
-			# expected status of the Test is failed.
+		if self.failed:
 			return TestStatus.FAILURE
-		elif self.passed:
-			# Otherwise if one of the subtests pass, the
-			# expected status of the Test is passed.
+		if self.passed:
+			# No failures or crashes, looks good!
 			return TestStatus.SUCCESS
-		else:
-			# Finally, if none of the subtests have failed,
-			# crashed, or passed, the expected status of the
-			# Test is skipped.
-			return TestStatus.SKIPPED
+		# We have only skipped tests.
+		return TestStatus.SKIPPED
 
 	def add_status(self, status: TestStatus) -> None:
-		"""
-		Increments count of inputted status.
-
-		Parameters:
-		status - status to be added to the TestCounts object
-		"""
+		"""Increments the count for `status`."""
 		if status == TestStatus.SUCCESS:
 			self.passed += 1
 		elif status == TestStatus.FAILURE:
@@ -227,10 +208,11 @@ class LineStream:
 
 # Parsing helper methods:
 
-KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
-TAP_START = re.compile(r'TAP version ([0-9]+)$')
-KTAP_END = re.compile('(List of all partitions:|'
+KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
+TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
+KTAP_END = re.compile(r'\s*(List of all partitions:|'
	'Kernel panic - not syncing: VFS:|reboot: System halted)')
+EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$')
 
 def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
 	"""Extracts KTAP lines from the kernel output."""
@@ -258,9 +240,10 @@ def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
 				# stop extracting KTAP lines
 				break
 			elif started:
-				# remove prefix and any indention and yield
-				# line with line number
-				line = line[prefix_len:].lstrip()
+				# remove the prefix, if any.
+				line = line[prefix_len:]
+				yield line_num, line
+			elif EXECUTOR_ERROR.search(line):
 				yield line_num, line
 
 	return LineStream(lines=isolate_ktap_output(kernel_output))
 
 KTAP_VERSIONS = [1]
 TAP_VERSIONS = [13, 14]
 
 def check_version(version_num: int, accepted_versions: List[int],
-			version_type: str, test: Test) -> None:
+			version_type: str, test: Test, printer: Printer) -> None:
 	"""
 	Adds error to test object if version number is too high or too
 	low.
 
@@ -280,15 +263,14 @@ def check_version(version_num: int, accepted_versions: List[int],
 	version_type - 'KTAP' or 'TAP' depending on the type of version line.
 	test - Test object for current test being parsed
+	printer - Printer object to output error
 	"""
 	if version_num < min(accepted_versions):
-		test.add_error(version_type +
-			' version lower than expected!')
+		test.add_error(printer, f'{version_type} version lower than expected!')
 	elif version_num > max(accepted_versions):
-		test.add_error(
-			version_type + ' version higher than expected!')
+		test.add_error(printer, f'{version_type} version higher than expected!')
 
-def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+def parse_ktap_header(lines: LineStream, test: Test, printer: Printer) -> bool:
 	"""
 	Parses KTAP/TAP header line and checks version number.
 	Returns False if fails to parse KTAP/TAP header line.
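To make the counting behaviour above concrete, here is a quick sketch of the new dataclass-based TestCounts (illustrative only, not part of the patch; it assumes kunit_parser.py is importable from the Python path):

    from kunit_parser import TestCounts, TestStatus

    counts = TestCounts()
    counts.add_status(TestStatus.SUCCESS)
    counts.add_status(TestStatus.SKIPPED)
    counts.add_status(TestStatus.FAILURE)

    # __str__ only lists the non-zero buckets:
    print(counts)               # Ran 3 tests: passed: 1, failed: 1, skipped: 1
    # get_status() applies the priority order: crashed > failed > passed > skipped
    print(counts.get_status())  # TestStatus.FAILURE

Note that get_status() returns NO_TESTS for an empty TestCounts and only reports SKIPPED when skips are all it has counted.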
@@ -300,6 +282,7 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool:
 	Parameters:
 	lines - LineStream of KTAP output to parse
 	test - Test object for current test being parsed
+	printer - Printer object to output results
 
 	Return:
 	True if successfully parsed KTAP/TAP header line
@@ -308,16 +291,16 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool:
 	tap_match = TAP_START.match(lines.peek())
 	if ktap_match:
 		version_num = int(ktap_match.group(1))
-		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
+		check_version(version_num, KTAP_VERSIONS, 'KTAP', test, printer)
 	elif tap_match:
 		version_num = int(tap_match.group(1))
-		check_version(version_num, TAP_VERSIONS, 'TAP', test)
+		check_version(version_num, TAP_VERSIONS, 'TAP', test, printer)
 	else:
 		return False
-	test.log.append(lines.pop())
+	lines.pop()
 	return True
 
-TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
+TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$')
 
 def parse_test_header(lines: LineStream, test: Test) -> bool:
 	"""
@@ -337,11 +320,11 @@ def parse_test_header(lines: LineStream, test: Test) -> bool:
 	match = TEST_HEADER.match(lines.peek())
 	if not match:
 		return False
-	test.log.append(lines.pop())
 	test.name = match.group(1)
+	lines.pop()
 	return True
 
-TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
+TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')
 
 def parse_test_plan(lines: LineStream, test: Test) -> bool:
 	"""
@@ -364,14 +347,14 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
 	if not match:
 		test.expected_count = None
 		return False
-	test.log.append(lines.pop())
 	expected_count = int(match.group(1))
 	test.expected_count = expected_count
+	lines.pop()
 	return True
 
-TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) ?(- )?([^#]*)( # .*)?$')
 
-TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) ?(- )?(.*) # SKIP ?(.*)$')
 
 def peek_test_name_match(lines: LineStream, test: Test) -> bool:
 	"""
@@ -396,10 +379,12 @@ def peek_test_name_match(lines: LineStream, test: Test) -> bool:
 	if not match:
 		return False
 	name = match.group(4)
-	return (name == test.name)
+	if not name:
+		return False
+	return name == test.name
 
 def parse_test_result(lines: LineStream, test: Test,
-		expected_num: int) -> bool:
+		expected_num: int, printer: Printer) -> bool:
 	"""
 	Parses test result line and stores the status and name in the test
 	object. Reports an error if the test number does not match expected
@@ -417,6 +402,7 @@ def parse_test_result(lines: LineStream, test: Test,
 	lines - LineStream of KTAP output to parse
 	test - Test object for current test being parsed
 	expected_num - expected test number for current test
+	printer - Printer object to output results
 
 	Return:
 	True if successfully parsed a test result line.
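The loosened result-line regexes above now accept leading whitespace (nested KTAP output is indented) and make the dash and the post-SKIP space optional. A small sanity sketch of how they group, using made-up sample lines:

    import re

    TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) ?(- )?([^#]*)( # .*)?$')
    TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) ?(- )?(.*) # SKIP ?(.*)$')

    m = TEST_RESULT.match('    ok 1 - example_test')
    print(m.group(1), m.group(2), m.group(4))  # -> ok 1 example_test

    s = TEST_RESULT_SKIP.match('ok 2 - example_test # SKIP no hardware')
    print(s.group(4), '/', s.group(5))         # -> example_test / no hardware

This is presumably also why peek_test_name_match() grew the `if not name` check: with the separators now optional, a bare "ok 1" matches with an empty group(4).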
@@ -428,19 +414,18 @@ def parse_test_result(lines: LineStream, test: Test,
 	# Check if line matches test result line format
 	if not match:
 		return False
-	test.log.append(lines.pop())
+	lines.pop()
 
 	# Set name of test object
 	if skip_match:
-		test.name = skip_match.group(4)
+		test.name = skip_match.group(4)
 	else:
 		test.name = match.group(4)
 
 	# Check test num
 	num = int(match.group(2))
 	if num != expected_num:
-		test.add_error('Expected test number ' +
-			str(expected_num) + ' but found ' + str(num))
+		test.add_error(printer, f'Expected test number {expected_num} but found {num}')
 
 	# Set status of test object
 	status = match.group(1)
@@ -461,6 +446,7 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
 	- '# Subtest: [test name]'
 	- '[ok|not ok] [test number] [-] [test name] [optional skip directive]'
+	- 'KTAP version [version number]'
 
 	Parameters:
 	lines - LineStream of KTAP output to parse
@@ -469,56 +455,17 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
 	Log of diagnostic lines
 	"""
 	log = []  # type: List[str]
-	while lines and not TEST_RESULT.match(lines.peek()) and not \
-			TEST_HEADER.match(lines.peek()):
+	non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START, TEST_PLAN]
+	while lines and not any(re.match(lines.peek())
+			for re in non_diagnostic_lines):
 		log.append(lines.pop())
 	return log
 
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')
-
-def parse_crash_in_log(test: Test) -> bool:
-	"""
-	Iterate through the lines of the log to parse for crash message.
-	If crash message found, set status to crashed and return True.
-	Otherwise return False.
-
-	Parameters:
-	test - Test object for current test being parsed
-
-	Return:
-	True if crash message found in log
-	"""
-	for line in test.log:
-		if DIAGNOSTIC_CRASH_MESSAGE.match(line):
-			test.status = TestStatus.TEST_CRASHED
-			return True
-	return False
-
 # Printing helper methods:
 
 DIVIDER = '=' * 60
 
-RESET = '\033[0;0m'
-
-def red(text: str) -> str:
-	"""Returns inputted string with red color code."""
-	return '\033[1;31m' + text + RESET
-
-def yellow(text: str) -> str:
-	"""Returns inputted string with yellow color code."""
-	return '\033[1;33m' + text + RESET
-
-def green(text: str) -> str:
-	"""Returns inputted string with green color code."""
-	return '\033[1;32m' + text + RESET
-
-ANSI_LEN = len(red(''))
-
-def print_with_timestamp(message: str) -> None:
-	"""Prints message with timestamp at beginning."""
-	print('[%s] %s' % (datetime.datetime.now().strftime('%H:%M:%S'), message))
-
 def format_test_divider(message: str, len_message: int) -> str:
 	"""
 	Returns string with message centered in fixed width divider.
@@ -542,9 +489,9 @@ def format_test_divider(message: str, len_message: int) -> str:
 	# calculate number of dashes for each side of the divider
 	len_1 = int(difference / 2)
 	len_2 = difference - len_1
-	return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+	return ('=' * len_1) + f' {message} ' + ('=' * len_2)
 
-def print_test_header(test: Test) -> None:
+def print_test_header(test: Test, printer: Printer) -> None:
 	"""
 	Prints test header with test name and optionally the expected number
 	of subtests.
@@ -554,28 +501,27 @@ def print_test_header(test: Test) -> None:
 
 	Parameters:
 	test - Test object representing current test being printed
+	printer - Printer object to output results
 	"""
 	message = test.name
+	if message != "":
+		# Add a leading space before the subtest counts only if a test name
+		# is provided using a "# Subtest" header line.
+ message += " " if test.expected_count: if test.expected_count == 1: - message += (' (' + str(test.expected_count) + - ' subtest)') + message += '(1 subtest)' else: - message += (' (' + str(test.expected_count) + - ' subtests)') - print_with_timestamp(format_test_divider(message, len(message))) + message += f'({test.expected_count} subtests)' + printer.print_with_timestamp(format_test_divider(message, len(message))) -def print_log(log: Iterable[str]) -> None: - """ - Prints all strings in saved log for test in yellow. +def print_log(log: Iterable[str], printer: Printer) -> None: + """Prints all strings in saved log for test in yellow.""" + formatted = textwrap.dedent('\n'.join(log)) + for line in formatted.splitlines(): + printer.print_with_timestamp(printer.yellow(line)) - Parameters: - log - Iterable object with all strings saved in log for test - """ - for m in log: - print_with_timestamp(yellow(m)) - -def format_test_result(test: Test) -> str: +def format_test_result(test: Test, printer: Printer) -> str: """ Returns string with formatted test result with colored status and test name. @@ -585,24 +531,24 @@ def format_test_result(test: Test) -> str: Parameters: test - Test object representing current test being printed + printer - Printer object to output results Return: String containing formatted test result """ if test.status == TestStatus.SUCCESS: - return (green('[PASSED] ') + test.name) - elif test.status == TestStatus.SKIPPED: - return (yellow('[SKIPPED] ') + test.name) - elif test.status == TestStatus.NO_TESTS: - return (yellow('[NO TESTS RUN] ') + test.name) - elif test.status == TestStatus.TEST_CRASHED: - print_log(test.log) - return (red('[CRASHED] ') + test.name) - else: - print_log(test.log) - return (red('[FAILED] ') + test.name) - -def print_test_result(test: Test) -> None: + return printer.green('[PASSED] ') + test.name + if test.status == TestStatus.SKIPPED: + return printer.yellow('[SKIPPED] ') + test.name + if test.status == TestStatus.NO_TESTS: + return printer.yellow('[NO TESTS RUN] ') + test.name + if test.status == TestStatus.TEST_CRASHED: + print_log(test.log, printer) + return stdout.red('[CRASHED] ') + test.name + print_log(test.log, printer) + return printer.red('[FAILED] ') + test.name + +def print_test_result(test: Test, printer: Printer) -> None: """ Prints result line with status of test. @@ -611,10 +557,11 @@ def print_test_result(test: Test) -> None: Parameters: test - Test object representing current test being printed + printer - Printer object """ - print_with_timestamp(format_test_result(test)) + printer.print_with_timestamp(format_test_result(test, printer)) -def print_test_footer(test: Test) -> None: +def print_test_footer(test: Test, printer: Printer) -> None: """ Prints test footer with status of test. @@ -623,12 +570,72 @@ def print_test_footer(test: Test) -> None: Parameters: test - Test object representing current test being printed + printer - Printer object to output results + """ + message = format_test_result(test, printer) + printer.print_with_timestamp(format_test_divider(message, + len(message) - printer.color_len())) + +def print_test(test: Test, failed_only: bool, printer: Printer) -> None: """ - message = format_test_result(test) - print_with_timestamp(format_test_divider(message, - len(message) - ANSI_LEN)) + Prints Test object to given printer. For a child test, the result line is + printed. For a parent test, the test header, all child test results, and + the test footer are all printed. 
+	tests will be printed.
+
+	Parameters:
+	test - Test object to print
+	failed_only - True if only failed/crashed tests should be printed.
+	printer - Printer object to output results
+	"""
+	if test.name == "main":
+		printer.print_with_timestamp(DIVIDER)
+		for subtest in test.subtests:
+			print_test(subtest, failed_only, printer)
+		printer.print_with_timestamp(DIVIDER)
+	elif test.subtests != []:
+		if not failed_only or not test.ok_status():
+			print_test_header(test, printer)
+			for subtest in test.subtests:
+				print_test(subtest, failed_only, printer)
+			print_test_footer(test, printer)
+	else:
+		if not failed_only or not test.ok_status():
+			print_test_result(test, printer)
+
+def _summarize_failed_tests(test: Test) -> str:
+	"""Tries to summarize all the failing subtests in `test`."""
+
+	def failed_names(test: Test, parent_name: str) -> List[str]:
+		# Note: we use 'main' internally for the top-level test.
+		if not parent_name or parent_name == 'main':
+			full_name = test.name
+		else:
+			full_name = parent_name + '.' + test.name
+
+		if not test.subtests:  # this is a leaf node
+			return [full_name]
+
+		# If all the children failed, just say this subtest failed.
+		# Don't summarize it down "the top-level test failed", though.
+		failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
+		if parent_name and len(failed_subtests) == len(test.subtests):
+			return [full_name]
+
+		all_failures = []  # type: List[str]
+		for t in failed_subtests:
+			all_failures.extend(failed_names(t, full_name))
+		return all_failures
+
+	failures = failed_names(test, '')
+	# If there are too many failures, printing them out will just be noisy.
+	if len(failures) > 10:  # this is an arbitrary limit
+		return ''
+
+	return 'Failures: ' + ', '.join(failures)
 
-def print_summary_line(test: Test) -> None:
+
+def print_summary_line(test: Test, printer: Printer) -> None:
 	"""
 	Prints summary line of test object. Color of line is dependent on
 	status of test. Color is green if test passes, yellow if test is
 	skipped and red if test fails or crashes. Summary line contains the
 	counts of the statuses of the tests subtests or the test itself if
 	it has no subtests.
 
 	Example:
	"Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
 	Errors: 0"
 
 	test - Test object representing current test being printed
+	printer - Printer object to output results
 	"""
 	if test.status == TestStatus.SUCCESS:
-		color = green
-	elif test.status == TestStatus.SKIPPED or test.status == TestStatus.NO_TESTS:
-		color = yellow
+		color = stdout.green
+	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
+		color = stdout.yellow
 	else:
-		color = red
-	counts = test.counts
-	print_with_timestamp(color('Testing complete. ' + str(counts)))
-
-def print_error(error_message: str) -> None:
-	"""
-	Prints error message with error format.
-
-	Example:
-	"[ERROR] Test example: missing test plan!"
-
-	Parameters:
-	error_message - message describing error
-	"""
-	print_with_timestamp(red('[ERROR] ') + error_message)
+		color = stdout.red
+	printer.print_with_timestamp(color(f'Testing complete. {test.counts}'))
+
+	# Summarize failures that might have gone off-screen since we had a lot
+	# of tests (arbitrarily defined as >=100 for now).
+	if test.ok_status() or test.counts.total() < 100:
+		return
+	summarized = _summarize_failed_tests(test)
+	if not summarized:
+		return
+	printer.print_with_timestamp(color(summarized))
 
 # Other methods:
 
@@ -675,7 +679,6 @@ def bubble_up_test_results(test: Test) -> None:
 	Parameters:
 	test - Test object for current test being parsed
 	"""
-	parse_crash_in_log(test)
 	subtests = test.subtests
 	counts = test.counts
 	status = test.status
@@ -686,7 +689,7 @@ def bubble_up_test_results(test: Test) -> None:
 	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
 		test.status = TestStatus.TEST_CRASHED
 
-def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool, printer: Printer) -> Test:
 	"""
 	Finds next test to parse in LineStream, creates new Test object,
 	parses any subtests of the test, populates Test object with all
@@ -704,15 +707,32 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 		1..4
 		[subtests]
 
-	- Subtest header line
+	- Subtest header (must include either the KTAP version line or
+	  "# Subtest" header line)
 
-	  Example:
+	  Example (preferred format with both KTAP version line and
+	  "# Subtest" line):
+
+		KTAP version 1
+		# Subtest: name
+		1..3
+		[subtests]
+		ok 1 name
+
+	  Example (only "# Subtest" line):
 
 		# Subtest: name
 		1..3
 		[subtests]
 		ok 1 name
 
+	  Example (only KTAP version line, compliant with KTAP v1 spec):
+
+		KTAP version 1
+		1..3
+		[subtests]
+		ok 1 name
+
 	- Test result line
 
 	  Example:
@@ -724,30 +744,38 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
 	expected_num - expected test number for test to be parsed
 	log - list of strings containing any preceding diagnostic lines
 		corresponding to the current test
+	is_subtest - boolean indicating whether test is a subtest
+	printer - Printer object to output results
 
 	Return:
 	Test object populated with characteristics and any subtests
 	"""
 	test = Test()
 	test.log.extend(log)
-	parent_test = False
-	main = parse_ktap_header(lines, test)
-	if main:
-		# If KTAP/TAP header is found, attempt to parse
+
+	# Parse any errors prior to parsing tests
+	err_log = parse_diagnostic(lines)
+	test.log.extend(err_log)
+
+	if not is_subtest:
+		# If parsing the main/top-level test, parse KTAP version line and
 		# test plan
 		test.name = "main"
+		parse_ktap_header(lines, test, printer)
+		test.log.extend(parse_diagnostic(lines))
 		parse_test_plan(lines, test)
 		parent_test = True
 	else:
-		# If KTAP/TAP header is not found, test must be subtest
-		# header or test result line so parse attempt to parser
-		# subtest header
-		parent_test = parse_test_header(lines, test)
+		# If not the main test, attempt to parse a test header containing
+		# the KTAP version line and/or subtest header line
+		ktap_line = parse_ktap_header(lines, test, printer)
+		subtest_line = parse_test_header(lines, test)
+		test.log.extend(parse_diagnostic(lines))
+		parse_test_plan(lines, test)
+		parent_test = (ktap_line or subtest_line)
 	if parent_test:
-		# If subtest header is found, attempt to parse
-		# test plan and print header
-		parse_test_plan(lines, test)
-		print_test_header(test)
+		print_test_header(test, printer)
+
 	expected_count = test.expected_count
 	subtests = []
 	test_num = 1
@@ -760,69 +788,77 @@
 		sub_log = parse_diagnostic(lines)
 		sub_test = Test()
 		if not lines or (peek_test_name_match(lines, test) and
-				not main):
+				is_subtest):
			if expected_count and test_num <= expected_count:
 				# If parser reaches end of test before
 				# parsing expected number of subtests, print
 				# crashed subtest and record error
-				test.add_error('missing expected subtest!')
+				test.add_error(printer, 'missing expected subtest!')
 				sub_test.log.extend(sub_log)
 				test.counts.add_status(
 					TestStatus.TEST_CRASHED)
-				print_test_result(sub_test)
+				print_test_result(sub_test, printer)
 			else:
 				test.log.extend(sub_log)
 				break
 		else:
-			sub_test = parse_test(lines, test_num, sub_log)
+			sub_test = parse_test(lines, test_num, sub_log, True, printer)
 		subtests.append(sub_test)
 		test_num += 1
 	test.subtests = subtests
-	if not main:
+	if is_subtest:
 		# If not main test, look for test result line
 		test.log.extend(parse_diagnostic(lines))
-		if (parent_test and peek_test_name_match(lines, test)) or \
-				not parent_test:
-			parse_test_result(lines, test, expected_num)
+		if test.name != "" and not peek_test_name_match(lines, test):
+			test.add_error(printer, 'missing subtest result line!')
+		elif not lines:
+			print_log(test.log, printer)
+			test.status = TestStatus.NO_TESTS
+			test.add_error(printer, 'No more test results!')
 		else:
-			test.add_error('missing subtest result line!')
+			parse_test_result(lines, test, expected_num, printer)
 
-	# Check for there being no tests
+	# Check for there being no subtests within parent test
 	if parent_test and len(subtests) == 0:
-		test.status = TestStatus.NO_TESTS
-		test.add_error('0 tests run!')
+		# Don't override a bad status if this test had one reported.
+		# Assumption: no subtests means CRASHED is from Test.__init__()
+		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
+			print_log(test.log, printer)
+			test.status = TestStatus.NO_TESTS
+			test.add_error(printer, '0 tests run!')
 
 	# Add statuses to TestCounts attribute in Test object
 	bubble_up_test_results(test)
-	if parent_test and not main:
+	if parent_test and is_subtest:
 		# If test has subtests and is not the main test object, print
 		# footer.
-		print_test_footer(test)
-	elif not main:
-		print_test_result(test)
+		print_test_footer(test, printer)
+	elif is_subtest:
+		print_test_result(test, printer)
 	return test
 
-def parse_run_tests(kernel_output: Iterable[str]) -> Test:
+def parse_run_tests(kernel_output: Iterable[str], printer: Printer) -> Test:
 	"""
 	Using kernel output, extract KTAP lines, parse the lines for test
-	results and print condensed test results and summary line .
+	results and print condensed test results and summary line.
 
 	Parameters:
 	kernel_output - Iterable object contains lines of kernel output
+	printer - Printer object to output results
 
 	Return:
 	Test - the main test object with all subtests.
 	"""
-	print_with_timestamp(DIVIDER)
+	printer.print_with_timestamp(DIVIDER)
 	lines = extract_tap_lines(kernel_output)
 	test = Test()
 	if not lines:
-		test.add_error('invalid KTAP input!')
+		test.name = '<missing>'
+		test.add_error(printer, 'Could not find any KTAP output. Did any KUnit tests run?')
 		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
 	else:
-		test = parse_test(lines, 0, [])
+		test = parse_test(lines, 0, [], False, printer)
 		if test.status != TestStatus.NO_TESTS:
 			test.status = test.counts.get_status()
-	print_with_timestamp(DIVIDER)
-	print_summary_line(test)
+	printer.print_with_timestamp(DIVIDER)
 	return test
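End to end, the reworked entry point now takes the printer explicitly. A hedged usage sketch (the log file name is invented; `stdout` is assumed to be the shared Printer instance that kunit_printer exposes, as the diff's own `stdout.red(...)` calls suggest):

    from kunit_parser import parse_run_tests
    from kunit_printer import stdout

    # Feed captured kernel output (any iterable of lines) to the parser.
    with open('kunit.log') as log_file:
        result = parse_run_tests(log_file, stdout)

    # The returned main Test aggregates all parsed results.
    print(result.counts)
    if not result.ok_status():
        raise SystemExit(1)

Note that parse_run_tests() no longer prints the summary itself; judging by the removed call, the caller is now expected to invoke print_summary_line(test, printer) (and optionally print_test()) on the returned Test.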
