kunit: tool: Only print the summary

Allow only printing the summary at the end of a test run, rather than all
individual test results. This summary will list a few failing tests if
there are any.

To use:

./tools/testing/kunit/kunit.py run --summary

Link: https://lore.kernel.org/r/20241113222406.1590372-1-rmoar@google.com
Signed-off-by: Rae Moar <rmoar@google.com>
Signed-off-by: David Gow <davidgow@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
Authored by David Gow on 2024-11-13 22:24:05 +00:00; committed by Shuah Khan
parent 5017ec667b
commit 062a9dd9ba
4 changed files with 112 additions and 84 deletions
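As an illustration of the effect, a run with --summary prints only the closing lines, roughly like the sample below (hypothetical counts and test names, following the "Testing complete." format that print_summary_line uses):

[00:15:12] Testing complete. Passed: 340, Failed: 2, Crashed: 0, Skipped: 4, Errors: 0
[00:15:12] Failures: example_suite.test_foo, example_suite.test_bar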

--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -23,7 +23,7 @@ from typing import Iterable, List, Optional, Sequence, Tuple
 import kunit_json
 import kunit_kernel
 import kunit_parser
-from kunit_printer import stdout
+from kunit_printer import stdout, null_printer
 
 class KunitStatus(Enum):
 	SUCCESS = auto()
@@ -49,6 +49,7 @@ class KunitBuildRequest(KunitConfigRequest):
 class KunitParseRequest:
 	raw_output: Optional[str]
 	json: Optional[str]
+	summary: bool
 
 @dataclass
 class KunitExecRequest(KunitParseRequest):
@@ -235,11 +236,16 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input
 		parse_time = time.time() - parse_start
 		return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test
 
+	default_printer = stdout
+	if request.summary:
+		default_printer = null_printer
+
 	# Actually parse the test results.
-	test = kunit_parser.parse_run_tests(input_data)
+	test = kunit_parser.parse_run_tests(input_data, default_printer)
 	parse_time = time.time() - parse_start
 
+	kunit_parser.print_summary_line(test, stdout)
+
 	if request.json:
 		json_str = kunit_json.get_json_result(
 			test=test,
@@ -413,6 +419,10 @@ def add_parse_opts(parser: argparse.ArgumentParser) -> None:
 		help='Prints parsed test results as JSON to stdout or a file if '
 		'a filename is specified. Does nothing if --raw_output is set.',
 		type=str, const='stdout', default=None, metavar='FILE')
+	parser.add_argument('--summary',
+		help='Prints only the summary line for parsed test results.'
+		'Does nothing if --raw_output is set.',
+		action='store_true')
 
 def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree:
@@ -448,6 +458,7 @@ def run_handler(cli_args: argparse.Namespace) -> None:
 			jobs=cli_args.jobs,
 			raw_output=cli_args.raw_output,
 			json=cli_args.json,
+			summary=cli_args.summary,
 			timeout=cli_args.timeout,
 			filter_glob=cli_args.filter_glob,
 			filter=cli_args.filter,
@@ -495,6 +506,7 @@ def exec_handler(cli_args: argparse.Namespace) -> None:
 	exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
 					build_dir=cli_args.build_dir,
 					json=cli_args.json,
+					summary=cli_args.summary,
 					timeout=cli_args.timeout,
 					filter_glob=cli_args.filter_glob,
 					filter=cli_args.filter,
@@ -520,7 +532,7 @@ def parse_handler(cli_args: argparse.Namespace) -> None:
 	# We know nothing about how the result was created!
 	metadata = kunit_json.Metadata()
 	request = KunitParseRequest(raw_output=cli_args.raw_output,
-				    json=cli_args.json)
+				    json=cli_args.json, summary=cli_args.summary)
 	result, _ = parse_tests(request, metadata, kunit_output)
 	if result.status != KunitStatus.SUCCESS:
 		sys.exit(1)
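The heart of the kunit.py change is a null-object printer: detailed per-test output goes through a selectable printer, while print_summary_line() always writes to stdout. A minimal standalone sketch of that pattern (names here are illustrative, not the kunit_tool API):

import sys
import typing

class Printer:
	"""Sketch of a printer that can be silenced wholesale."""
	def __init__(self, enabled: bool = True, output: typing.IO[str] = sys.stdout):
		self._enabled = enabled
		self._output = output

	def print(self, message: str) -> None:
		# Drop the message entirely when the printer is disabled.
		if self._enabled:
			print(message, file=self._output)

stdout_printer = Printer()
null_printer = Printer(enabled=False)

def parse(lines: typing.List[str], summary_only: bool) -> None:
	printer = null_printer if summary_only else stdout_printer
	for line in lines:
		printer.print(f'parsed: {line}')  # per-test detail, suppressed by --summary
	print('Testing complete.')  # the summary always reaches stdout

parse(['ok 1 - example'], summary_only=True)  # prints only 'Testing complete.'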

--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -17,7 +17,7 @@ import textwrap
 from enum import Enum, auto
 from typing import Iterable, Iterator, List, Optional, Tuple
 
-from kunit_printer import stdout
+from kunit_printer import Printer, stdout
 
 class Test:
 	"""
@@ -54,10 +54,10 @@ class Test:
 		"""Returns string representation of a Test class object."""
 		return str(self)
 
-	def add_error(self, error_message: str) -> None:
+	def add_error(self, printer: Printer, error_message: str) -> None:
 		"""Records an error that occurred while parsing this test."""
 		self.counts.errors += 1
-		stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
+		printer.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
 
 	def ok_status(self) -> bool:
 		"""Returns true if the status was ok, i.e. passed or skipped."""
@@ -251,7 +251,7 @@ KTAP_VERSIONS = [1]
 TAP_VERSIONS = [13, 14]
 
 def check_version(version_num: int, accepted_versions: List[int],
-			version_type: str, test: Test) -> None:
+			version_type: str, test: Test, printer: Printer) -> None:
 	"""
 	Adds error to test object if version number is too high or too
 	low.
@@ -263,13 +263,14 @@ def check_version(version_num: int, accepted_versions: List[int],
 	version_type - 'KTAP' or 'TAP' depending on the type of
 		version line.
 	test - Test object for current test being parsed
+	printer - Printer object to output error
 	"""
 	if version_num < min(accepted_versions):
-		test.add_error(f'{version_type} version lower than expected!')
+		test.add_error(printer, f'{version_type} version lower than expected!')
 	elif version_num > max(accepted_versions):
-		test.add_error(f'{version_type} version higer than expected!')
+		test.add_error(printer, f'{version_type} version higer than expected!')
 
-def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+def parse_ktap_header(lines: LineStream, test: Test, printer: Printer) -> bool:
 	"""
 	Parses KTAP/TAP header line and checks version number.
 	Returns False if fails to parse KTAP/TAP header line.
@@ -281,6 +282,7 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool:
 	Parameters:
 	lines - LineStream of KTAP output to parse
 	test - Test object for current test being parsed
+	printer - Printer object to output results
 
 	Return:
 	True if successfully parsed KTAP/TAP header line
@@ -289,10 +291,10 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool:
 	tap_match = TAP_START.match(lines.peek())
 	if ktap_match:
 		version_num = int(ktap_match.group(1))
-		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
+		check_version(version_num, KTAP_VERSIONS, 'KTAP', test, printer)
 	elif tap_match:
 		version_num = int(tap_match.group(1))
-		check_version(version_num, TAP_VERSIONS, 'TAP', test)
+		check_version(version_num, TAP_VERSIONS, 'TAP', test, printer)
 	else:
 		return False
 	lines.pop()
@@ -380,7 +382,7 @@ def peek_test_name_match(lines: LineStream, test: Test) -> bool:
 	return name == test.name
 
 def parse_test_result(lines: LineStream, test: Test,
-			expected_num: int) -> bool:
+			expected_num: int, printer: Printer) -> bool:
 	"""
 	Parses test result line and stores the status and name in the test
 	object. Reports an error if the test number does not match expected
@@ -398,6 +400,7 @@ def parse_test_result(lines: LineStream, test: Test,
 	lines - LineStream of KTAP output to parse
 	test - Test object for current test being parsed
 	expected_num - expected test number for current test
+	printer - Printer object to output results
 
 	Return:
 	True if successfully parsed a test result line.
@@ -420,7 +423,7 @@ def parse_test_result(lines: LineStream, test: Test,
 	# Check test num
 	num = int(match.group(2))
 	if num != expected_num:
-		test.add_error(f'Expected test number {expected_num} but found {num}')
+		test.add_error(printer, f'Expected test number {expected_num} but found {num}')
 
 	# Set status of test object
 	status = match.group(1)
@@ -486,7 +489,7 @@ def format_test_divider(message: str, len_message: int) -> str:
 	len_2 = difference - len_1
 	return ('=' * len_1) + f' {message} ' + ('=' * len_2)
 
-def print_test_header(test: Test) -> None:
+def print_test_header(test: Test, printer: Printer) -> None:
 	"""
 	Prints test header with test name and optionally the expected number
	of subtests.
@@ -496,6 +499,7 @@ def print_test_header(test: Test) -> None:
 	Parameters:
 	test - Test object representing current test being printed
+	printer - Printer object to output results
 	"""
 	message = test.name
 	if message != "":
@@ -507,15 +511,15 @@ def print_test_header(test: Test) -> None:
 			message += '(1 subtest)'
 		else:
 			message += f'({test.expected_count} subtests)'
-	stdout.print_with_timestamp(format_test_divider(message, len(message)))
+	printer.print_with_timestamp(format_test_divider(message, len(message)))
 
-def print_log(log: Iterable[str]) -> None:
+def print_log(log: Iterable[str], printer: Printer) -> None:
 	"""Prints all strings in saved log for test in yellow."""
 	formatted = textwrap.dedent('\n'.join(log))
 	for line in formatted.splitlines():
-		stdout.print_with_timestamp(stdout.yellow(line))
+		printer.print_with_timestamp(printer.yellow(line))
 
-def format_test_result(test: Test) -> str:
+def format_test_result(test: Test, printer: Printer) -> str:
 	"""
 	Returns string with formatted test result with colored status and test
 	name.
@@ -525,23 +529,24 @@ def format_test_result(test: Test) -> str:
 	Parameters:
 	test - Test object representing current test being printed
+	printer - Printer object to output results
 
 	Return:
 	String containing formatted test result
 	"""
 	if test.status == TestStatus.SUCCESS:
-		return stdout.green('[PASSED] ') + test.name
+		return printer.green('[PASSED] ') + test.name
 	if test.status == TestStatus.SKIPPED:
-		return stdout.yellow('[SKIPPED] ') + test.name
+		return printer.yellow('[SKIPPED] ') + test.name
 	if test.status == TestStatus.NO_TESTS:
-		return stdout.yellow('[NO TESTS RUN] ') + test.name
+		return printer.yellow('[NO TESTS RUN] ') + test.name
 	if test.status == TestStatus.TEST_CRASHED:
-		print_log(test.log)
+		print_log(test.log, printer)
 		return stdout.red('[CRASHED] ') + test.name
-	print_log(test.log)
-	return stdout.red('[FAILED] ') + test.name
+	print_log(test.log, printer)
+	return printer.red('[FAILED] ') + test.name
 
-def print_test_result(test: Test) -> None:
+def print_test_result(test: Test, printer: Printer) -> None:
 	"""
 	Prints result line with status of test.
 
@@ -550,10 +555,11 @@ def print_test_result(test: Test) -> None:
 	Parameters:
 	test - Test object representing current test being printed
+	printer - Printer object
 	"""
-	stdout.print_with_timestamp(format_test_result(test))
+	printer.print_with_timestamp(format_test_result(test, printer))
 
-def print_test_footer(test: Test) -> None:
+def print_test_footer(test: Test, printer: Printer) -> None:
 	"""
 	Prints test footer with status of test.
 
@@ -562,10 +568,11 @@ def print_test_footer(test: Test) -> None:
 	Parameters:
 	test - Test object representing current test being printed
+	printer - Printer object to output results
 	"""
-	message = format_test_result(test)
-	stdout.print_with_timestamp(format_test_divider(message,
-						len(message) - stdout.color_len()))
+	message = format_test_result(test, printer)
+	printer.print_with_timestamp(format_test_divider(message,
+						len(message) - printer.color_len()))
@@ -601,7 +608,7 @@ def _summarize_failed_tests(test: Test) -> str:
 	return 'Failures: ' + ', '.join(failures)
 
-def print_summary_line(test: Test) -> None:
+def print_summary_line(test: Test, printer: Printer) -> None:
 	"""
 	Prints summary line of test object. Color of line is dependent on
 	status of test. Color is green if test passes, yellow if test is
@@ -614,6 +621,7 @@ def print_summary_line(test: Test) -> None:
 	Errors: 0"
 
 	test - Test object representing current test being printed
+	printer - Printer object to output results
 	"""
 	if test.status == TestStatus.SUCCESS:
 		color = stdout.green
@@ -621,7 +629,7 @@ def print_summary_line(test: Test) -> None:
 		color = stdout.yellow
 	else:
 		color = stdout.red
-	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))
+	printer.print_with_timestamp(color(f'Testing complete. {test.counts}'))
 
 	# Summarize failures that might have gone off-screen since we had a lot
 	# of tests (arbitrarily defined as >=100 for now).
@@ -630,7 +638,7 @@ def print_summary_line(test: Test) -> None:
 	summarized = _summarize_failed_tests(test)
 	if not summarized:
 		return
-	stdout.print_with_timestamp(color(summarized))
+	printer.print_with_timestamp(color(summarized))
 
 # Other methods:
@@ -654,7 +662,7 @@ def bubble_up_test_results(test: Test) -> None:
 	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
 		test.status = TestStatus.TEST_CRASHED
 
-def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test:
+def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool, printer: Printer) -> Test:
 	"""
 	Finds next test to parse in LineStream, creates new Test object,
 	parses any subtests of the test, populates Test object with all
@@ -710,6 +718,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
 	log - list of strings containing any preceding diagnostic lines
 		corresponding to the current test
 	is_subtest - boolean indicating whether test is a subtest
+	printer - Printer object to output results
 
 	Return:
 	Test object populated with characteristics and any subtests
@@ -725,14 +734,14 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
 		# If parsing the main/top-level test, parse KTAP version line and
 		# test plan
 		test.name = "main"
-		ktap_line = parse_ktap_header(lines, test)
+		ktap_line = parse_ktap_header(lines, test, printer)
 		test.log.extend(parse_diagnostic(lines))
 		parse_test_plan(lines, test)
 		parent_test = True
 	else:
 		# If not the main test, attempt to parse a test header containing
 		# the KTAP version line and/or subtest header line
-		ktap_line = parse_ktap_header(lines, test)
+		ktap_line = parse_ktap_header(lines, test, printer)
 		subtest_line = parse_test_header(lines, test)
 		parent_test = (ktap_line or subtest_line)
 		if parent_test:
@@ -740,7 +749,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
 			# to parse test plan and print test header
 			test.log.extend(parse_diagnostic(lines))
 			parse_test_plan(lines, test)
-			print_test_header(test)
+			print_test_header(test, printer)
 	expected_count = test.expected_count
 	subtests = []
 	test_num = 1
@@ -758,16 +767,16 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
 				# If parser reaches end of test before
 				# parsing expected number of subtests, print
 				# crashed subtest and record error
-				test.add_error('missing expected subtest!')
+				test.add_error(printer, 'missing expected subtest!')
 				sub_test.log.extend(sub_log)
 				test.counts.add_status(
 					TestStatus.TEST_CRASHED)
-				print_test_result(sub_test)
+				print_test_result(sub_test, printer)
 			else:
 				test.log.extend(sub_log)
 			break
 		else:
-			sub_test = parse_test(lines, test_num, sub_log, True)
+			sub_test = parse_test(lines, test_num, sub_log, True, printer)
 		subtests.append(sub_test)
 		test_num += 1
 	test.subtests = subtests
@@ -775,51 +784,51 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
 		# If not main test, look for test result line
 		test.log.extend(parse_diagnostic(lines))
 		if test.name != "" and not peek_test_name_match(lines, test):
-			test.add_error('missing subtest result line!')
+			test.add_error(printer, 'missing subtest result line!')
 		else:
-			parse_test_result(lines, test, expected_num)
+			parse_test_result(lines, test, expected_num, printer)
 
 	# Check for there being no subtests within parent test
 	if parent_test and len(subtests) == 0:
 		# Don't override a bad status if this test had one reported.
 		# Assumption: no subtests means CRASHED is from Test.__init__()
 		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
-			print_log(test.log)
+			print_log(test.log, printer)
 			test.status = TestStatus.NO_TESTS
-			test.add_error('0 tests run!')
+			test.add_error(printer, '0 tests run!')
 
 	# Add statuses to TestCounts attribute in Test object
 	bubble_up_test_results(test)
 	if parent_test and is_subtest:
 		# If test has subtests and is not the main test object, print
 		# footer.
-		print_test_footer(test)
+		print_test_footer(test, printer)
 	elif is_subtest:
-		print_test_result(test)
+		print_test_result(test, printer)
 	return test
 
-def parse_run_tests(kernel_output: Iterable[str]) -> Test:
+def parse_run_tests(kernel_output: Iterable[str], printer: Printer) -> Test:
 	"""
 	Using kernel output, extract KTAP lines, parse the lines for test
 	results and print condensed test results and summary line.
 
 	Parameters:
 	kernel_output - Iterable object contains lines of kernel output
+	printer - Printer object to output results
 
 	Return:
 	Test - the main test object with all subtests.
 	"""
-	stdout.print_with_timestamp(DIVIDER)
+	printer.print_with_timestamp(DIVIDER)
 	lines = extract_tap_lines(kernel_output)
 	test = Test()
 	if not lines:
 		test.name = '<missing>'
-		test.add_error('Could not find any KTAP output. Did any KUnit tests run?')
+		test.add_error(printer, 'Could not find any KTAP output. Did any KUnit tests run?')
 		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
 	else:
-		test = parse_test(lines, 0, [], False)
+		test = parse_test(lines, 0, [], False, printer)
 		if test.status != TestStatus.NO_TESTS:
 			test.status = test.counts.get_status()
-	stdout.print_with_timestamp(DIVIDER)
-	print_summary_line(test)
+	printer.print_with_timestamp(DIVIDER)
 	return test
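On the caller side, the printer is now an explicit argument, so output routing becomes the caller's choice. A usage sketch against the signatures changed above (the log filename is hypothetical):

import kunit_parser
from kunit_printer import stdout, null_printer

with open('kunit_output.log') as f:  # hypothetical saved kernel log
	test = kunit_parser.parse_run_tests(f.readlines(), null_printer)  # parse silently
kunit_parser.print_summary_line(test, stdout)  # then print just the summary line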

--- a/tools/testing/kunit/kunit_printer.py
+++ b/tools/testing/kunit/kunit_printer.py
@@ -15,12 +15,17 @@ _RESET = '\033[0;0m'
 class Printer:
 	"""Wraps a file object, providing utilities for coloring output, etc."""
 
-	def __init__(self, output: typing.IO[str]):
+	def __init__(self, print: bool=True, output: typing.IO[str]=sys.stdout):
 		self._output = output
-		self._use_color = output.isatty()
+		self._print = print
+		if print:
+			self._use_color = output.isatty()
+		else:
+			self._use_color = False
 
 	def print(self, message: str) -> None:
-		print(message, file=self._output)
+		if self._print:
+			print(message, file=self._output)
 
 	def print_with_timestamp(self, message: str) -> None:
 		ts = datetime.datetime.now().strftime('%H:%M:%S')
@@ -45,4 +50,5 @@ class Printer:
 		return len(self.red(''))
 
 # Provides a default instance that prints to stdout
-stdout = Printer(sys.stdout)
+stdout = Printer()
+null_printer = Printer(print=False)
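A quick illustration of the two module-level instances defined above:

from kunit_printer import stdout, null_printer

stdout.print('visible')  # written to sys.stdout
null_printer.print('hidden')  # dropped: this instance was constructed with print=False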

--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -23,6 +23,7 @@ import kunit_parser
 import kunit_kernel
 import kunit_json
 import kunit
+from kunit_printer import stdout
 
 test_tmpdir = ''
 abs_test_data_dir = ''
@@ -139,28 +140,28 @@ class KUnitParserTest(unittest.TestCase):
 	def test_parse_successful_test_log(self):
 		all_passed_log = test_data_path('test_is_test_passed-all_passed.log')
 		with open(all_passed_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 		self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
 		self.assertEqual(result.counts.errors, 0)
 
 	def test_parse_successful_nested_tests_log(self):
 		all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log')
 		with open(all_passed_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 		self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
 		self.assertEqual(result.counts.errors, 0)
 
 	def test_kselftest_nested(self):
 		kselftest_log = test_data_path('test_is_test_passed-kselftest.log')
 		with open(kselftest_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 		self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
 		self.assertEqual(result.counts.errors, 0)
 
 	def test_parse_failed_test_log(self):
 		failed_log = test_data_path('test_is_test_passed-failure.log')
 		with open(failed_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 		self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
 		self.assertEqual(result.counts.errors, 0)
@@ -168,7 +169,7 @@ class KUnitParserTest(unittest.TestCase):
 		empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log')
 		with open(empty_log) as file:
 			result = kunit_parser.parse_run_tests(
-				kunit_parser.extract_tap_lines(file.readlines()))
+				kunit_parser.extract_tap_lines(file.readlines()), stdout)
 		self.assertEqual(0, len(result.subtests))
 		self.assertEqual(kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, result.status)
 		self.assertEqual(result.counts.errors, 1)
@@ -179,7 +180,7 @@ class KUnitParserTest(unittest.TestCase):
 		with open(missing_plan_log) as file:
 			result = kunit_parser.parse_run_tests(
 				kunit_parser.extract_tap_lines(
-					file.readlines()))
+					file.readlines()), stdout)
 		# A missing test plan is not an error.
 		self.assertEqual(result.counts, kunit_parser.TestCounts(passed=10, errors=0))
 		self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
@@ -188,7 +189,7 @@ class KUnitParserTest(unittest.TestCase):
 		header_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
 		with open(header_log) as file:
 			result = kunit_parser.parse_run_tests(
-				kunit_parser.extract_tap_lines(file.readlines()))
+				kunit_parser.extract_tap_lines(file.readlines()), stdout)
 		self.assertEqual(0, len(result.subtests))
 		self.assertEqual(kunit_parser.TestStatus.NO_TESTS, result.status)
 		self.assertEqual(result.counts.errors, 1)
@@ -197,7 +198,7 @@ class KUnitParserTest(unittest.TestCase):
 		no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log')
 		with open(no_plan_log) as file:
 			result = kunit_parser.parse_run_tests(
-				kunit_parser.extract_tap_lines(file.readlines()))
+				kunit_parser.extract_tap_lines(file.readlines()), stdout)
 		self.assertEqual(0, len(result.subtests[0].subtests[0].subtests))
 		self.assertEqual(
 			kunit_parser.TestStatus.NO_TESTS,
@@ -210,7 +211,7 @@ class KUnitParserTest(unittest.TestCase):
 		print_mock = mock.patch('kunit_printer.Printer.print').start()
 		with open(crash_log) as file:
 			result = kunit_parser.parse_run_tests(
-				kunit_parser.extract_tap_lines(file.readlines()))
+				kunit_parser.extract_tap_lines(file.readlines()), stdout)
 		print_mock.assert_any_call(StrContains('Could not find any KTAP output.'))
 		print_mock.stop()
 		self.assertEqual(0, len(result.subtests))
@@ -219,7 +220,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_skipped_test(self):
 		skipped_log = test_data_path('test_skip_tests.log')
 		with open(skipped_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 
 		# A skipped test does not fail the whole suite.
 		self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
@@ -228,7 +229,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_skipped_all_tests(self):
 		skipped_log = test_data_path('test_skip_all_tests.log')
 		with open(skipped_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 
 		self.assertEqual(kunit_parser.TestStatus.SKIPPED, result.status)
 		self.assertEqual(result.counts, kunit_parser.TestCounts(skipped=5))
@@ -236,7 +237,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_ignores_hyphen(self):
 		hyphen_log = test_data_path('test_strip_hyphen.log')
 		with open(hyphen_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 
 		# A skipped test does not fail the whole suite.
 		self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
@@ -250,7 +251,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_ignores_prefix_printk_time(self):
 		prefix_log = test_data_path('test_config_printk_time.log')
 		with open(prefix_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 			self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
 			self.assertEqual('kunit-resource-test', result.subtests[0].name)
 			self.assertEqual(result.counts.errors, 0)
@@ -258,7 +259,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_ignores_multiple_prefixes(self):
 		prefix_log = test_data_path('test_multiple_prefixes.log')
 		with open(prefix_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 			self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
 			self.assertEqual('kunit-resource-test', result.subtests[0].name)
 			self.assertEqual(result.counts.errors, 0)
@@ -266,7 +267,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_prefix_mixed_kernel_output(self):
 		mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
 		with open(mixed_prefix_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 			self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
 			self.assertEqual('kunit-resource-test', result.subtests[0].name)
 			self.assertEqual(result.counts.errors, 0)
@@ -274,7 +275,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_prefix_poundsign(self):
 		pound_log = test_data_path('test_pound_sign.log')
 		with open(pound_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 			self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
 			self.assertEqual('kunit-resource-test', result.subtests[0].name)
 			self.assertEqual(result.counts.errors, 0)
@@ -282,7 +283,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_kernel_panic_end(self):
 		panic_log = test_data_path('test_kernel_panic_interrupt.log')
 		with open(panic_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 			self.assertEqual(kunit_parser.TestStatus.TEST_CRASHED, result.status)
 			self.assertEqual('kunit-resource-test', result.subtests[0].name)
 			self.assertGreaterEqual(result.counts.errors, 1)
@@ -290,7 +291,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_pound_no_prefix(self):
 		pound_log = test_data_path('test_pound_no_prefix.log')
 		with open(pound_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 			self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
 			self.assertEqual('kunit-resource-test', result.subtests[0].name)
 			self.assertEqual(result.counts.errors, 0)
@@ -310,7 +311,7 @@ class KUnitParserTest(unittest.TestCase):
 		not ok 2 - test2
 		not ok 1 - some_failed_suite
 		"""
-		result = kunit_parser.parse_run_tests(output.splitlines())
+		result = kunit_parser.parse_run_tests(output.splitlines(), stdout)
 		self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
 
 		self.assertEqual(kunit_parser._summarize_failed_tests(result),
@@ -319,7 +320,7 @@ class KUnitParserTest(unittest.TestCase):
 	def test_ktap_format(self):
 		ktap_log = test_data_path('test_parse_ktap_output.log')
 		with open(ktap_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 		self.assertEqual(result.counts, kunit_parser.TestCounts(passed=3))
 		self.assertEqual('suite', result.subtests[0].name)
 		self.assertEqual('case_1', result.subtests[0].subtests[0].name)
@@ -328,13 +329,13 @@ class KUnitParserTest(unittest.TestCase):
 	def test_parse_subtest_header(self):
 		ktap_log = test_data_path('test_parse_subtest_header.log')
 		with open(ktap_log) as file:
-			kunit_parser.parse_run_tests(file.readlines())
+			kunit_parser.parse_run_tests(file.readlines(), stdout)
 		self.print_mock.assert_any_call(StrContains('suite (1 subtest)'))
 
 	def test_parse_attributes(self):
 		ktap_log = test_data_path('test_parse_attributes.log')
 		with open(ktap_log) as file:
-			result = kunit_parser.parse_run_tests(file.readlines())
+			result = kunit_parser.parse_run_tests(file.readlines(), stdout)
 
 		# Test should pass with no errors
 		self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=0))
@@ -355,7 +356,7 @@ class KUnitParserTest(unittest.TestCase):
 		Indented more.
 		not ok 1 test1
 		"""
-		result = kunit_parser.parse_run_tests(output.splitlines())
+		result = kunit_parser.parse_run_tests(output.splitlines(), stdout)
 		self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
 		self.print_mock.assert_any_call(StrContains('Test output.'))
@@ -544,7 +545,7 @@ class KUnitJsonTest(unittest.TestCase):
 	def _json_for(self, log_file):
 		with open(test_data_path(log_file)) as file:
-			test_result = kunit_parser.parse_run_tests(file)
+			test_result = kunit_parser.parse_run_tests(file, stdout)
 			json_obj = kunit_json.get_json_result(
 				test=test_result,
 				metadata=kunit_json.Metadata())
@@ -810,7 +811,7 @@ class KUnitMainTest(unittest.TestCase):
 		self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
 		got = kunit._list_tests(self.linux_source_mock,
-				kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False))
+				kunit.KunitExecRequest(None, None, False, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False))
 		self.assertEqual(got, want)
 		# Should respect the user's filter glob when listing tests.
 		self.linux_source_mock.run_kernel.assert_called_once_with(
@@ -823,7 +824,7 @@ class KUnitMainTest(unittest.TestCase):
 		# Should respect the user's filter glob when listing tests.
 		mock_tests.assert_called_once_with(mock.ANY,
-				kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False))
+				kunit.KunitExecRequest(None, None, False, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False))
 		self.linux_source_mock.run_kernel.assert_has_calls([
 			mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', filter='', filter_action=None, timeout=300),
 			mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', filter='', filter_action=None, timeout=300),
@@ -836,7 +837,7 @@ class KUnitMainTest(unittest.TestCase):
 		# Should respect the user's filter glob when listing tests.
 		mock_tests.assert_called_once_with(mock.ANY,
-				kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'test', False, False))
+				kunit.KunitExecRequest(None, None, False, '.kunit', 300, 'suite*', '', None, None, 'test', False, False))
 		self.linux_source_mock.run_kernel.assert_has_calls([
 			mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', filter='', filter_action=None, timeout=300),
 			mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', filter='', filter_action=None, timeout=300),