Mirror of https://gitlab.isc.org/isc-projects/bind9
Ensure assertions and exceptions end up in system test log
If a test fails with an assertion failure or an exception, its message and traceback are displayed in the pytest output. This information should also be preserved in the test-specific logger of the given system test, to make test failures easier to debug.
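For context, the diff below extends a standard pytest hookwrapper: the wrapper yields so pytest can build the TestReport first, then inspects the finished report, whose longreprtext attribute carries the assertion/exception text together with its traceback. A minimal, self-contained sketch of that mechanism (the logger name and log format here are made up for illustration; this is not the actual conftest.py code):

    import logging

    import pytest

    logger = logging.getLogger("system-test")  # hypothetical logger name


    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item):
        outcome = yield                 # let pytest construct the TestReport first
        report = outcome.get_result()
        if report.longreprtext:         # non-empty only for failures/errors
            logger.debug("%s %s:\n%s", report.when, item.nodeid, report.longreprtext)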
@@ -193,6 +193,19 @@ if os.getenv("LEGACY_TEST_RUNNER", "0") == "0":
         # from previous runs could mess with the runner.
         return "_tmp_" in str(path)
 
+    class NodeResult:
+        def __init__(self, report=None):
+            self.outcome = None
+            self.messages = []
+            if report is not None:
+                self.update(report)
+
+        def update(self, report):
+            if self.outcome is None or report.outcome != "passed":
+                self.outcome = report.outcome
+            if report.longreprtext:
+                self.messages.append(report.longreprtext)
+
     @pytest.hookimpl(tryfirst=True, hookwrapper=True)
     def pytest_runtest_makereport(item):
         """Hook that is used to expose test results to session (for use in fixtures)."""
@@ -210,9 +223,8 @@ if os.getenv("LEGACY_TEST_RUNNER", "0") == "0":
             test_results = getattr(item.session, "test_results")
         except AttributeError:
             setattr(item.session, "test_results", test_results)
-        node_result = test_results.get(item.nodeid)
-        if node_result is None or report.outcome != "passed":
-            test_results[item.nodeid] = report.outcome
+        node_result = test_results.setdefault(item.nodeid, NodeResult())
+        node_result.update(report)
 
     # --------------------------- Fixtures -----------------------------------
 
@@ -328,15 +340,20 @@ if os.getenv("LEGACY_TEST_RUNNER", "0") == "0":
             for node in request.node.collect()
             if node.nodeid in all_test_results
         }
-        logger.debug(test_results)
         assert len(test_results)
-        failed = any(res == "failed" for res in test_results.values())
-        skipped = any(res == "skipped" for res in test_results.values())
+        messages = []
+        for node, result in test_results.items():
+            logger.debug("%s %s", result.outcome.upper(), node)
+            messages.extend(result.messages)
+        for message in messages:
+            logger.debug("\n" + message)
+        failed = any(res.outcome == "failed" for res in test_results.values())
+        skipped = any(res.outcome == "skipped" for res in test_results.values())
         if failed:
             return "failed"
         if skipped:
             return "skipped"
-        assert all(res == "passed" for res in test_results.values())
+        assert all(res.outcome == "passed" for res in test_results.values())
        return "passed"
 
     # Create a temporary directory with a copy of the original system test dir contents
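To see how the new aggregation behaves, note that pytest emits up to three reports per test (setup, call, teardown) and that update() records the outcome of any non-passing report, so a failure in any phase sticks, while collecting every non-empty longreprtext. A small demonstration (FakeReport is a hypothetical stand-in for pytest's TestReport; NodeResult is copied from the diff above; the failure message is invented):

    class NodeResult:
        def __init__(self, report=None):
            self.outcome = None
            self.messages = []
            if report is not None:
                self.update(report)

        def update(self, report):
            if self.outcome is None or report.outcome != "passed":
                self.outcome = report.outcome
            if report.longreprtext:
                self.messages.append(report.longreprtext)


    class FakeReport:
        def __init__(self, outcome, longreprtext=""):
            self.outcome = outcome
            self.longreprtext = longreprtext


    result = NodeResult()
    result.update(FakeReport("passed"))   # setup phase
    result.update(FakeReport("failed", "AssertionError: unexpected rcode"))  # call phase
    result.update(FakeReport("passed"))   # teardown phase
    assert result.outcome == "failed"     # "passed" never overwrites a failure
    assert result.messages == ["AssertionError: unexpected rcode"]

This is also why the code in the last hunk can switch on res.outcome and replay the collected messages into the per-test log.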