Log test vector properties #1087

Merged · 1 commit · Jan 23, 2025
forge/test/operators/pytorch/conftest.py (43 changes: 42 additions, 1 deletion)
@@ -14,12 +14,17 @@
 from test.operators.utils import PyTestUtils
 from test.operators.utils import FailingReasonsValidation
 
+from ..utils import TestPlanUtils
+
 
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item: _pytest.python.Function, call: _pytest.runner.CallInfo):
     outcome: pluggy.callers._Result = yield
     report: _pytest.reports.TestReport = outcome.get_result()
 
+    if report.when == "call" or (report.when == "setup" and report.skipped):
+        xfail_reason = PyTestUtils.get_xfail_reason(item)
+
     # This hook function is called after each step of the test execution (setup, call, teardown)
     if call.when == "call":  # 'call' is a phase when the test is actually executed
@@ -30,7 +35,7 @@ def pytest_runtest_makereport(item: _pytest.python.Function, call: _pytest.runner.CallInfo):
         )
 
         exception_value = call.excinfo.value
-        xfail_reason = PyTestUtils.get_xfail_reason(item)
+
         if xfail_reason is not None:  # an xfail reason is defined for the test
             valid_reason = FailingReasonsValidation.validate_exception(exception_value, xfail_reason)
@@ -50,3 +55,39 @@ def pytest_runtest_makereport(item: _pytest.python.Function, call: _pytest.runner.CallInfo):
             outcome.force_result(new_report)
         else:
             logger.debug(f"Test '{item.name}' failed with exception: {type(exception_value)} '{exception_value}'")
+
+    if report.when == "call" or (report.when == "setup" and report.skipped):
+        try:
+            log_test_vector_properties(item, report, xfail_reason)
+        except Exception as e:
+            logger.error(f"Failed to log test vector properties: {e}")
+            logger.exception(e)
+            pass
+
+
+def log_test_vector_properties(item: _pytest.python.Function, report: _pytest.reports.TestReport, xfail_reason: str):
+    original_name = item.originalname
+    test_id = item.name
+    test_id = test_id.replace(f"{original_name}[", "")
+    test_id = test_id.replace("]", "")
+    if test_id == "no_device-test_vector0":
+        # This is not a valid test id. It happens when no tests are selected to run.
+        return
+    test_vector = TestPlanUtils.test_id_to_test_vector(test_id)
+
+    item.user_properties.append(("id", test_id))
+    item.user_properties.append(("operator", test_vector.operator))
+    item.user_properties.append(
+        ("input_source", test_vector.input_source.name if test_vector.input_source is not None else None)
+    )
+    item.user_properties.append(
+        ("dev_data_format", test_vector.dev_data_format.name if test_vector.dev_data_format is not None else None)
+    )
+    item.user_properties.append(
+        ("math_fidelity", test_vector.math_fidelity.name if test_vector.math_fidelity is not None else None)
+    )
+    item.user_properties.append(("input_shape", test_vector.input_shape))
+    item.user_properties.append(("kwargs", test_vector.kwargs))
+    if xfail_reason is not None:
+        item.user_properties.append(("xfail_reason", xfail_reason))
+    item.user_properties.append(("outcome", report.outcome))
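The id handling at the top of `log_test_vector_properties` relies on two pytest attributes: `item.originalname` is the test function's name without parameters, while `item.name` is the full parametrized id of the form `originalname[param-string]`. Stripping the `originalname[` prefix and the trailing `]` leaves the bare parameter string that `TestPlanUtils.test_id_to_test_vector` parses; the parameter encoding itself is repository-specific and not visible in this diff. A toy illustration of the stripping step, using a made-up parameter string:

    # The parameter string below is hypothetical; the real encoding is defined
    # by TestPlanUtils elsewhere in the repo.
    original_name = "test_plan"
    name = "test_plan[exp-FROM_HOST-Float16_b-HiFi4]"

    test_id = name.replace(f"{original_name}[", "").replace("]", "")
    print(test_id)  # -> exp-FROM_HOST-Float16_b-HiFi4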