- """Support for skip/xfail functions and markers."""
- import dataclasses
- import os
- import platform
- import sys
- import traceback
- from collections.abc import Mapping
- from typing import Generator
- from typing import Optional
- from typing import Tuple
- from typing import Type
- from _pytest.config import Config
- from _pytest.config import hookimpl
- from _pytest.config.argparsing import Parser
- from _pytest.mark.structures import Mark
- from _pytest.nodes import Item
- from _pytest.outcomes import fail
- from _pytest.outcomes import skip
- from _pytest.outcomes import xfail
- from _pytest.reports import BaseReport
- from _pytest.runner import CallInfo
- from _pytest.stash import StashKey


def pytest_addoption(parser: Parser) -> None:
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="Report the results of xfail tests as if they were not marked",
    )

    parser.addini(
        "xfail_strict",
        "Default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )
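

# Illustrative example (project configuration, not part of this module): the
# "xfail_strict" ini option registered above can be enabled project-wide,
# e.g. in pytest.ini:
#
#     [pytest]
#     xfail_strict = true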


def pytest_configure(config: Config) -> None:
    if config.option.runxfail:
        # Hack: --runxfail should also ignore imperative pytest.xfail() calls
        # inside tests, so temporarily replace pytest.xfail with a no-op and
        # restore the original on cleanup.
        import pytest

        old = pytest.xfail
        config.add_cleanup(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception  # type: ignore[attr-defined]
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition, ..., *, reason=...): "
        "skip the given test function if any of the conditions evaluate to True. "
        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
        "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
        "mark the test function as an expected failure if any of the conditions "
        "evaluate to True. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail",
    )
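

# Illustrative usage of the markers registered above (assumed test code, not
# part of this plugin):
#
#     @pytest.mark.skipif(sys.platform == "win32", reason="requires a POSIX platform")
#     def test_unix_only(): ...
#
#     @pytest.mark.xfail(raises=NotImplementedError, strict=True, reason="not implemented yet")
#     def test_future_feature(): ...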


def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
    """Evaluate a single skipif/xfail condition.

    If an old-style string condition is given, it is eval()'d, otherwise the
    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
    is raised.

    Returns (result, reason). The reason is only relevant if the result is True.
    """
    # String condition.
    if isinstance(condition, str):
        globals_ = {
            "os": os,
            "sys": sys,
            "platform": platform,
            "config": item.config,
        }
        for dictionary in reversed(
            item.ihook.pytest_markeval_namespace(config=item.config)
        ):
            if not isinstance(dictionary, Mapping):
                raise ValueError(
                    "pytest_markeval_namespace() needs to return a dict, got {!r}".format(
                        dictionary
                    )
                )
            globals_.update(dictionary)
        if hasattr(item, "obj"):
            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
        try:
            filename = f"<{mark.name} condition>"
            condition_code = compile(condition, filename, "eval")
            result = eval(condition_code, globals_)
        except SyntaxError as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                "    " + " " * (exc.offset or 0) + "^",
                "SyntaxError: invalid syntax",
            ]
            fail("\n".join(msglines), pytrace=False)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition" % mark.name,
                "    " + condition,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    # Boolean condition.
    else:
        try:
            result = bool(condition)
        except Exception as exc:
            msglines = [
                "Error evaluating %r condition as a boolean" % mark.name,
                *traceback.format_exception_only(type(exc), exc),
            ]
            fail("\n".join(msglines), pytrace=False)

    reason = mark.kwargs.get("reason", None)
    if reason is None:
        if isinstance(condition, str):
            reason = "condition: " + condition
        else:
            # XXX better be checked at collection time
            msg = (
                "Error evaluating %r: " % mark.name
                + "you need to specify reason=STRING when using booleans as conditions."
            )
            fail(msg, pytrace=False)

    return result, reason
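

# Illustrative example (assumed test code): a string condition such as
#
#     @pytest.mark.skipif("sys.version_info < (3, 10)", reason="needs Python 3.10+")
#
# is compiled and eval()'d by evaluate_condition() against the globals_
# namespace built above (os, sys, platform, config, plus any
# pytest_markeval_namespace() contributions and the test module's globals).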


@dataclasses.dataclass(frozen=True)
class Skip:
    """The result of evaluate_skip_marks()."""

    reason: str = "unconditional skip"


def evaluate_skip_marks(item: Item) -> Optional[Skip]:
    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
    for mark in item.iter_markers(name="skipif"):
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Skip(reason)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Skip(reason)

    for mark in item.iter_markers(name="skip"):
        try:
            return Skip(*mark.args, **mark.kwargs)
        except TypeError as e:
            raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None

    return None
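

# Illustrative examples (assumed test code): a plain skip mark,
#
#     @pytest.mark.skip(reason="not relevant on this backend")
#     def test_something(): ...
#
# returns Skip("not relevant on this backend") above, while passing a
# condition to pytest.mark.skip (e.g. skip(sys.platform == "win32",
# reason=...)) hits the TypeError branch and its
# "maybe you meant pytest.mark.skipif?" hint.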


@dataclasses.dataclass(frozen=True)
class Xfail:
    """The result of evaluate_xfail_marks()."""

    __slots__ = ("reason", "run", "strict", "raises")

    reason: str
    run: bool
    strict: bool
    raises: Optional[Tuple[Type[BaseException], ...]]


def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
    """Evaluate xfail marks on item, returning Xfail if triggered."""
    for mark in item.iter_markers(name="xfail"):
        run = mark.kwargs.get("run", True)
        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
        raises = mark.kwargs.get("raises", None)
        if "condition" not in mark.kwargs:
            conditions = mark.args
        else:
            conditions = (mark.kwargs["condition"],)

        # Unconditional.
        if not conditions:
            reason = mark.kwargs.get("reason", "")
            return Xfail(reason, run, strict, raises)

        # If any of the conditions are true.
        for condition in conditions:
            result, reason = evaluate_condition(item, mark, condition)
            if result:
                return Xfail(reason, run, strict, raises)

    return None
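

# Note: iter_markers() yields the closest applicable mark first, so the first
# xfail mark whose condition holds wins. Illustrative example (assumed test
# code):
#
#     @pytest.mark.xfail(run=False, reason="flaky, see issue tracker")
#     def test_known_flaky(): ...
#
# With run=False the test body is never executed; pytest_runtest_setup() below
# reports it as xfailed with reason "[NOTRUN] flaky, see issue tracker".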


# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StashKey[Optional[Xfail]]()


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
    skipped = evaluate_skip_marks(item)
    if skipped:
        raise skip.Exception(skipped.reason, _use_item_location=True)

    item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)


@hookimpl(hookwrapper=True)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
    xfailed = item.stash.get(xfailed_key, None)
    if xfailed is None:
        item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)

    if xfailed and not item.config.option.runxfail and not xfailed.run:
        xfail("[NOTRUN] " + xfailed.reason)

    yield

    # The test run may have added an xfail mark dynamically.
    xfailed = item.stash.get(xfailed_key, None)
    if xfailed is None:
        item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item)
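

# The report hook below rewrites outcomes for xfail handling: an imperative
# pytest.xfail() call becomes "skipped" with a wasxfail reason; an expected
# failure becomes "skipped" unless the raised exception does not match
# `raises`, in which case it is reported as a true failure; an unexpected pass
# keeps outcome "passed" with wasxfail set, or becomes "failed" when
# strict=True.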


@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
    outcome = yield
    rep = outcome.get_result()
    xfailed = item.stash.get(xfailed_key, None)
    if item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
        assert call.excinfo.value.msg is not None
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif not rep.skipped and xfailed:
        if call.excinfo:
            raises = xfailed.raises
            if raises is not None and not isinstance(call.excinfo.value, raises):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
        elif call.when == "call":
            if xfailed.strict:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
            else:
                rep.outcome = "passed"
                rep.wasxfail = xfailed.reason
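

# pytest_report_teststatus() below maps the adjusted reports to terminal
# output as (category, short progress letter, verbose word): expected failures
# show as "x" / XFAIL, unexpected passes as "X" / XPASS.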


def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            return "xpassed", "X", "XPASS"
    return None