skipping.py

# -*- coding: utf-8 -*-
""" support for skip/xfail functions and markers. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail


def pytest_addoption(parser):
    group = parser.getgroup("general")
    group.addoption(
        "--runxfail",
        action="store_true",
        dest="runxfail",
        default=False,
        help="report the results of xfail tests as if they were not marked",
    )

    parser.addini(
        "xfail_strict",
        "default for the strict parameter of xfail "
        "markers when not given explicitly (default: False)",
        default=False,
        type="bool",
    )
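
# Illustrative sketch (not part of the original module): how the option and ini
# value registered above are typically used.  The file name "pytest.ini" and the
# sample value are assumptions chosen for illustration.
#
#   # pytest.ini
#   [pytest]
#   xfail_strict = true
#
#   # command line: report xfail-marked tests as if they were not marked
#   $ pytest --runxfail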


def pytest_configure(config):
    if config.option.runxfail:
        # yay a hack: replace pytest.xfail with a no-op so that imperative
        # xfail() calls do nothing while --runxfail is in effect
        import pytest

        old = pytest.xfail
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        nop.Exception = xfail.Exception
        setattr(pytest, "xfail", nop)

    config.addinivalue_line(
        "markers",
        "skip(reason=None): skip the given test function with an optional reason. "
        'Example: skip(reason="no way of currently testing this") skips the '
        "test.",
    )
    config.addinivalue_line(
        "markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. See "
        "https://docs.pytest.org/en/latest/skipping.html",
    )
    config.addinivalue_line(
        "markers",
        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
        "mark the test function as an expected failure if eval(condition) "
        "has a True value. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
        "a true failure. See https://docs.pytest.org/en/latest/skipping.html",
    )
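
# Illustrative sketch (not part of the original module): a test file using the
# three markers documented above.  The test names and the skip/xfail conditions
# are hypothetical.
#
#   import sys
#   import pytest
#
#   @pytest.mark.skip(reason="no way of currently testing this")
#   def test_unsupported():
#       ...
#
#   @pytest.mark.skipif(sys.platform == "win32", reason="does not run on win32")
#   def test_posix_only():
#       ...
#
#   @pytest.mark.xfail(raises=NotImplementedError, reason="not implemented yet")
#   def test_future_feature():
#       raise NotImplementedError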


@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    # Check if skip or skipif are specified as pytest marks
    item._skipped_by_mark = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._skipped_by_mark = True
        skip(eval_skipif.getexplanation())

    for skip_info in item.iter_markers(name="skip"):
        item._skipped_by_mark = True
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")

    item._evalxfail = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)


@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
    # re-check xfail(run=False) (the mark may have been added during setup)
    # and, if the test passed, enforce xfail(strict=True)
    check_xfail_no_run(pyfuncitem)
    outcome = yield
    passed = outcome.excinfo is None
    if passed:
        check_strict_xfail(pyfuncitem)


def check_xfail_no_run(item):
    """check xfail(run=False)"""
    if not item.config.option.runxfail:
        evalxfail = item._evalxfail
        if evalxfail.istrue():
            if not evalxfail.get("run", True):
                xfail("[NOTRUN] " + evalxfail.getexplanation())
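
# Illustrative sketch (not part of the original module): xfail(run=False) marks
# a test as expected-to-fail without executing it; the hypothetical test below
# would be reported as xfailed with a "[NOTRUN]" prefix by the check above.
#
#   @pytest.mark.xfail(run=False, reason="crashes the interpreter")
#   def test_crashes():
#       ...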


def check_strict_xfail(pyfuncitem):
    """check xfail(strict=True) for the given PASSING test"""
    evalxfail = pyfuncitem._evalxfail
    if evalxfail.istrue():
        strict_default = pyfuncitem.config.getini("xfail_strict")
        is_strict_xfail = evalxfail.get("strict", strict_default)
        if is_strict_xfail:
            del pyfuncitem._evalxfail
            explanation = evalxfail.getexplanation()
            fail("[XPASS(strict)] " + explanation, pytrace=False)
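
# Illustrative sketch (not part of the original module): with strict xfail, an
# unexpectedly passing test is turned into a failure by check_strict_xfail
# above and reported as "[XPASS(strict)] ...".  The test name is hypothetical.
#
#   @pytest.mark.xfail(strict=True, reason="floating point rounding")
#   def test_rounding():
#       # fails today; if it ever passes, strict xfail reports [XPASS(strict)]
#       assert round(2.675, 2) == 2.68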


@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, "_evalxfail", None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
        from _pytest.compat import _is_unittest_unexpected_success_a_failure

        if item._unexpectedsuccess:
            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
        else:
            rep.longrepr = "Unexpected success"
        if _is_unittest_unexpected_success_a_failure():
            rep.outcome = "failed"
        else:
            rep.outcome = "passed"
        rep.wasxfail = rep.longrepr
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
        if call.excinfo:
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            strict_default = item.config.getini("xfail_strict")
            is_strict_xfail = evalxfail.get("strict", strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif (
        getattr(item, "_skipped_by_mark", False)
        and rep.skipped
        and type(rep.longrepr) is tuple
    ):
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        filename, line, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line, reason


# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            return "xpassed", "X", "XPASS"