conftest.py

import os
from typing import MutableMapping

import pytest
import responses
from django.db import connections

from sentry.silo import SiloMode

pytest_plugins = ["sentry.testutils.pytest"]


# XXX: The code below is vendored from https://github.com/utgwkk/pytest-github-actions-annotate-failures
# so that we can add support for pytest_rerunfailures:
# retried tests will no longer be annotated in GHA.
#
# Reference:
# https://docs.pytest.org/en/latest/writing_plugins.html#hookwrapper-executing-around-other-hooks
# https://docs.pytest.org/en/latest/writing_plugins.html#hook-function-ordering-call-example
# https://docs.pytest.org/en/stable/reference.html#pytest.hookspec.pytest_runtest_makereport
#
# Inspired by:
# https://github.com/pytest-dev/pytest/blob/master/src/_pytest/terminal.py
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # execute all other hooks to obtain the report object
    outcome = yield
    report = outcome.get_result()

    # enable only in a workflow of GitHub Actions
    # ref: https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables#default-environment-variables
    if os.environ.get("GITHUB_ACTIONS") != "true":
        return

    # If we have the pytest_rerunfailures plugin, and there are still retries
    # to be run, then do not return the error.
    if hasattr(item, "execution_count"):
        import pytest_rerunfailures

        if item.execution_count <= pytest_rerunfailures.get_reruns_count(item):
            return

    if report.when == "call" and report.failed:
        # collect information to be annotated
        filesystempath, lineno, _ = report.location

        # try to convert to absolute path in GitHub Actions
        workspace = os.environ.get("GITHUB_WORKSPACE")
        if workspace:
            full_path = os.path.abspath(filesystempath)
            try:
                rel_path = os.path.relpath(full_path, workspace)
            except ValueError:
                # os.path.relpath() will raise ValueError on Windows
                # when full_path and workspace have different mount points.
                # https://github.com/utgwkk/pytest-github-actions-annotate-failures/issues/20
                rel_path = filesystempath
            if not rel_path.startswith(".."):
                filesystempath = rel_path

        if lineno is not None:
            # 0-index to 1-index
            lineno += 1

        # get the name of the current failed test, with parametrize info
        longrepr = report.head_line or item.name

        # get the error message and line number from the actual error
        try:
            longrepr += "\n\n" + report.longrepr.reprcrash.message
            lineno = report.longrepr.reprcrash.lineno
        except AttributeError:
            pass

        print(_error_workflow_command(filesystempath, lineno, longrepr))  # noqa: S002


def _error_workflow_command(filesystempath, lineno, longrepr):
    # Build collection of arguments. Ordering is strict for easy testing.
    details_dict = {"file": filesystempath}
    if lineno is not None:
        details_dict["line"] = lineno
    details = ",".join(f"{k}={v}" for k, v in details_dict.items())

    if longrepr is None:
        return f"\n::error {details}"
    else:
        longrepr = _escape(longrepr)
        return f"\n::error {details}::{longrepr}"


def _escape(s):
    return s.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")
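

# Purely illustrative (not part of the vendored code): for a failing test, the
# hook above prints a GitHub Actions "error" workflow command as built by
# _error_workflow_command(). The path, line number, and message here are made up:
#
#     ::error file=tests/test_example.py,line=7::test_one%0A%0Aassert 1 == 2
#
# The %0A sequences come from _escape(), since a workflow command must fit on a
# single line of output.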


_MODEL_MANIFEST_FILE_PATH = "./model-manifest.json"  # os.getenv("SENTRY_MODEL_MANIFEST_FILE_PATH")
_model_manifest = None


@pytest.fixture(scope="session", autouse=True)
def create_model_manifest_file():
    """Audit which models are touched by each test case and write it to file."""
    # We have to construct the ModelManifest lazily, because importing
    # sentry.testutils.modelmanifest too early causes a dependency cycle.
    from sentry.testutils.modelmanifest import ModelManifest

    if _MODEL_MANIFEST_FILE_PATH:
        global _model_manifest
        _model_manifest = ModelManifest.open(_MODEL_MANIFEST_FILE_PATH)
        with _model_manifest.write():
            yield
    else:
        yield


@pytest.fixture(scope="class", autouse=True)
def register_class_in_model_manifest(request: pytest.FixtureRequest):
    if _model_manifest:
        with _model_manifest.register(request.node.nodeid):
            yield
    else:
        yield


@pytest.fixture(autouse=True)
def validate_silo_mode():
    # NOTE! Hybrid cloud uses many mechanisms to simulate multiple different
    # configurations of the application during tests. It depends upon
    # `override_settings` using the correct contextmanager behaviors and correct
    # thread handling in acceptance tests. If you hit one of these, it's possible
    # either that cleanup logic has a bug, or you may be using a contextmanager
    # incorrectly. Let us know and we can help!
    if SiloMode.get_current_mode() != SiloMode.MONOLITH:
        raise Exception(
            "Possible test leak bug! SiloMode was not reset to Monolith between tests. Please read the comment for validate_silo_mode() in tests/conftest.py."
        )

    yield

    if SiloMode.get_current_mode() != SiloMode.MONOLITH:
        raise Exception(
            "Possible test leak bug! SiloMode was not reset to Monolith between tests. Please read the comment for validate_silo_mode() in tests/conftest.py."
        )


@pytest.fixture(autouse=True)
def setup_simulate_on_commit(request):
    from sentry.testutils.hybrid_cloud import simulate_on_commit

    with simulate_on_commit(request):
        yield


@pytest.fixture(autouse=True)
def setup_enforce_monotonic_transactions(request):
    from sentry.testutils.hybrid_cloud import enforce_no_cross_transaction_interactions

    with enforce_no_cross_transaction_interactions():
        yield


@pytest.fixture(autouse=True)
def audit_hybrid_cloud_writes_and_deletes(request):
    """
    Ensure that write operations on hybrid cloud foreign keys are recorded
    alongside outboxes, or use a context manager to indicate that the
    caller has considered the outbox and didn't accidentally forget.

    Generally you can avoid assertion errors from these checks by:

    1. Running deletion/write logic within an `outbox_context` (see the sketch
       after this docstring).
    2. Using Model.delete()/save methods that create outbox messages in the
       same transaction as the delete operation.

    Scenarios that are generally always unsafe are using `QuerySet.delete()`,
    `QuerySet.update()`, or raw SQL to perform writes.

    The User.delete() method is a good example of how to safely delete records
    and generate outbox messages.
    """
    from sentry.testutils.silo import validate_protected_queries

    debug_cursor_state: MutableMapping[str, bool] = {}
    for conn in connections.all():
        debug_cursor_state[conn.alias] = conn.force_debug_cursor
        conn.queries_log.clear()
        conn.force_debug_cursor = True

    try:
        yield
    finally:
        for conn in connections.all():
            conn.force_debug_cursor = debug_cursor_state[conn.alias]
            validate_protected_queries(conn.queries)


@pytest.fixture(autouse=True)
def check_leaked_responses_mocks():
    yield

    leaked = responses.registered()
    if leaked:
        responses.reset()
        leaked_s = "".join(f"- {item}\n" for item in leaked)
        raise AssertionError(
            f"`responses` were leaked outside of the test context:\n{leaked_s}"
            f"(make sure to use `@responses.activate` or `with responses.mock:`)"
        )
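

# A hedged reference example of the safe pattern the assertion above points to,
# using the `responses` library's standard API (test name and URL are made up):
#
#     @responses.activate
#     def test_outgoing_call():
#         responses.add(responses.GET, "https://example.com/api", json={"ok": True})
#         ...  # code under test performs the HTTP request
#
# The decorator unregisters all mocks when the test returns, so nothing leaks
# into the next test.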