conftest.py

import os
from typing import MutableMapping

import pytest
from django.db import connections

from sentry.silo import SiloMode

pytest_plugins = ["sentry.utils.pytest"]


# XXX: The code below is vendored from https://github.com/utgwkk/pytest-github-actions-annotate-failures
# so that we can add support for pytest_rerunfailures: tests that are going to be
# retried will no longer be annotated in GHA.
#
# Reference:
# https://docs.pytest.org/en/latest/writing_plugins.html#hookwrapper-executing-around-other-hooks
# https://docs.pytest.org/en/latest/writing_plugins.html#hook-function-ordering-call-example
# https://docs.pytest.org/en/stable/reference.html#pytest.hookspec.pytest_runtest_makereport
#
# Inspired by:
# https://github.com/pytest-dev/pytest/blob/master/src/_pytest/terminal.py
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # execute all other hooks to obtain the report object
    outcome = yield
    report = outcome.get_result()

    # enable only in a workflow of GitHub Actions
    # ref: https://help.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables#default-environment-variables
    if os.environ.get("GITHUB_ACTIONS") != "true":
        return

    # If we have the pytest_rerunfailures plugin,
    # and there are still retries to be run,
    # then do not return the error
    if hasattr(item, "execution_count"):
        import pytest_rerunfailures

        if item.execution_count <= pytest_rerunfailures.get_reruns_count(item):
            return

    if report.when == "call" and report.failed:
        # collect information to be annotated
        filesystempath, lineno, _ = report.location

        # try to convert to absolute path in GitHub Actions
        workspace = os.environ.get("GITHUB_WORKSPACE")
        if workspace:
            full_path = os.path.abspath(filesystempath)
            try:
                rel_path = os.path.relpath(full_path, workspace)
            except ValueError:
                # os.path.relpath() will raise ValueError on Windows
                # when full_path and workspace have different mount points.
                # https://github.com/utgwkk/pytest-github-actions-annotate-failures/issues/20
                rel_path = filesystempath
            if not rel_path.startswith(".."):
                filesystempath = rel_path

        if lineno is not None:
            # 0-index to 1-index
            lineno += 1

        # get the name of the current failed test, with parametrize info
        longrepr = report.head_line or item.name

        # get the error message and line number from the actual error
        try:
            longrepr += "\n\n" + report.longrepr.reprcrash.message
            lineno = report.longrepr.reprcrash.lineno
        except AttributeError:
            pass

        print(_error_workflow_command(filesystempath, lineno, longrepr))  # noqa: S002


def _error_workflow_command(filesystempath, lineno, longrepr):
    # Build collection of arguments. Ordering is strict for easy testing
    details_dict = {"file": filesystempath}
    if lineno is not None:
        details_dict["line"] = lineno

    details = ",".join(f"{k}={v}" for k, v in details_dict.items())

    if longrepr is None:
        return f"\n::error {details}"
    else:
        longrepr = _escape(longrepr)
        return f"\n::error {details}::{longrepr}"


def _escape(s):
    return s.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A")
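

# Illustrative sketch only (not part of the vendored code): the hook above emits a
# GitHub Actions "workflow command" on stdout, which the runner turns into an inline
# annotation on the failing file and line. With a hypothetical failure in
# tests/test_example.py at line 12, the printed command would look roughly like
#
#   ::error file=tests/test_example.py,line=12::test_addition%0A%0Aassert 1 == 2
#
# (newlines in the message are escaped to %0A by _escape above).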


_MODEL_MANIFEST_FILE_PATH = "./model-manifest.json"  # os.getenv("SENTRY_MODEL_MANIFEST_FILE_PATH")
_model_manifest = None


@pytest.fixture(scope="session", autouse=True)
def create_model_manifest_file():
    """Audit which models are touched by each test case and write it to file."""
    # We have to construct the ModelManifest lazily, because importing
    # sentry.testutils.modelmanifest too early causes a dependency cycle.
    from sentry.testutils.modelmanifest import ModelManifest

    if _MODEL_MANIFEST_FILE_PATH:
        global _model_manifest
        _model_manifest = ModelManifest.open(_MODEL_MANIFEST_FILE_PATH)
        with _model_manifest.write():
            yield
    else:
        yield


@pytest.fixture(scope="class", autouse=True)
def register_class_in_model_manifest(request: pytest.FixtureRequest):
    if _model_manifest:
        with _model_manifest.register(request.node.nodeid):
            yield
    else:
        yield


@pytest.fixture(autouse=True)
def validate_silo_mode():
    # NOTE! Hybrid cloud uses many mechanisms to simulate multiple different configurations of the
    # application during tests. It depends upon `override_settings` using the correct context manager
    # behaviors and on correct thread handling in acceptance tests. If you hit one of these exceptions,
    # it's possible either that cleanup logic has a bug or that you are using a context manager
    # incorrectly. Let us know and we can help!
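    #
    # Illustrative sketch only (not executed here; the SILO_MODE setting name is an
    # assumption, not taken from this file): tests typically flip silo modes with
    # Django's `override_settings` context manager, e.g.
    #
    #     with override_settings(SILO_MODE=SiloMode.CONTROL):
    #         ...
    #
    # If such a block is entered but never exited, the assertions below (and the
    # next test's run of this fixture) will fail.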
    if SiloMode.get_current_mode() != SiloMode.MONOLITH:
        raise Exception(
            "Possible test leak bug! SiloMode was not reset to Monolith between tests. Please read the comment for validate_silo_mode() in tests/conftest.py."
        )
    yield
    if SiloMode.get_current_mode() != SiloMode.MONOLITH:
        raise Exception(
            "Possible test leak bug! SiloMode was not reset to Monolith between tests. Please read the comment for validate_silo_mode() in tests/conftest.py."
        )


@pytest.fixture(autouse=True)
def setup_simulate_on_commit(request):
    from sentry.testutils.hybrid_cloud import simulate_on_commit

    with simulate_on_commit(request):
        yield


@pytest.fixture(autouse=True)
def audit_hybrid_cloud_writes_and_deletes(request):
    """
    Ensure that write operations on hybrid cloud foreign keys are recorded
    alongside outboxes, or use a context manager to indicate that the
    caller has considered the outbox and didn't accidentally forget.

    Generally you can avoid assertion errors from these checks by:

    1. Running deletion/write logic within an `outbox_context` (sketched just
       below this docstring).
    2. Using Model.delete()/save methods that create outbox messages in the
       same transaction as the delete operation.

    Scenarios that are generally always unsafe are using `QuerySet.delete()`,
    `QuerySet.update()`, or raw SQL to perform writes.

    The User.delete() method is a good example of how to safely delete records
    and generate outbox messages.
    """
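    # Illustrative sketch of point 1 in the docstring (the exact signature of
    # `outbox_context` may differ from what is shown; `instance` is a hypothetical
    # hybrid cloud model instance):
    #
    #     with outbox_context(transaction.atomic()):
    #         instance.delete()
    #
    # Writing inside an outbox_context, or via Model.delete()/save() methods that
    # create the outbox row in the same transaction, is what satisfies this
    # fixture's audit.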
    from sentry.testutils.silo import validate_protected_queries

    debug_cursor_state: MutableMapping[str, bool] = {}
    for conn in connections.all():
        debug_cursor_state[conn.alias] = conn.force_debug_cursor

        conn.queries_log.clear()
        conn.force_debug_cursor = True

    try:
        yield
    finally:
        for conn in connections.all():
            conn.force_debug_cursor = debug_cursor_state[conn.alias]

            validate_protected_queries(conn.queries)