# test_event_ai_suggested_fix.py
  1. import time
  2. from unittest.mock import Mock, patch
  3. import pytest
  4. import responses
  5. from django.test.utils import override_settings
  6. from django.urls import reverse
  7. from openai.types.chat.chat_completion import ChatCompletion, Choice
  8. from openai.types.chat.chat_completion_message import ChatCompletionMessage
  9. from sentry.testutils.cases import APITestCase
  10. from sentry.testutils.skips import requires_snuba
# Every test in this module requires a running snuba backend (events are
# stored and read back through it in setUp).
pytestmark = [requires_snuba]
  12. @pytest.fixture(autouse=True)
  13. def openai_features():
  14. with override_settings(OPENAI_API_KEY="X"):
  15. yield
  16. @pytest.fixture(autouse=True)
  17. def openai_mock(monkeypatch):
  18. def dummy_response(*args, **kwargs):
  19. return ChatCompletion(
  20. id="test",
  21. choices=[
  22. Choice(
  23. index=0,
  24. message=ChatCompletionMessage(
  25. content="AI generated response", role="assistant"
  26. ),
  27. finish_reason="stop",
  28. )
  29. ],
  30. created=int(time.time()),
  31. model="gpt3.5-trubo",
  32. object="chat.completion",
  33. )
  34. mock_openai = Mock()
  35. mock_openai().chat.completions.create = dummy_response
  36. monkeypatch.setattr("sentry.api.endpoints.event_ai_suggested_fix.OpenAI", mock_openai)
class EventAiSuggestedFixEndpointTest(APITestCase):
    """Tests for the AI suggested-fix endpoint and its event-description helper."""

    def setUp(self):
        """Store a minimal exception event and resolve its endpoint URL."""
        super().setUp()
        self.event = self.store_event(
            project_id=self.project.id,
            data={
                "exception": {
                    "values": [
                        {
                            "type": "ZeroDivisionError",
                            "stacktrace": {"frames": [{"function": "a"}, {"function": "b"}]},
                        }
                    ]
                }
            },
        )
        self.path = reverse(
            "sentry-api-0-event-ai-fix-suggest",
            kwargs={
                "organization_id_or_slug": self.organization.slug,
                "project_id_or_slug": self.project.slug,
                "event_id": self.event.event_id,
            },
        )
        self.login_as(self.user)

    @responses.activate
    def test_consent(self):
        """Each OpenAI policy value maps to the expected HTTP outcome."""
        # individual_consent: 403 until the caller opts in via ?consent=yes.
        with patch(
            "sentry.api.endpoints.event_ai_suggested_fix.get_openai_policy",
            return_value="individual_consent",
        ):
            response = self.client.get(self.path)
            assert response.status_code == 403
            assert response.json() == {"restriction": "individual_consent"}
            response = self.client.get(self.path + "?consent=yes")
            assert response.status_code == 200
            # The suggestion text comes from the mocked OpenAI client.
            assert response.json() == {"suggestion": "AI generated response"}

        # subprocessor: always blocked.
        with patch(
            "sentry.api.endpoints.event_ai_suggested_fix.get_openai_policy",
            return_value="subprocessor",
        ):
            response = self.client.get(self.path)
            assert response.status_code == 403
            assert response.json() == {"restriction": "subprocessor"}

        # pii_certification_required: always blocked.
        with patch(
            "sentry.api.endpoints.event_ai_suggested_fix.get_openai_policy",
            return_value="pii_certification_required",
        ):
            response = self.client.get(self.path)
            assert response.status_code == 403
            assert response.json() == {"restriction": "pii_certification_required"}

        # allowed: suggestion is returned without any consent parameter.
        with patch(
            "sentry.api.endpoints.event_ai_suggested_fix.get_openai_policy",
            return_value="allowed",
        ):
            response = self.client.get(self.path)
            assert response.status_code == 200
            assert response.json() == {"suggestion": "AI generated response"}

    def test_describe_event_for_ai(self):
        """describe_event_for_ai flattens exception data and tolerates None frames.

        The assertions below also pin the observed frame ordering: the helper
        returns frames in reverse of the input order (frame 0 is "calculate",
        the innermost input frame "divide" comes last).
        """
        from sentry.api.endpoints.event_ai_suggested_fix import describe_event_for_ai

        event_data = {
            "exception": {
                "values": [
                    {
                        "type": "ArithmeticError",
                        "value": "division by zero",
                        "stacktrace": {
                            "frames": [
                                {
                                    "function": "divide",
                                    "filename": "math_operations.py",
                                    "lineno": 27,
                                    "context_line": "result = 1 / 0",
                                    "pre_context": [
                                        "def divide(x, y):",
                                        " # Attempt to divide by zero",
                                    ],
                                    "post_context": [" return result", ""],
                                    "in_app": True,
                                },
                                None,  # Edge case, just to make sure it doesn't break
                                {
                                    "function": "calculate",
                                    "filename": "main.py",
                                    "lineno": 15,
                                    "context_line": "divide(10, 0)",
                                    "pre_context": ["def calculate():", " # Calculate division"],
                                    "post_context": [" print('Calculation complete')", ""],
                                    "in_app": True,
                                },
                            ]
                        },
                    }
                ]
            }
        }
        exceptions = describe_event_for_ai(event=event_data, model="gpt-3.5-turbo")
        assert (
            len(exceptions.get("exceptions", [])) == 1
        ), "Should have one exception in the event data"
        exception = exceptions["exceptions"][0]
        assert exception["type"] == "ArithmeticError", "Exception type should be 'ArithmeticError'"
        assert (
            exception["message"] == "division by zero"
        ), "Exception message should be 'division by zero'"
        assert "stacktrace" in exception, "Exception should have a stacktrace"
        # The None frame is dropped, leaving two described frames.
        assert len(exception["stacktrace"]) == 2, "Stacktrace should have two frames"
        assert (
            exception["stacktrace"][0]["func"] == "calculate"
        ), "First frame function should be 'calculate'"
        assert (
            exception["stacktrace"][1]["func"] == "divide"
        ), "Second frame function should be 'divide'"