Просмотр исходного кода

fix(issue-platform): parse env var to conditionally run search_issues migrations in snuba (#42397)

Adds a flag to apply `search_issues` migrations to snuba when running
`devservices`
Gilbert Szeto 2 года назад
Родитель
Commit
ff5e58bf4f

+ 1 - 0
.github/actions/setup-sentry/action.yml

@@ -151,6 +151,7 @@ runs:
         NEED_CHARTCUTERIE: ${{ inputs.chartcuterie }}
         WORKDIR: ${{ inputs.workdir }}
         PG_VERSION: ${{ inputs.pg-version }}
+        ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES: '1'
       run: |
         sentry init
 

+ 3 - 2
src/sentry/conf/server.py

@@ -1,7 +1,6 @@
 """
 These settings act as the default (base) settings for the Sentry-provided web-server
 """
-
 import os
 import os.path
 import platform
@@ -1906,7 +1905,6 @@ SENTRY_USE_PROFILING = False
 # This flag activates consuming issue platform occurrence data in the development environment
 SENTRY_USE_ISSUE_OCCURRENCE = False
 
-
 # This flag activates code paths that are specific for customer domains
 SENTRY_USE_CUSTOMER_DOMAINS = False
 
@@ -2074,6 +2072,9 @@ SENTRY_DEVSERVICES = {
                 "ENABLE_ISSUE_OCCURRENCE_CONSUMER": "1"
                 if settings.SENTRY_USE_ISSUE_OCCURRENCE
                 else "",
+                "ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES": os.environ.get(
+                    "ENABLE_AUTORUN_MIGRATION_SEARCH_ISSUES", ""
+                ),
             },
             "only_if": "snuba" in settings.SENTRY_EVENTSTREAM
             or "kafka" in settings.SENTRY_EVENTSTREAM,

+ 36 - 0
tests/sentry/issues/test_search_issues_dataset.py

@@ -0,0 +1,36 @@
+from datetime import datetime, timedelta
+
+from sentry_sdk import Hub
+from snuba_sdk.legacy import json_to_snql
+
+from sentry.testutils import SnubaTestCase, TestCase
+from sentry.utils import json
+from sentry.utils.snuba import _snql_query
+
+
+class DatasetTest(SnubaTestCase, TestCase):  # type: ignore[misc]
+    def test_query_dataset_returns_empty(self) -> None:
+        # make a random query just to verify the table exists
+        now = datetime.now()
+        json_body = {
+            "selected_columns": ["project_id"],
+            "offset": 0,
+            "limit": 100,
+            "project": [1],
+            "dataset": "search_issues",
+            "groupby": ["project_id"],
+            "conditions": [
+                ["project_id", "IN", [2]],
+                ["timestamp", ">=", now - timedelta(minutes=1)],
+                ["timestamp", "<", now + timedelta(minutes=1)],
+            ],
+            "aggregations": [["count()", "", "count"]],
+            "consistent": False,
+        }
+        request = json_to_snql(json_body, "search_issues")
+        request.validate()
+        resp = _snql_query(((request, None, None), Hub(Hub.current), {}))
+        assert resp[0].status == 200
+        stuff = json.loads(resp[0].data)
+
+        assert len(stuff["data"]) == 0