
cleanup(group-attributes): Remove the option to send updates to kafka (#79334)

Removing the option `issues.group_attributes.send_kafka`, which gates
whether snapshots are sent to Kafka to update the fields in
GroupAttributes. The option is enabled globally and has been on for a
few months. This PR enables the option in tests and cleans up any
remaining references to it.
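
In practice this means the publish path no longer consults the option
store at all. Below is a minimal, self-contained sketch of the
before/after control flow; the `options` dict and `_produce` function
are hypothetical stand-ins, not the real Sentry APIs.

    # Hypothetical stand-ins for the real option store and Kafka producer.
    options: dict[str, bool] = {}

    def _produce(snapshot: dict) -> None:
        print(f"published snapshot: {snapshot}")

    # Before: every publish was silently dropped unless the option had
    # been flipped on for the current environment.
    def bulk_send_snapshot_values_before(group_ids: list[int] | None) -> None:
        if not options.get("issues.group_attributes.send_kafka", False):
            return  # snapshot never reaches Kafka; GroupAttributes goes stale
        if group_ids is None:
            raise ValueError("cannot send snapshot values when group_ids is None")
        for group_id in group_ids:
            _produce({"group_id": group_id})

    # After: the gate is gone, so snapshots are published unconditionally.
    def bulk_send_snapshot_values_after(group_ids: list[int] | None) -> None:
        if group_ids is None:
            raise ValueError("cannot send snapshot values when group_ids is None")
        for group_id in group_ids:
            _produce({"group_id": group_id})

    bulk_send_snapshot_values_before([1])  # prints nothing: option defaults to off
    bulk_send_snapshot_values_after([1])   # always publishes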

Some unit tests for daily summary and weekly reports modified groups
outside of the `with self.options({"issues.group_attributes.send_kafka":
True}):` block, meaning those changes were not published to
GroupAttributes. Since the queries for these two features use a join on
GroupAttributes, the tested behavior was inaccurate: the tests
expected resolved groups to appear in the generated reports. This PR
also fixes those inconsistencies.
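
The bug follows the pattern sketched below: only group mutations made
while the option is on produce snapshots, so a group resolved outside
the block still looks unresolved to any query that joins on
GroupAttributes. The `options_enabled` helper and the snapshot dict are
hypothetical stand-ins for the test helper and the GroupAttributes
table.

    from contextlib import contextmanager

    # Hypothetical stand-ins: an option flag, a "GroupAttributes" snapshot
    # table, and the save path that publishes a snapshot on every group save.
    send_kafka_enabled = False
    group_attributes: dict[int, str] = {}  # group_id -> status snapshot

    @contextmanager
    def options_enabled():
        global send_kafka_enabled
        send_kafka_enabled = True
        try:
            yield
        finally:
            send_kafka_enabled = False

    def save_group(group_id: int, status: str) -> None:
        if send_kafka_enabled:  # mirrors the gate removed by this PR
            group_attributes[group_id] = status

    # The buggy test pattern: the group is created inside the block...
    with options_enabled():
        save_group(1, "unresolved")

    # ...but resolved outside it, so the snapshot is never updated.
    save_group(1, "resolved")

    # A report query joining on GroupAttributes now sees stale data:
    assert group_attributes[1] == "unresolved"  # resolved group still "unresolved"
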
Snigdha Sharma 4 months ago
Parent
Commit
361e5e59c8

+ 0 - 4
src/sentry/issues/attributes.py

@@ -14,7 +14,6 @@ from django.db.models.signals import post_delete, post_save
 from django.dispatch import receiver
 from sentry_kafka_schemas.schema_types.group_attributes_v1 import GroupAttributesSnapshot
 
-from sentry import options
 from sentry.conf.types.kafka_definition import Topic
 from sentry.models.group import Group
 from sentry.models.groupassignee import GroupAssignee
@@ -91,9 +90,6 @@ def send_snapshot_values(
 def bulk_send_snapshot_values(
     group_ids: list[int] | None, groups: list[Group] | None, group_deleted: bool = False
 ) -> None:
-    if not (options.get("issues.group_attributes.send_kafka") or False):
-        return
-
     if group_ids is None and groups is None:
         raise ValueError("cannot send snapshot values when group_ids and groups are None")
 

+ 0 - 36
src/sentry/tasks/summaries/weekly_reports.py

@@ -181,15 +181,6 @@ def prepare_organization_report(
                     (e["events.group_id"], e["count()"]) for e in key_errors
                 ]
 
-                if ctx.organization.slug == "sentry":
-                    logger.info(
-                        "project_key_errors.results",
-                        extra={
-                            "batch_id": str(batch_id),
-                            "project_id": project.id,
-                            "num_key_errors": len(key_errors),
-                        },
-                    )
             key_transactions_this_week = project_key_transactions_this_week(ctx, project)
             if key_transactions_this_week:
                 project_ctx.key_transactions = [
@@ -626,36 +617,9 @@ def render_template_context(ctx, user_id: int | None) -> dict[str, Any] | None:
         }
 
     def key_errors():
-        # TODO(Steve): Remove debug logging for Sentry
         def all_key_errors():
-            if ctx.organization.slug == "sentry":
-                logger.info(
-                    "render_template_context.all_key_errors.num_projects",
-                    extra={
-                        "user_id": user_id if user_id else "",
-                        "num_user_projects": len(user_projects),
-                    },
-                )
             for project_ctx in user_projects:
-                if ctx.organization.slug == "sentry":
-                    logger.info(
-                        "render_template_context.all_key_errors.project",
-                        extra={
-                            "user_id": user_id,
-                            "project_id": project_ctx.project.id,
-                        },
-                    )
                 for group, count in project_ctx.key_errors_by_group:
-                    if ctx.organization.slug == "sentry":
-                        logger.info(
-                            "render_template_context.all_key_errors.found_error",
-                            extra={
-                                "group_id": group.id,
-                                "user_id": user_id,
-                                "project_id": project_ctx.project.id,
-                            },
-                        )
-
                     (
                         substatus,
                         substatus_color,

+ 0 - 3
src/sentry/testutils/pytest/sentry.py

@@ -270,9 +270,6 @@ def pytest_configure(config: pytest.Config) -> None:
 
     settings.SENTRY_USE_ISSUE_OCCURRENCE = True
 
-    # TODO: enable this during tests
-    settings.SENTRY_OPTIONS["issues.group_attributes.send_kafka"] = False
-
     # For now, multiprocessing does not work in tests.
     settings.KAFKA_CONSUMER_FORCE_DISABLE_MULTIPROCESSING = True
 

+ 66 - 69
tests/sentry/incidents/endpoints/test_organization_alert_rule_anomalies.py

@@ -41,29 +41,28 @@ class AlertRuleAnomalyEndpointTest(AlertRuleBase, SnubaTestCase):
     def test_simple(self, mock_seer_request, mock_seer_store_request):
         self.create_team(organization=self.organization, members=[self.user])
         two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            self.store_event(
-                data={
-                    "event_id": "a" * 32,
-                    "message": "super duper bad",
-                    "timestamp": iso_format(two_weeks_ago + timedelta(minutes=1)),
-                    "fingerprint": ["group1"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
-            self.store_event(
-                data={
-                    "event_id": "b" * 32,
-                    "message": "super bad",
-                    "timestamp": iso_format(two_weeks_ago + timedelta(days=10)),
-                    "fingerprint": ["group2"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
+        self.store_event(
+            data={
+                "event_id": "a" * 32,
+                "message": "super duper bad",
+                "timestamp": iso_format(two_weeks_ago + timedelta(minutes=1)),
+                "fingerprint": ["group1"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
+        self.store_event(
+            data={
+                "event_id": "b" * 32,
+                "message": "super bad",
+                "timestamp": iso_format(two_weeks_ago + timedelta(days=10)),
+                "fingerprint": ["group2"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
         seer_store_data_return_value: StoreDataResponse = {"success": True}
         mock_seer_store_request.return_value = HTTPResponse(
             orjson.dumps(seer_store_data_return_value), status=200
@@ -163,29 +162,28 @@ class AlertRuleAnomalyEndpointTest(AlertRuleBase, SnubaTestCase):
     def test_timeout(self, mock_logger, mock_seer_request, mock_seer_store_request):
         self.create_team(organization=self.organization, members=[self.user])
         two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            self.store_event(
-                data={
-                    "event_id": "a" * 32,
-                    "message": "super duper bad",
-                    "timestamp": iso_format(two_weeks_ago + timedelta(minutes=1)),
-                    "fingerprint": ["group1"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
-            self.store_event(
-                data={
-                    "event_id": "b" * 32,
-                    "message": "super bad",
-                    "timestamp": iso_format(two_weeks_ago + timedelta(days=10)),
-                    "fingerprint": ["group2"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
+        self.store_event(
+            data={
+                "event_id": "a" * 32,
+                "message": "super duper bad",
+                "timestamp": iso_format(two_weeks_ago + timedelta(minutes=1)),
+                "fingerprint": ["group1"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
+        self.store_event(
+            data={
+                "event_id": "b" * 32,
+                "message": "super bad",
+                "timestamp": iso_format(two_weeks_ago + timedelta(days=10)),
+                "fingerprint": ["group2"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
 
         seer_return_value: StoreDataResponse = {"success": True}
         mock_seer_store_request.return_value = HTTPResponse(
@@ -236,29 +234,28 @@ class AlertRuleAnomalyEndpointTest(AlertRuleBase, SnubaTestCase):
     def test_seer_error(self, mock_logger, mock_seer_request, mock_seer_store_request):
         self.create_team(organization=self.organization, members=[self.user])
         two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            self.store_event(
-                data={
-                    "event_id": "a" * 32,
-                    "message": "super duper bad",
-                    "timestamp": iso_format(two_weeks_ago + timedelta(minutes=1)),
-                    "fingerprint": ["group1"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
-            self.store_event(
-                data={
-                    "event_id": "b" * 32,
-                    "message": "super bad",
-                    "timestamp": iso_format(two_weeks_ago + timedelta(days=10)),
-                    "fingerprint": ["group2"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
+        self.store_event(
+            data={
+                "event_id": "a" * 32,
+                "message": "super duper bad",
+                "timestamp": iso_format(two_weeks_ago + timedelta(minutes=1)),
+                "fingerprint": ["group1"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
+        self.store_event(
+            data={
+                "event_id": "b" * 32,
+                "message": "super bad",
+                "timestamp": iso_format(two_weeks_ago + timedelta(days=10)),
+                "fingerprint": ["group2"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
 
         seer_return_value: StoreDataResponse = {"success": True}
         mock_seer_store_request.return_value = HTTPResponse(

+ 22 - 23
tests/sentry/incidents/endpoints/test_organization_alert_rule_index.py

@@ -309,29 +309,28 @@ class AlertRuleCreateEndpointTest(AlertRuleIndexBase, SnubaTestCase):
         seer_return_value: StoreDataResponse = {"success": True}
         mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
         two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            self.store_event(
-                data={
-                    "event_id": "a" * 32,
-                    "message": "super duper bad",
-                    "timestamp": iso_format(two_weeks_ago + timedelta(minutes=1)),
-                    "fingerprint": ["group1"],
-                    "tags": {"sentry:user": self.user.email},
-                },
-                default_event_type=EventType.ERROR,
-                project_id=self.project.id,
-            )
-            self.store_event(
-                data={
-                    "event_id": "b" * 32,
-                    "message": "super bad",
-                    "timestamp": iso_format(two_weeks_ago + timedelta(days=10)),
-                    "fingerprint": ["group2"],
-                    "tags": {"sentry:user": self.user.email},
-                },
-                default_event_type=EventType.ERROR,
-                project_id=self.project.id,
-            )
+        self.store_event(
+            data={
+                "event_id": "a" * 32,
+                "message": "super duper bad",
+                "timestamp": iso_format(two_weeks_ago + timedelta(minutes=1)),
+                "fingerprint": ["group1"],
+                "tags": {"sentry:user": self.user.email},
+            },
+            default_event_type=EventType.ERROR,
+            project_id=self.project.id,
+        )
+        self.store_event(
+            data={
+                "event_id": "b" * 32,
+                "message": "super bad",
+                "timestamp": iso_format(two_weeks_ago + timedelta(days=10)),
+                "fingerprint": ["group2"],
+                "tags": {"sentry:user": self.user.email},
+            },
+            default_event_type=EventType.ERROR,
+            project_id=self.project.id,
+        )
 
         with outbox_runner():
             resp = self.get_success_response(

+ 8 - 12
tests/sentry/incidents/test_logic.py

@@ -103,7 +103,6 @@ from sentry.snuba.models import QuerySubscription, SnubaQuery, SnubaQueryEventTy
 from sentry.testutils.cases import BaseIncidentsTest, BaseMetricsTestCase, TestCase
 from sentry.testutils.helpers.datetime import before_now, freeze_time, iso_format
 from sentry.testutils.helpers.features import with_feature
-from sentry.testutils.helpers.options import override_options
 from sentry.testutils.silo import assume_test_silo_mode, assume_test_silo_mode_of
 from sentry.types.actor import Actor
 
@@ -309,7 +308,6 @@ class GetIncidentAggregatesTest(TestCase, BaseIncidentAggregatesTest):
     def test_projects(self):
         assert get_incident_aggregates(self.project_incident) == {"count": 4}
 
-    @override_options({"issues.group_attributes.send_kafka": True})
     def test_is_unresolved_query(self):
         incident = self.create_incident(
             date_started=self.now - timedelta(minutes=5),
@@ -1811,11 +1809,10 @@ class UpdateAlertRuleTest(TestCase, BaseIncidentsTest):
         mock_seer_request.reset_mock()
 
         two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            self.create_error_event(timestamp=iso_format(two_weeks_ago + timedelta(minutes=1)))
-            self.create_error_event(
-                timestamp=iso_format(two_weeks_ago + timedelta(days=10))
-            )  # 4 days ago
+        self.create_error_event(timestamp=iso_format(two_weeks_ago + timedelta(minutes=1)))
+        self.create_error_event(
+            timestamp=iso_format(two_weeks_ago + timedelta(days=10))
+        )  # 4 days ago
 
         # update aggregate
         update_alert_rule(
@@ -1840,11 +1837,10 @@ class UpdateAlertRuleTest(TestCase, BaseIncidentsTest):
         mock_seer_request.return_value = HTTPResponse(orjson.dumps(seer_return_value), status=200)
 
         two_weeks_ago = before_now(days=14).replace(hour=10, minute=0, second=0, microsecond=0)
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            self.create_error_event(timestamp=iso_format(two_weeks_ago + timedelta(minutes=1)))
-            self.create_error_event(
-                timestamp=iso_format(two_weeks_ago + timedelta(days=10))
-            )  # 4 days ago
+        self.create_error_event(timestamp=iso_format(two_weeks_ago + timedelta(minutes=1)))
+        self.create_error_event(
+            timestamp=iso_format(two_weeks_ago + timedelta(days=10))
+        )  # 4 days ago
 
         dynamic_rule = self.create_alert_rule(
             sensitivity=AlertRuleSensitivity.HIGH,

+ 19 - 20
tests/sentry/incidents/test_subscription_processor.py

@@ -2663,26 +2663,25 @@ class ProcessUpdateTest(ProcessUpdateBaseClass):
         )
         comparison_date = timezone.now() - comparison_delta
 
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            for i in range(4):
-                data = {
-                    "timestamp": iso_format(comparison_date - timedelta(minutes=30 + i)),
-                    "stacktrace": copy.deepcopy(DEFAULT_EVENT_DATA["stacktrace"]),
-                    "fingerprint": ["group2"],
-                    "level": "error",
-                    "exception": {
-                        "values": [
-                            {
-                                "type": "IntegrationError",
-                                "value": "Identity not found.",
-                            }
-                        ]
-                    },
-                }
-                self.store_event(
-                    data=data,
-                    project_id=self.project.id,
-                )
+        for i in range(4):
+            data = {
+                "timestamp": iso_format(comparison_date - timedelta(minutes=30 + i)),
+                "stacktrace": copy.deepcopy(DEFAULT_EVENT_DATA["stacktrace"]),
+                "fingerprint": ["group2"],
+                "level": "error",
+                "exception": {
+                    "values": [
+                        {
+                            "type": "IntegrationError",
+                            "value": "Identity not found.",
+                        }
+                    ]
+                },
+            }
+            self.store_event(
+                data=data,
+                project_id=self.project.id,
+            )
 
         self.metrics.incr.reset_mock()
         processor = self.send_update(rule, 2, timedelta(minutes=-9), subscription=self.sub)

+ 0 - 2
tests/sentry/issues/endpoints/test_organization_group_index.py

@@ -83,7 +83,6 @@ class GroupListTest(APITestCase, SnubaTestCase, SearchIssueTestMixin):
     def setUp(self) -> None:
         super().setUp()
         self.min_ago = before_now(minutes=1)
-        options.set("issues.group_attributes.send_kafka", True)
 
     def _parse_links(self, header):
         # links come in {url: {...attrs}}, but we need {rel: {...attrs}}
@@ -1233,7 +1232,6 @@ class GroupListTest(APITestCase, SnubaTestCase, SearchIssueTestMixin):
         assert response.status_code == 200
         assert len(response.data) == 0
 
-    @override_options({"issues.group_attributes.send_kafka": False})
     @with_feature({"organizations:issue-search-snuba": False})
     def test_assigned_or_suggested_search(self, _: MagicMock) -> None:
         event = self.store_event(

+ 1 - 6
tests/sentry/issues/test_attributes.py

@@ -224,12 +224,7 @@ class PostUpdateLogGroupAttributesChangedTest(TestCase):
         with patch(
             "sentry.issues.attributes._log_group_attributes_changed"
         ) as _log_group_attributes_changed:
-            with override_options(
-                {
-                    "groups.enable-post-update-signal": True,
-                    "issues.group_attributes.send_kafka": True,
-                }
-            ):
+            with override_options({"groups.enable-post-update-signal": True}):
                 Group.objects.filter(id__in=[g.id for g in groups]).update(**update_fields)
             _log_group_attributes_changed.assert_called_with(
                 Operation.UPDATED, "group", expected_str

+ 44 - 46
tests/sentry/seer/anomaly_detection/test_store_data.py

@@ -114,29 +114,28 @@ class AnomalyDetectionStoreDataTest(AlertRuleBase, BaseMetricsTestCase, Performa
         alert_rule = self.create_alert_rule(organization=self.organization, projects=[self.project])
         snuba_query = SnubaQuery.objects.get(id=alert_rule.snuba_query_id)
 
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            self.store_event(
-                data={
-                    "event_id": "a" * 32,
-                    "message": "super duper bad",
-                    "timestamp": self.time_1_dt.isoformat(),
-                    "fingerprint": ["group1"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
-            self.store_event(
-                data={
-                    "event_id": "b" * 32,
-                    "message": "super bad",
-                    "timestamp": self.time_2_dt.isoformat(),
-                    "fingerprint": ["group2"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
+        self.store_event(
+            data={
+                "event_id": "a" * 32,
+                "message": "super duper bad",
+                "timestamp": self.time_1_dt.isoformat(),
+                "fingerprint": ["group1"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
+        self.store_event(
+            data={
+                "event_id": "b" * 32,
+                "message": "super bad",
+                "timestamp": self.time_2_dt.isoformat(),
+                "fingerprint": ["group2"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
         result = fetch_historical_data(self.organization, snuba_query, ["count()"], self.project)
         assert result
         assert {"time": int(self.time_1_ts), "count": 1} in result.data.get("data")
@@ -148,29 +147,28 @@ class AnomalyDetectionStoreDataTest(AlertRuleBase, BaseMetricsTestCase, Performa
         snuba_query.query = "is:unresolved"
         snuba_query.save()
 
-        with self.options({"issues.group_attributes.send_kafka": True}):
-            self.store_event(
-                data={
-                    "event_id": "a" * 32,
-                    "message": "super duper bad",
-                    "timestamp": self.time_1_dt.isoformat(),
-                    "fingerprint": ["group1"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
-            self.store_event(
-                data={
-                    "event_id": "b" * 32,
-                    "message": "super bad",
-                    "timestamp": self.time_2_dt.isoformat(),
-                    "fingerprint": ["group2"],
-                    "tags": {"sentry:user": self.user.email},
-                    "exception": [{"value": "BadError"}],
-                },
-                project_id=self.project.id,
-            )
+        self.store_event(
+            data={
+                "event_id": "a" * 32,
+                "message": "super duper bad",
+                "timestamp": self.time_1_dt.isoformat(),
+                "fingerprint": ["group1"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
+        self.store_event(
+            data={
+                "event_id": "b" * 32,
+                "message": "super bad",
+                "timestamp": self.time_2_dt.isoformat(),
+                "fingerprint": ["group2"],
+                "tags": {"sentry:user": self.user.email},
+                "exception": [{"value": "BadError"}],
+            },
+            project_id=self.project.id,
+        )
         result = fetch_historical_data(self.organization, snuba_query, ["count()"], self.project)
         assert result
         assert {"time": int(self.time_1_ts), "count": 1} in result.data.get("data")

Some files were not shown because too many files changed