
chore(issue-platform): Remove the `organizations:issue-platform` flag from the backend (#72071)

Removing the `organizations:issue-platform` feature flag now that it's
been released in self-hosted
(https://github.com/getsentry/self-hosted/pull/2309).
Snigdha Sharma · commit 74411e2061
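
For context, backend gating on this flag followed the standard `features.has` pattern seen throughout the hunks below; a minimal sketch (the handler name is hypothetical):

    from sentry import features

    def handle_request(organization, actor):
        # Organization-scoped flags are checked per request; removing the
        # flag means the "enabled" branch becomes unconditional.
        if features.has("organizations:issue-platform", organization=organization, actor=actor):
            ...  # issue-platform path (now the only path)
        else:
            ...  # legacy path (deleted by this commit)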

+ 1 - 4
src/sentry/issues/search.py

@@ -6,7 +6,6 @@ from collections.abc import Callable, Mapping, Sequence
 from copy import deepcopy
 from typing import Any, Optional, Protocol, TypedDict
 
-from sentry import features
 from sentry.api.event_search import SearchFilter, SearchKey, SearchValue
 from sentry.issues import grouptype
 from sentry.issues.grouptype import GroupCategory, get_all_group_type_ids, get_group_type_by_type_id
@@ -237,9 +236,7 @@ def _query_params_for_generic(
     categories: Sequence[GroupCategory] | None = None,
 ) -> SnubaQueryParams | None:
     organization = Organization.objects.filter(id=organization_id).first()
-    if organization and features.has(
-        "organizations:issue-platform", organization=organization, actor=actor
-    ):
+    if organization:
         if categories is None:
             logging.error("Category is required in _query_params_for_generic")
             return None
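
With the flag gone, the organization lookup alone gates the generic query; a minimal sketch of the resulting guard (names from the hunk above, query construction elided):

    def _query_params_for_generic(organization_id, actor=None, categories=None):
        organization = Organization.objects.filter(id=organization_id).first()
        # Previously this also required features.has("organizations:issue-platform", ...).
        if organization:
            if categories is None:
                logging.error("Category is required in _query_params_for_generic")
                return None
            ...  # build and return SnubaQueryParams
        return None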

+ 1 - 65
src/sentry/monitors/logic/mark_failed.py

@@ -11,9 +11,7 @@ from django.db.models import Q
 from django.utils.text import get_text_list
 from django.utils.translation import gettext_lazy as _
 
-from sentry import features
 from sentry.issues.grouptype import MonitorIncidentType
-from sentry.models.organization import Organization
 from sentry.monitors.models import (
     CheckInStatus,
     MonitorCheckIn,
@@ -89,16 +87,7 @@ def mark_failed(
     monitor_env.refresh_from_db()
 
     # Create incidents + issues
-    try:
-        organization = Organization.objects.get_from_cache(id=monitor_env.monitor.organization_id)
-        use_issue_platform = features.has("organizations:issue-platform", organization=organization)
-    except Organization.DoesNotExist:
-        use_issue_platform = False
-
-    if use_issue_platform:
-        return mark_failed_threshold(failed_checkin, failure_issue_threshold, received)
-    else:
-        return mark_failed_no_threshold(failed_checkin)
+    return mark_failed_threshold(failed_checkin, failure_issue_threshold, received)
 
 
 class SimpleCheckIn(TypedDict):
@@ -196,59 +185,6 @@ def mark_failed_threshold(
     return True
 
 
-def mark_failed_no_threshold(failed_checkin: MonitorCheckIn):
-    from sentry.signals import monitor_environment_failed
-
-    monitor_env = failed_checkin.monitor_environment
-
-    monitor_env.update(status=MonitorStatus.ERROR)
-
-    # Do not create event if monitor or monitor environment is muted
-    if monitor_env.monitor.is_muted or monitor_env.is_muted:
-        return True
-
-    create_legacy_event(failed_checkin)
-
-    monitor_environment_failed.send(monitor_environment=monitor_env, sender=type(monitor_env))
-
-    return True
-
-
-def create_legacy_event(failed_checkin: MonitorCheckIn):
-    from sentry.coreapi import insert_data_to_database_legacy
-    from sentry.event_manager import EventManager
-    from sentry.models.project import Project
-
-    monitor_env = failed_checkin.monitor_environment
-    context = get_monitor_environment_context(monitor_env)
-
-    # XXX(epurkhiser): This matches up with the occurrence_data reason
-    reason_map = {
-        CheckInStatus.MISSED: "missed_checkin",
-        CheckInStatus.TIMEOUT: "duration",
-    }
-    reason = reason_map.get(failed_checkin.status, "unknown")
-
-    event_manager = EventManager(
-        {
-            "logentry": {"message": f"Monitor failure: {monitor_env.monitor.name} ({reason})"},
-            "contexts": {"monitor": context},
-            "fingerprint": ["monitor", str(monitor_env.monitor.guid), reason],
-            "environment": monitor_env.get_environment().name,
-            # TODO: Both of these values should be get transformed from context to tags
-            # We should understand why that is not happening and remove these when it correctly is
-            "tags": {
-                "monitor.id": str(monitor_env.monitor.guid),
-                "monitor.slug": monitor_env.monitor.slug,
-            },
-        },
-        project=Project(id=monitor_env.monitor.project_id),
-    )
-    event_manager.normalize()
-    data = event_manager.get_data()
-    insert_data_to_database_legacy(data)
-
-
 def create_issue_platform_occurrence(
     failed_checkins: Sequence[SimpleCheckIn],
     failed_checkin: MonitorCheckIn,
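
The net effect on mark_failed: every failed check-in now takes the threshold path, and the two legacy helpers (mark_failed_no_threshold, create_legacy_event) are deleted outright. A minimal sketch of the resulting tail, with the signature abbreviated and the earlier bookkeeping elided:

    def mark_failed(failed_checkin, ts, failure_issue_threshold=None, received=None):
        ...  # resolve monitor_env, update statuses, refresh_from_db (unchanged)
        # Create incidents + issues. The Organization cache lookup and the
        # features.has() fallback to mark_failed_no_threshold were removed.
        return mark_failed_threshold(failed_checkin, failure_issue_threshold, received)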

+ 1 - 6
src/sentry/search/snuba/executors.py

@@ -174,12 +174,7 @@ def group_categories_from_search_filters(
     group_categories = group_categories_from(search_filters)
 
     if not group_categories:
-        group_categories = {
-            gc
-            for gc in get_search_strategies().keys()
-            if gc != GroupCategory.PROFILE.value
-            or features.has("organizations:issue-platform", organization, actor=actor)
-        }
+        group_categories = set(get_search_strategies().keys())
         # if we're not searching for feedbacks, then hide them by default
         group_categories.discard(GroupCategory.FEEDBACK.value)
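
The PROFILE carve-out in the default search categories disappears with the flag; a minimal sketch of the simplified default (names from the hunk above, remainder of the function elided):

    def group_categories_from_search_filters(search_filters, organization, actor):
        group_categories = group_categories_from(search_filters)

        if not group_categories:
            # Every registered strategy is now searchable by default;
            # GroupCategory.PROFILE was previously excluded unless the flag was on.
            group_categories = set(get_search_strategies().keys())
            # if we're not searching for feedbacks, then hide them by default
            group_categories.discard(GroupCategory.FEEDBACK.value)
        ...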
 

+ 0 - 1
tests/acceptance/test_organization_monitors.py

@@ -20,7 +20,6 @@ from sentry.testutils.silo import no_silo_test
 class OrganizationMontorsTest(AcceptanceTestCase):
     def setUp(self):
         super().setUp()
-        self.features = ["organizations:issue-platform"]
         self.path = f"/organizations/{self.organization.slug}/crons/"
         self.team = self.create_team(organization=self.organization, name="Mariachi Band")
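
The test changes in this commit each delete one of three equivalent ways these suites enable a flag; for reference, a sketch using the helpers as they appear in the diffs (the test class itself is hypothetical):

    from sentry.testutils.cases import TestCase
    from sentry.testutils.helpers import with_feature

    class ExampleTest(TestCase):
        # 1. Decorator form, scoped to one test method.
        @with_feature("organizations:issue-platform")
        def test_decorator(self):
            ...

        # 2. Context-manager form, scoped to a block.
        def test_context_manager(self):
            with self.feature("organizations:issue-platform"):
                ...

    # 3. Acceptance tests set a feature list in setUp (see the hunk above):
    #     self.features = ["organizations:issue-platform"]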
 

+ 20 - 18
tests/acceptance/test_performance_issues.py

@@ -68,15 +68,16 @@ class PerformanceIssuesTest(AcceptanceTestCase, SnubaTestCase, PerformanceIssueT
             "n-plus-one-in-django-new-view", mock_now.return_value.timestamp()
         )
 
-        with mock.patch(
-            "sentry.issues.ingest.send_issue_occurrence_to_eventstream",
-            side_effect=send_issue_occurrence_to_eventstream,
-        ) as mock_eventstream, mock.patch.object(
-            PerformanceNPlusOneGroupType,
-            "noise_config",
-            new=NoiseConfig(0, timedelta(minutes=1)),
-        ), self.feature(
-            "organizations:issue-platform"
+        with (
+            mock.patch(
+                "sentry.issues.ingest.send_issue_occurrence_to_eventstream",
+                side_effect=send_issue_occurrence_to_eventstream,
+            ) as mock_eventstream,
+            mock.patch.object(
+                PerformanceNPlusOneGroupType,
+                "noise_config",
+                new=NoiseConfig(0, timedelta(minutes=1)),
+            ),
         ):
             self.store_event(data=event_data, project_id=self.project.id)
             group = mock_eventstream.call_args[0][2].group
@@ -104,15 +105,16 @@ class PerformanceIssuesTest(AcceptanceTestCase, SnubaTestCase, PerformanceIssueT
 
         event_data["contexts"]["trace"]["op"] = "navigation"
 
-        with mock.patch(
-            "sentry.issues.ingest.send_issue_occurrence_to_eventstream",
-            side_effect=send_issue_occurrence_to_eventstream,
-        ) as mock_eventstream, mock.patch.object(
-            PerformanceNPlusOneAPICallsGroupType,
-            "noise_config",
-            new=NoiseConfig(0, timedelta(minutes=1)),
-        ), self.feature(
-            "organizations:issue-platform"
+        with (
+            mock.patch(
+                "sentry.issues.ingest.send_issue_occurrence_to_eventstream",
+                side_effect=send_issue_occurrence_to_eventstream,
+            ) as mock_eventstream,
+            mock.patch.object(
+                PerformanceNPlusOneAPICallsGroupType,
+                "noise_config",
+                new=NoiseConfig(0, timedelta(minutes=1)),
+            ),
         ):
             self.store_event(data=event_data, project_id=self.project.id)
             group = mock_eventstream.call_args[0][2].group
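
Dropping self.feature(...) leaves two context managers, which the commit regroups using parenthesized with-statements (Python 3.10+); the general shape, as a runnable sketch:

    from contextlib import nullcontext

    # Parentheses allow one manager per line, optional `as` bindings,
    # and a trailing comma, with no backslash continuations.
    with (
        nullcontext("first") as a,
        nullcontext("second") as b,
    ):
        print(a, b)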

+ 0 - 1
tests/sentry/feedback/usecases/test_create_feedback.py

@@ -700,7 +700,6 @@ def test_create_feedback_spam_detection_adds_field_calls(
         {
             "organizations:user-feedback-spam-filter-actions": True,
             "organizations:user-feedback-spam-filter-ingest": True,
-            "organizations:issue-platform": True,
             "organizations:feedback-ingest": True,
             "organizations:feedback-post-process-group": True,
         }

+ 0 - 2
tests/sentry/issues/endpoints/test_organization_group_index.py

@@ -2873,7 +2873,6 @@ class GroupListTest(APITestCase, SnubaTestCase, SearchIssueTestMixin):
         autospec=True,
     )
     @override_options({"issues.group_attributes.send_kafka": True})
-    @with_feature("organizations:issue-platform")
     def test_snuba_perf_issue(self, mock_query: MagicMock) -> None:
         self.project = self.create_project(organization=self.organization)
         # create a performance issue
@@ -2934,7 +2933,6 @@ class GroupListTest(APITestCase, SnubaTestCase, SearchIssueTestMixin):
         autospec=True,
     )
     @override_options({"issues.group_attributes.send_kafka": True})
-    @with_feature("organizations:issue-platform")
     @with_feature(PerformanceRenderBlockingAssetSpanGroupType.build_visible_feature_name())
     @with_feature(PerformanceNPlusOneGroupType.build_visible_feature_name())
     def test_snuba_type_and_category(

+ 1 - 203
tests/sentry/monitors/logic/test_mark_failed.py

@@ -1,5 +1,4 @@
 import uuid
-from datetime import timedelta
 from itertools import cycle
 from unittest.mock import patch
 
@@ -21,206 +20,11 @@ from sentry.monitors.models import (
     ScheduleType,
 )
 from sentry.testutils.cases import TestCase
-from sentry.testutils.helpers import with_feature
 
 
 class MarkFailedTestCase(TestCase):
-    @with_feature({"organizations:issue-platform": False})
-    @patch("sentry.coreapi.insert_data_to_database_legacy")
-    def test_mark_failed_default_params_legacy(self, mock_insert_data_to_database_legacy):
-        monitor = Monitor.objects.create(
-            name="test monitor",
-            organization_id=self.organization.id,
-            project_id=self.project.id,
-            type=MonitorType.CRON_JOB,
-            config={
-                "schedule": [1, "month"],
-                "schedule_type": ScheduleType.INTERVAL,
-                "max_runtime": None,
-                "checkin_margin": None,
-            },
-        )
-        monitor_environment = MonitorEnvironment.objects.create(
-            monitor=monitor,
-            environment_id=self.environment.id,
-            status=monitor.status,
-        )
-        checkin = MonitorCheckIn.objects.create(
-            monitor=monitor,
-            monitor_environment=monitor_environment,
-            project_id=self.project.id,
-            status=CheckInStatus.UNKNOWN,
-        )
-        assert mark_failed(checkin, ts=checkin.date_added)
-
-        assert len(mock_insert_data_to_database_legacy.mock_calls) == 1
-
-        event = mock_insert_data_to_database_legacy.mock_calls[0].args[0]
-
-        assert dict(
-            event,
-            **{
-                "level": "error",
-                "project": self.project.id,
-                "environment": monitor_environment.get_environment().name,
-                "platform": "other",
-                "contexts": {
-                    "monitor": {
-                        "status": "error",
-                        "type": "cron_job",
-                        "config": {
-                            "schedule_type": 2,
-                            "schedule": [1, "month"],
-                            "max_runtime": None,
-                            "checkin_margin": None,
-                        },
-                        "id": str(monitor.guid),
-                        "name": monitor.name,
-                        "slug": str(monitor.slug),
-                    }
-                },
-                "logentry": {"formatted": "Monitor failure: test monitor (unknown)"},
-                "fingerprint": ["monitor", str(monitor.guid), "unknown"],
-                "logger": "",
-                "type": "default",
-            },
-        ) == dict(event)
-
-    @with_feature({"organizations:issue-platform": False})
-    @patch("sentry.coreapi.insert_data_to_database_legacy")
-    def test_mark_failed_with_reason_legacy(self, mock_insert_data_to_database_legacy):
-        monitor = Monitor.objects.create(
-            name="test monitor",
-            organization_id=self.organization.id,
-            project_id=self.project.id,
-            type=MonitorType.CRON_JOB,
-            config={
-                "schedule": [1, "month"],
-                "schedule_type": ScheduleType.INTERVAL,
-                "max_runtime": None,
-                "checkin_margin": None,
-            },
-        )
-        monitor_environment = MonitorEnvironment.objects.create(
-            monitor=monitor,
-            environment_id=self.environment.id,
-            status=monitor.status,
-        )
-        checkin = MonitorCheckIn.objects.create(
-            monitor=monitor,
-            monitor_environment=monitor_environment,
-            project_id=self.project.id,
-            status=CheckInStatus.TIMEOUT,
-        )
-        assert mark_failed(checkin, ts=checkin.date_added)
-
-        assert len(mock_insert_data_to_database_legacy.mock_calls) == 1
-
-        event = mock_insert_data_to_database_legacy.mock_calls[0].args[0]
-
-        assert dict(
-            event,
-            **{
-                "level": "error",
-                "project": self.project.id,
-                "environment": monitor_environment.get_environment().name,
-                "platform": "other",
-                "contexts": {
-                    "monitor": {
-                        "status": "error",
-                        "type": "cron_job",
-                        "config": {
-                            "schedule_type": 2,
-                            "schedule": [1, "month"],
-                            "max_runtime": None,
-                            "checkin_margin": None,
-                        },
-                        "id": str(monitor.guid),
-                        "name": monitor.name,
-                        "slug": monitor.slug,
-                    }
-                },
-                "logentry": {"formatted": "Monitor failure: test monitor (duration)"},
-                "fingerprint": ["monitor", str(monitor.guid), "duration"],
-                "logger": "",
-                "type": "default",
-            },
-        ) == dict(event)
-
-    @with_feature({"organizations:issue-platform": False})
-    @patch("sentry.coreapi.insert_data_to_database_legacy")
-    def test_mark_failed_with_missed_reason_legacy(self, mock_insert_data_to_database_legacy):
-        last_checkin = timezone.now().replace(second=0, microsecond=0)
-        next_checkin = last_checkin + timedelta(hours=1)
-
-        monitor = Monitor.objects.create(
-            name="test monitor",
-            organization_id=self.organization.id,
-            project_id=self.project.id,
-            type=MonitorType.CRON_JOB,
-            config={
-                "schedule": [1, "hour"],
-                "schedule_type": ScheduleType.INTERVAL,
-                "max_runtime": None,
-                "checkin_margin": None,
-            },
-        )
-        monitor_environment = MonitorEnvironment.objects.create(
-            monitor=monitor,
-            environment_id=self.environment.id,
-            last_checkin=last_checkin,
-            next_checkin=next_checkin,
-            next_checkin_latest=next_checkin + timedelta(minutes=1),
-            status=monitor.status,
-        )
-        checkin = MonitorCheckIn.objects.create(
-            monitor=monitor,
-            monitor_environment=monitor_environment,
-            project_id=self.project.id,
-            status=CheckInStatus.MISSED,
-        )
-        assert mark_failed(checkin, ts=checkin.date_added)
-
-        monitor.refresh_from_db()
-        monitor_environment.refresh_from_db()
-        assert monitor_environment.status == MonitorStatus.ERROR
-
-        assert len(mock_insert_data_to_database_legacy.mock_calls) == 1
-
-        event = mock_insert_data_to_database_legacy.mock_calls[0].args[0]
-
-        assert dict(
-            event,
-            **{
-                "level": "error",
-                "project": self.project.id,
-                "environment": monitor_environment.get_environment().name,
-                "platform": "other",
-                "contexts": {
-                    "monitor": {
-                        "status": "error",
-                        "type": "cron_job",
-                        "config": {
-                            "schedule_type": 2,
-                            "schedule": [1, "hour"],
-                            "max_runtime": None,
-                            "checkin_margin": None,
-                        },
-                        "id": str(monitor.guid),
-                        "name": monitor.name,
-                        "slug": monitor.slug,
-                    }
-                },
-                "logentry": {"formatted": "Monitor failure: test monitor (missed_checkin)"},
-                "fingerprint": ["monitor", str(monitor.guid), "missed_checkin"],
-                "logger": "",
-                "type": "default",
-            },
-        ) == dict(event)
-
-    @with_feature("organizations:issue-platform")
     @patch("sentry.issues.producer.produce_occurrence_to_kafka")
-    def test_mark_failed_default_params_issue_platform(self, mock_produce_occurrence_to_kafka):
+    def test_mark_failed_default_params(self, mock_produce_occurrence_to_kafka):
         monitor = Monitor.objects.create(
             name="test monitor",
             organization_id=self.organization.id,
@@ -340,7 +144,6 @@ class MarkFailedTestCase(TestCase):
             },
         ) == dict(event)
 
-    @with_feature("organizations:issue-platform")
     @patch("sentry.issues.producer.produce_occurrence_to_kafka")
     def test_mark_failed_muted(self, mock_produce_occurrence_to_kafka):
         monitor = Monitor.objects.create(
@@ -378,7 +181,6 @@ class MarkFailedTestCase(TestCase):
         assert len(mock_produce_occurrence_to_kafka.mock_calls) == 0
         assert monitor_environment.active_incident is not None
 
-    @with_feature("organizations:issue-platform")
     @patch("sentry.issues.producer.produce_occurrence_to_kafka")
     def test_mark_failed_env_muted(self, mock_produce_occurrence_to_kafka):
         monitor = Monitor.objects.create(
@@ -418,7 +220,6 @@ class MarkFailedTestCase(TestCase):
         assert len(mock_produce_occurrence_to_kafka.mock_calls) == 0
         assert monitor_environment.active_incident is not None
 
-    @with_feature("organizations:issue-platform")
     @patch("sentry.issues.producer.produce_occurrence_to_kafka")
     def test_mark_failed_issue_threshold(self, mock_produce_occurrence_to_kafka):
         failure_issue_threshold = 8
@@ -565,7 +366,6 @@ class MarkFailedTestCase(TestCase):
 
     # Test to make sure that timeout mark_failed (which occur in the past)
     # correctly create issues once passing the failure_issue_threshold
-    @with_feature("organizations:issue-platform")
     @patch("sentry.issues.producer.produce_occurrence_to_kafka")
     def test_mark_failed_issue_threshold_timeout(self, mock_produce_occurrence_to_kafka):
         failure_issue_threshold = 8
@@ -647,7 +447,6 @@ class MarkFailedTestCase(TestCase):
         assert occurrence["evidence_display"][0]["value"] == "8 timeout check-ins detected"
 
     # we are duplicating this test as the code paths are different, for now
-    @with_feature("organizations:issue-platform")
     @patch("sentry.issues.producer.produce_occurrence_to_kafka")
     def test_mark_failed_issue_threshold_disabled(self, mock_produce_occurrence_to_kafka):
         failure_issue_threshold = 8
@@ -688,7 +487,6 @@ class MarkFailedTestCase(TestCase):
         assert len(mock_produce_occurrence_to_kafka.mock_calls) == 0
         assert monitor_environment.active_incident is not None
 
-    @with_feature("organizations:issue-platform")
     def test_mark_failed_issue_assignment(self):
         monitor = Monitor.objects.create(
             name="test monitor",
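
Every surviving test in this file follows the same pattern: patch the Kafka producer and assert on what was (or was not) produced. A condensed, hypothetical sketch using the names from the hunks above (the fixture setup and the exact call count are assumptions):

    from unittest.mock import patch

    # inside MarkFailedTestCase
    @patch("sentry.issues.producer.produce_occurrence_to_kafka")
    def test_mark_failed_example(self, mock_produce_occurrence_to_kafka):
        checkin = ...  # Monitor / MonitorEnvironment / MonitorCheckIn fixtures as above
        assert mark_failed(checkin, ts=checkin.date_added)
        # One issue-platform occurrence should have been produced.
        assert len(mock_produce_occurrence_to_kafka.mock_calls) == 1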

+ 0 - 36
tests/sentry/rules/history/test_preview.py

@@ -700,42 +700,6 @@ class FrequencyConditionTest(
         assert group.id not in result
 
     def test_frequency_conditions(self):
-        prev_hour = timezone.now() - timedelta(hours=1)
-        prev_two_hour = timezone.now() - timedelta(hours=2)
-        group = None
-        for time in (prev_hour, prev_two_hour):
-            for i in range(5):
-                group = self.store_event(
-                    project_id=self.project.id, data={"timestamp": iso_format(time)}
-                ).group
-        assert group is not None
-
-        conditions = [
-            {
-                "id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
-                "value": 4,
-                "interval": "5m",
-            },
-            {
-                "id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
-                "value": 9,
-                "interval": "1d",
-            },
-        ]
-        result = preview(self.project, conditions, [], *MATCH_ARGS)
-        assert result is not None
-        assert group.id in result
-
-        conditions[0]["value"] = 5
-        result = preview(self.project, conditions, [], *MATCH_ARGS)
-        assert result is not None
-        assert group.id not in result
-
-        result = preview(self.project, conditions, [], "any", "all", 0)
-        assert result is not None
-        assert group.id in result
-
-    def test_frequency_conditions_issue_platform(self):
         prev_hour = timezone.now() - timedelta(hours=1)
         prev_two_hour = timezone.now() - timedelta(hours=2)
         for time in (prev_hour, prev_two_hour):

+ 6 - 7
tests/sentry/rules/processing/test_delayed_processing.py

@@ -223,13 +223,12 @@ class ProcessDelayedAlertConditionsTest(
         )
         tags = [["foo", "guux"], ["sentry:release", "releaseme"]]
         contexts = {"trace": {"trace_id": "b" * 32, "span_id": "c" * 16, "op": ""}}
-        with self.feature("organizations:issue-platform"):
-            for i in range(3):
-                event5 = self.create_performance_issue(
-                    tags=tags,
-                    fingerprint="group-5",
-                    contexts=contexts,
-                )
+        for i in range(3):
+            event5 = self.create_performance_issue(
+                tags=tags,
+                fingerprint="group-5",
+                contexts=contexts,
+            )
         group5 = event5.group
         assert group5
         assert self.group1

Some files were not shown because too many files changed in this diff