
chore(database) Drop tables for project and team avatar (#68616)

These tables no longer have Django models and can be deleted.
Mark Story, 10 months ago
parent commit 0c8f916ecd

+ 1 - 1
migrations_lockfile.txt

@@ -9,5 +9,5 @@ feedback: 0004_index_together
 hybridcloud: 0016_add_control_cacheversion
 nodestore: 0002_nodestore_no_dictfield
 replays: 0004_index_together
-sentry: 0713_team_remove_actor_state
+sentry: 0714_drop_project_team_avatar
 social_auth: 0002_default_auto_field
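
Context on the lockfile (not part of the diff): migrations_lockfile.txt records the newest migration per Django app so that two branches adding a migration with the same number conflict in review rather than at deploy time. A minimal sketch of such a guard, assuming a hypothetical layout and helper names (this is not Sentry's actual tooling):

    # check_lockfile.py -- hypothetical CI guard for migrations_lockfile.txt
    import os
    import re


    def latest_migration(app_dir: str) -> str:
        """Highest-numbered migration module in an app's migrations directory."""
        names = [
            f[:-3]
            for f in os.listdir(app_dir)
            if re.match(r"\d{4}_", f) and f.endswith(".py")
        ]
        # Zero-padded numeric prefixes make the lexicographic max the newest one.
        return max(names)


    def stale_entries(lockfile: str, apps: dict[str, str]) -> list[str]:
        """Apps whose lockfile entry no longer matches the migrations on disk."""
        with open(lockfile) as fh:
            recorded = dict(
                line.strip().split(": ", 1) for line in fh if ": " in line
            )
        return [
            app for app, path in apps.items()
            if recorded.get(app) != latest_migration(path)
        ]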

+ 2 - 0
src/sentry/db/router.py

@@ -68,6 +68,8 @@ class SiloRouter:
     """
 
     historical_silo_assignments = {
+        "sentry_teamavatar": SiloMode.REGION,
+        "sentry_projectavatar": SiloMode.REGION,
         "sentry_pagerdutyservice": SiloMode.REGION,
         "sentry_notificationsetting": SiloMode.CONTROL,
     }
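
The two new entries record which silo the dropped tables lived in. Once the model classes are gone, a database router only sees these tables by name via migration hints, so it needs a historical mapping to route the DROP statements. A minimal sketch of that idea with a simplified router (illustrative only, not Sentry's actual SiloRouter):

    # example_router.py -- SiloMode and the routing rule here are simplified
    # stand-ins for Sentry's real classes.
    from enum import Enum


    class SiloMode(Enum):
        REGION = "region"
        CONTROL = "control"


    class ExampleSiloRouter:
        historical_silo_assignments = {
            "sentry_teamavatar": SiloMode.REGION,
            "sentry_projectavatar": SiloMode.REGION,
        }

        def allow_migrate(self, db, app_label, model_name=None, **hints):
            # RunSQL operations pass hints={"tables": [...]}; fall back to the
            # historical map because no model exists for these tables anymore.
            for table in hints.get("tables", []):
                mode = self.historical_silo_assignments.get(table)
                if mode is not None:
                    # Assumed convention: region tables migrate on "default".
                    return (db == "default") == (mode is SiloMode.REGION)
            return None  # no opinion; defer to other routers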

+ 42 - 0
src/sentry/migrations/0714_drop_project_team_avatar.py

@@ -0,0 +1,42 @@
+# Generated by Django 5.0.3 on 2024-04-10 15:44
+
+from django.db import migrations
+
+from sentry.new_migrations.migrations import CheckedMigration
+
+
+class Migration(CheckedMigration):
+    # This flag is used to mark that a migration shouldn't be automatically run in production.
+    # This should only be used for operations where it's safe to run the migration after your
+    # code has deployed. So this should not be used for most operations that alter the schema
+    # of a table.
+    # Here are some things that make sense to mark as post deployment:
+    # - Large data migrations. Typically we want these to be run manually so that they can be
+    #   monitored and not block the deploy for a long period of time while they run.
+    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+    #   run this outside deployments so that we don't block them. Note that while adding an index
+    #   is a schema change, it's completely safe to run the operation after the code has deployed.
+    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
+
+    is_post_deployment = False
+
+    dependencies = [
+        ("sentry", "0713_team_remove_actor_state"),
+    ]
+
+    operations = [
+        migrations.SeparateDatabaseAndState(
+            database_operations=[
+                migrations.RunSQL(
+                    sql='DROP TABLE IF EXISTS "sentry_projectavatar"',
+                    reverse_sql="CREATE TABLE sentry_projectavatar (id BIGSERIAL)",
+                    hints={"tables": ["sentry_projectavatar"]},
+                ),
+                migrations.RunSQL(
+                    sql='DROP TABLE IF EXISTS "sentry_teamavatar"',
+                    reverse_sql="CREATE TABLE sentry_teamavatar (id BIGSERIAL)",
+                    hints={"tables": ["sentry_teamavatar"]},
+                ),
+            ]
+        )
+    ]
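
Context for the new migration: the ProjectAvatar and TeamAvatar models were already deleted from Django's migration state in an earlier migration (0691_remove_project_team_avatar_model, referenced by one of the deleted tests below), so this step only needs database_operations. SeparateDatabaseAndState lets the raw DROP TABLE statements run without touching migration state, and the minimal reverse_sql stubs keep the operation reversible in tests. A sketch of the state-only first step this pattern assumes (illustrative shape, not copied from 0691):

    # Assumed shape of the earlier state-only removal; the dependency name is a
    # placeholder and the exact contents of 0691 are not shown in this diff.
    from django.db import migrations


    class Migration(migrations.Migration):
        dependencies = [("sentry", "0690_placeholder")]

        operations = [
            migrations.SeparateDatabaseAndState(
                state_operations=[
                    migrations.DeleteModel(name="ProjectAvatar"),
                    migrations.DeleteModel(name="TeamAvatar"),
                ],
            )
        ]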

+ 0 - 36
tests/sentry/migrations/test_0632_apitoken_backfill_last_chars.py

@@ -1,36 +0,0 @@
-from django.db import router
-
-from sentry.silo.safety import unguarded_write
-from sentry.testutils.cases import TestMigrations
-from sentry.testutils.helpers import override_options
-from sentry.testutils.silo import no_silo_test
-
-
-@no_silo_test
-class LastCharsApiTokenMigrationTest(TestMigrations):
-    migrate_from = "0631_add_priority_columns_to_groupedmessage"
-    migrate_to = "0632_apitoken_backfill_last_chars"
-    connection = "control"
-
-    def setUp(self):
-        from sentry.models.apitoken import ApiToken
-
-        with unguarded_write(using=router.db_for_write(ApiToken)):
-            super().setUp()
-
-    @override_options({"apitoken.auto-add-last-chars": False})
-    def setup_before_migration(self, apps):
-        ApiToken = apps.get_model("sentry", "ApiToken")
-
-        self.api_token = ApiToken.objects.create(
-            user_id=self.user.id,
-            refresh_token=None,
-        )
-        self.api_token.save()
-
-        assert self.api_token.token_last_characters is None
-
-    def test(self):
-        self.api_token.refresh_from_db()
-        assert self.api_token.name is None
-        assert self.api_token.token_last_characters == self.api_token.token[-4:]

+ 0 - 35
tests/sentry/migrations/test_0654_rename_priority_sort_to_trends.py

@@ -1,35 +0,0 @@
-import pytest
-
-from sentry.models.savedsearch import SavedSearch
-from sentry.testutils.cases import TestMigrations
-
-
-@pytest.mark.skip("Migration is no longer runnable. Retain until migration is removed.")
-class RenamePrioritySortToTrendsTest(TestMigrations):
-    migrate_from = "0653_apitoken_add_token_type"
-    migrate_to = "0654_rename_priority_sort_to_trends"
-
-    def setup_before_migration(self, apps):
-        self.priority_searches = []
-        for i in range(3):
-            self.priority_searches.append(
-                SavedSearch.objects.create(
-                    organization=self.organization, query="is:unresolved", sort="priority"
-                )
-            )
-
-        self.other_searches = [
-            SavedSearch.objects.create(organization=self.organization, query="is:unresolved"),
-            SavedSearch.objects.create(
-                organization=self.organization, query="is:unresolved", sort="date"
-            ),
-        ]
-
-    def test(self):
-        for search in self.priority_searches:
-            search.refresh_from_db()
-            assert search.sort == "trends"
-
-        for search in self.other_searches:
-            search.refresh_from_db()
-            assert search.sort == "date"

+ 0 - 65
tests/sentry/migrations/test_0675_dashboard_widget_query_rename_priority_sort_to_trends.py

@@ -1,65 +0,0 @@
-import pytest
-
-from sentry.models.dashboard_widget import DashboardWidgetDisplayTypes
-from sentry.testutils.cases import TestMigrations
-
-
-@pytest.mark.skip("Migration is no longer runnable. Retain until migration is removed.")
-class RenamePrioritySortToTrendsTest(TestMigrations):
-    migrate_from = "0674_monitor_clear_missed_timeout_as_error"
-    migrate_to = "0675_dashboard_widget_query_rename_priority_sort_to_trends"
-
-    def setup_before_migration(self, apps):
-        Dashboard = apps.get_model("sentry", "Dashboard")
-        DashboardWidget = apps.get_model("sentry", "DashboardWidget")
-        DashboardWidgetQuery = apps.get_model("sentry", "DashboardWidgetQuery")
-
-        self.dashboard = Dashboard.objects.create(
-            organization_id=self.organization.id,
-            title="Dashboard",
-            created_by_id=self.user.id,
-        )
-
-        self.queries_with_priority_sort = []
-        self.other_queries = []
-
-        for i, title in enumerate(["Widget 1", "Widget 2", "Widget 3"]):
-            widget = DashboardWidget.objects.create(
-                dashboard=self.dashboard,
-                order=i,
-                title=title,
-                display_type=DashboardWidgetDisplayTypes.TABLE,
-            )
-            widget_query = DashboardWidgetQuery.objects.create(
-                widget=widget,
-                name="query",
-                fields=["assignee", "issue", "title"],
-                order=1,
-                orderby="priority",
-            )
-            self.queries_with_priority_sort.append(widget_query)
-
-        for i, title in enumerate(["Widget 1", "Widget 2", "Widget 3"]):
-            widget = DashboardWidget.objects.create(
-                dashboard=self.dashboard,
-                order=i + 3,
-                title=title,
-                display_type=DashboardWidgetDisplayTypes.TABLE,
-            )
-            widget_query = DashboardWidgetQuery.objects.create(
-                widget=widget,
-                name="query",
-                fields=["assignee", "issue", "title"],
-                order=1,
-                orderby="last_seen",
-            )
-            self.other_queries.append(widget_query)
-
-    def test(self):
-        for query in self.queries_with_priority_sort:
-            query.refresh_from_db()
-            assert query.orderby == "trends"
-
-        for query in self.other_queries:
-            query.refresh_from_db()
-            assert query.orderby == "last_seen"

+ 0 - 245
tests/sentry/migrations/test_0692_backfill_group_priority_again.py

@@ -1,245 +0,0 @@
-import logging
-
-import pytest
-from django.conf import settings
-
-from sentry.issues.grouptype import (
-    ErrorGroupType,
-    FeedbackGroup,
-    MonitorIncidentType,
-    PerformanceConsecutiveHTTPQueriesGroupType,
-    PerformanceP95EndpointRegressionGroupType,
-    ReplayDeadClickType,
-)
-from sentry.models.group import GroupStatus
-from sentry.models.project import Project
-from sentry.testutils.cases import TestMigrations
-from sentry.types.group import GroupSubStatus
-from sentry.utils import redis
-
-
-class PriorityLevel:
-    LOW = 25
-    MEDIUM = 50
-    HIGH = 75
-
-
-@pytest.mark.skip("Migration is no longer runnable. Retain until migration is removed.")
-class BackfillGroupPriority(TestMigrations):
-    migrate_from = "0691_remove_project_team_avatar_model"
-    migrate_to = "0692_backfill_group_priority_again"
-
-    def setup_initial_state(self):
-        self._create_groups_to_backfill(self.project)
-        redis_cluster = redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
-        redis_cluster.set("priority_backfill-2.last_processed_id", self.cache_group_id)
-
-    def test(self):
-        for groups, expected_priority in (
-            (self.high_priority_groups, PriorityLevel.HIGH),
-            (self.medium_priority_groups, PriorityLevel.MEDIUM),
-            (self.low_priority_groups, PriorityLevel.LOW),
-        ):
-            for desc, group in groups:
-                group.refresh_from_db()
-                if desc == "skip me":
-                    # these groups should not have been backfilled because the group id is less than the redis cached ID
-                    assert not group.priority
-                    continue
-
-                assert group.priority == expected_priority, desc
-                if not desc.startswith("existing"):
-                    assert group.data.get("metadata")["initial_priority"] == expected_priority
-
-    def _create_groups_to_backfill(self, project: Project) -> None:
-        skipped_group_count = 3
-        data = [
-            # three groups to skip to test the redis cache
-            (
-                "skip me",
-                {"type": FeedbackGroup.type_id},
-                PriorityLevel.MEDIUM,
-            ),
-            (
-                "skip me",
-                {"type": FeedbackGroup.type_id},
-                PriorityLevel.MEDIUM,
-            ),
-            (
-                "skip me",
-                {"type": FeedbackGroup.type_id},
-                PriorityLevel.MEDIUM,
-            ),
-            # groups with priority remain unchanged, even if escalating.
-            (
-                "existing low priority",
-                {
-                    "priority": PriorityLevel.LOW,
-                    "data": {"metadata": {"initial_priority": PriorityLevel.LOW}},
-                },
-                PriorityLevel.LOW,
-            ),
-            (
-                "existing low priority with escalation",
-                {
-                    "priority": PriorityLevel.LOW,
-                    "status": GroupStatus.UNRESOLVED,
-                    "substatus": GroupSubStatus.ESCALATING,
-                    "data": {"metadata": {"initial_priority": PriorityLevel.LOW}},
-                },
-                PriorityLevel.LOW,
-            ),
-            # escalating groups are high priority, except for Replay and Feedback issues
-            (
-                "escalating error group",
-                {
-                    "status": GroupStatus.UNRESOLVED,
-                    "substatus": GroupSubStatus.ESCALATING,
-                    "type": ErrorGroupType.type_id,
-                    "level": logging.INFO,  # this level should not matter
-                },
-                PriorityLevel.HIGH,
-            ),
-            (
-                "escalating performance group",
-                {
-                    "status": GroupStatus.UNRESOLVED,
-                    "substatus": GroupSubStatus.ESCALATING,
-                    "type": PerformanceConsecutiveHTTPQueriesGroupType.type_id,
-                },
-                PriorityLevel.HIGH,
-            ),
-            (
-                "escalating cron group",
-                {
-                    "status": GroupStatus.UNRESOLVED,
-                    "substatus": GroupSubStatus.ESCALATING,
-                    "type": MonitorIncidentType.type_id,
-                },
-                PriorityLevel.HIGH,
-            ),
-            (
-                "escalating replay group",
-                {
-                    "status": GroupStatus.UNRESOLVED,
-                    "substatus": GroupSubStatus.ESCALATING,
-                    "type": ReplayDeadClickType.type_id,
-                },
-                PriorityLevel.MEDIUM,
-            ),
-            (
-                "escalating feedback group",
-                {
-                    "status": GroupStatus.UNRESOLVED,
-                    "substatus": GroupSubStatus.ESCALATING,
-                    "type": FeedbackGroup.type_id,
-                },
-                PriorityLevel.MEDIUM,
-            ),
-            # error groups respect log levels if present
-            (
-                "error group with log level INFO",
-                {
-                    "type": ErrorGroupType.type_id,
-                    "level": logging.INFO,
-                },
-                PriorityLevel.LOW,
-            ),
-            (
-                "error group with log level DEBUG",
-                {
-                    "type": ErrorGroupType.type_id,
-                    "level": logging.DEBUG,
-                },
-                PriorityLevel.LOW,
-            ),
-            (
-                "error group with log level WARNING",
-                {
-                    "type": ErrorGroupType.type_id,
-                    "level": logging.WARNING,
-                },
-                PriorityLevel.MEDIUM,
-            ),
-            (
-                "error group with log level ERROR",
-                {
-                    "type": ErrorGroupType.type_id,
-                    "level": logging.ERROR,
-                },
-                PriorityLevel.HIGH,
-            ),
-            (
-                "error group with log level FATAL",
-                {
-                    "type": ErrorGroupType.type_id,
-                    "level": logging.FATAL,
-                },
-                PriorityLevel.HIGH,
-            ),
-            # cron groups are medium priority if they are warnings, high priority otherwise
-            (
-                "cron group with log level WARNING",
-                {
-                    "type": MonitorIncidentType.type_id,
-                    "level": logging.WARNING,
-                },
-                PriorityLevel.MEDIUM,
-            ),
-            (
-                "cron group with log level ERROR",
-                {
-                    "substatus": GroupSubStatus.ONGOING,
-                    "type": MonitorIncidentType.type_id,
-                    "level": logging.ERROR,
-                },
-                PriorityLevel.HIGH,
-            ),
-            (
-                "cron group with log level DEBUG",
-                {
-                    "type": MonitorIncidentType.type_id,
-                    "level": logging.DEBUG,
-                },
-                PriorityLevel.HIGH,
-            ),
-            # statistical detectors are medium priority
-            (
-                "statistical detector group",
-                {
-                    "level": logging.ERROR,
-                    "type": PerformanceP95EndpointRegressionGroupType.type_id,
-                },
-                PriorityLevel.MEDIUM,
-            ),
-            # performance issues are otherwise low priority
-            (
-                "performance group",
-                {
-                    "level": logging.ERROR,
-                    "type": PerformanceConsecutiveHTTPQueriesGroupType.type_id,
-                },
-                PriorityLevel.LOW,
-            ),
-        ]
-
-        self.low_priority_groups = []
-        self.medium_priority_groups = []
-        self.high_priority_groups = []
-
-        for desc, group_data, expected_priority in data:
-            group = self.create_group(project, **group_data)  # type: ignore[arg-type]
-
-            if desc == "skip me":
-                skipped_group_count -= 1
-                if skipped_group_count == 0:
-                    self.cache_group_id = group.id
-
-            if expected_priority == PriorityLevel.LOW:
-                self.low_priority_groups.append((desc, group))
-
-            elif expected_priority == PriorityLevel.MEDIUM:
-                self.medium_priority_groups.append((desc, group))
-
-            elif expected_priority == PriorityLevel.HIGH:
-                self.high_priority_groups.append((desc, group))

+ 0 - 58
tests/sentry/migrations/test_0711_backfill_group_attributes_to_self_hosted.py

@@ -1,58 +0,0 @@
-from sentry_sdk import Hub
-from snuba_sdk.legacy import json_to_snql
-
-from sentry.testutils.cases import SnubaTestCase, TestMigrations
-from sentry.utils import json, redis
-from sentry.utils.snuba import _snql_query
-
-
-def run_test(expected_groups):
-    project = expected_groups[0].project
-    json_body = {
-        "selected_columns": [
-            "group_id",
-        ],
-        "offset": 0,
-        "limit": 100,
-        "project": [project.id],
-        "dataset": "group_attributes",
-        "order_by": ["group_id"],
-        "consistent": True,
-        "tenant_ids": {
-            "referrer": "group_attributes",
-            "organization_id": project.organization_id,
-        },
-    }
-    request = json_to_snql(json_body, "group_attributes")
-    request.validate()
-    identity = lambda x: x
-    resp = _snql_query(((request, identity, identity), Hub(Hub.current), {}, "test_api"))[0]
-    assert resp.status == 200
-    data = json.loads(resp.data)["data"]
-    assert {g.id for g in expected_groups} == {d["group_id"] for d in data}
-
-
-class TestBackfillGroupAttributes(SnubaTestCase, TestMigrations):
-    migrate_from = "0710_grouphistory_remove_actor_state"
-    migrate_to = "0711_backfill_group_attributes_to_self_hosted"
-
-    def setup_initial_state(self):
-        self.group = self.create_group()
-        self.group_2 = self.create_group()
-
-    def test(self):
-        run_test([self.group, self.group_2])
-
-
-class TestBackfillGroupAttributesRetry(SnubaTestCase, TestMigrations):
-    migrate_from = "0710_grouphistory_remove_actor_state"
-    migrate_to = "0711_backfill_group_attributes_to_self_hosted"
-
-    def setup_initial_state(self):
-        self.group = self.create_group()
-        self.group_2 = self.create_group()
-        redis_client = redis.redis_clusters.get("default")
-        redis_client.set("backfill_group_attributes_to_snuba_progress_again", self.group.id)
-
-    def test_restart(self):
-        run_test([self.group_2])