
ref: fix some more typing in tests (#57399)

Removes eleven more test modules from the mypy per-module ignore list in pyproject.toml and fixes the typing issues that were keeping them there: annotates `GroupOwner.context`, corrects `FunctionDetails.instance`, turns shared test mixins into abstract base test cases, stops rebinding local variables to new types, and aligns the slicing-router fixtures with the typed settings keys.
anthony sottile · 1 year ago · commit fd33557666

+ 0 - 11
pyproject.toml

@@ -825,20 +825,9 @@ module = [
     "tests.sentry.lang.javascript.test_processor",
     "tests.sentry.models.test_organizationmember",
     "tests.sentry.models.test_project",
-    "tests.sentry.release_health.release_monitor",
-    "tests.sentry.replays.test_project_replay_recording_segment_details",
     "tests.sentry.replays.test_project_replay_recording_segment_index",
     "tests.sentry.replays.unit.test_dead_click_issue",
-    "tests.sentry.search.events.builder.test_metrics",
-    "tests.sentry.search.events.test_fields",
-    "tests.sentry.sentry_apps.test_sentry_app_creator",
-    "tests.sentry.sentry_metrics.consumers.test_slicing_router",
-    "tests.sentry.sentry_metrics.limiters.test_writes_limiter",
-    "tests.sentry.sentry_metrics.test_base_indexer",
-    "tests.sentry.sentry_metrics.test_batch",
-    "tests.sentry.tagstore.test_types",
     "tests.sentry.tasks.test_post_process",
-    "tests.sentry.web.test_client_config",
 ]
 disable_error_code = [
     "arg-type",

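Note: these eleven test modules now type-check without the per-module override, so they are dropped from the `module = [...]` list feeding `disable_error_code` below; the remaining hunks in this commit are the fixes that made each module clean.
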
+ 1 - 1
src/sentry/models/groupowner.py

@@ -67,7 +67,7 @@ class GroupOwner(Model):
             (GroupOwnerType.CODEOWNERS, "Codeowners"),
         )
     )
-    context = JSONField(null=True)
+    context: models.Field[dict[str, Any], dict[str, Any]] = JSONField(null=True)
     user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, on_delete="CASCADE", null=True)
     team = FlexibleForeignKey("sentry.Team", null=True)
     date_added = models.DateTimeField(default=timezone.now)

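The new annotation uses django-stubs' two-parameter `Field` generic, where the first parameter is the type accepted on assignment and the second the type returned on access. A minimal sketch of the descriptor-typing idea (the `TypedField` class here is hypothetical, for illustration only):

    from typing import Any, Generic, TypeVar

    _ST = TypeVar("_ST", contravariant=True)  # type accepted when setting the attribute
    _GT = TypeVar("_GT", covariant=True)      # type returned when getting the attribute

    class TypedField(Generic[_ST, _GT]):
        def __set__(self, instance: Any, value: _ST) -> None: ...
        def __get__(self, instance: Any, owner: Any = None) -> _GT: ...

Annotating `context` as `models.Field[dict[str, Any], dict[str, Any]]` therefore tells mypy that the attribute both accepts and returns a `dict[str, Any]`, which the bare `JSONField(null=True)` call cannot express on its own.
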
+ 1 - 1
src/sentry/search/events/fields.py

@@ -2171,5 +2171,5 @@ class MetricsFunction(SnQLFunction):
 
 class FunctionDetails(NamedTuple):
     field: str
-    instance: SnQLFunction
+    instance: DiscoverFunction
     arguments: Mapping[str, NormalizedArg]

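The annotation on `FunctionDetails.instance` is loosened from `SnQLFunction` to `DiscoverFunction`, suggesting these tuples are also built from plain discover functions; the narrower type was rejecting otherwise-valid call sites.
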
+ 8 - 2
tests/sentry/release_health/release_monitor/__init__.py

@@ -1,9 +1,13 @@
 from __future__ import annotations
 
 from sentry.release_health.release_monitor.base import BaseReleaseMonitorBackend
+from sentry.testutils.abstract import Abstract
+from sentry.testutils.cases import SnubaTestCase, TestCase
 
 
-class BaseFetchProjectsWithRecentSessionsTest:
+class BaseFetchProjectsWithRecentSessionsTest(TestCase, SnubaTestCase):
+    __test__ = Abstract(__module__, __qualname__)  # type: ignore[name-defined]  # python/mypy#10570
+
     backend_class: type[BaseReleaseMonitorBackend]
 
     def setUp(self):
@@ -42,7 +46,9 @@ class BaseFetchProjectsWithRecentSessionsTest:
         }
 
 
-class BaseFetchProjectReleaseHealthTotalsTest:
+class BaseFetchProjectReleaseHealthTotalsTest(TestCase, SnubaTestCase):
+    __test__ = Abstract(__module__, __qualname__)  # type: ignore[name-defined]  # python/mypy#10570
+
     backend_class: type[BaseReleaseMonitorBackend]
 
     def setUp(self):

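Turning the bare mixins into real `TestCase`/`SnubaTestCase` subclasses gives mypy the inherited `setUp` and assertion helpers, but pytest would then try to collect the base classes themselves; the `Abstract(__module__, __qualname__)` marker prevents that. A minimal sketch of how such a marker can work (an illustration of the pattern, not necessarily Sentry's exact implementation):

    class Abstract:
        """Descriptor for __test__: False on the defining class, True on subclasses."""

        def __init__(self, module: str, qualname: str) -> None:
            self.module = module
            self.qualname = qualname

        def __get__(self, instance: object, owner: type) -> bool:
            # pytest reads __test__ from the class, which invokes this descriptor
            # with the accessing class; only the declaring class reports False.
            return not (
                owner.__module__ == self.module and owner.__qualname__ == self.qualname
            )

On `BaseFetchProjectsWithRecentSessionsTest` itself `__test__` evaluates to False, so pytest skips it; a concrete subclass has a different `__qualname__`, so the inherited descriptor returns True and the subclass is collected. The `# type: ignore[name-defined]` is needed because mypy does not recognize `__qualname__` inside a class body (python/mypy#10570).
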
+ 3 - 3
tests/sentry/release_health/release_monitor/test_metrics.py

@@ -1,7 +1,7 @@
 import pytest
 
 from sentry.release_health.release_monitor.metrics import MetricReleaseMonitorBackend
-from sentry.testutils.cases import BaseMetricsTestCase, TestCase
+from sentry.testutils.cases import BaseMetricsTestCase
 from sentry.testutils.silo import region_silo_test
 from tests.sentry.release_health.release_monitor import (
     BaseFetchProjectReleaseHealthTotalsTest,
@@ -13,13 +13,13 @@ pytestmark = pytest.mark.sentry_metrics
 
 @region_silo_test(stable=True)
 class MetricFetchProjectsWithRecentSessionsTest(
-    BaseFetchProjectsWithRecentSessionsTest, TestCase, BaseMetricsTestCase
+    BaseFetchProjectsWithRecentSessionsTest, BaseMetricsTestCase
 ):
     backend_class = MetricReleaseMonitorBackend
 
 
 @region_silo_test(stable=True)
 class SessionFetchProjectReleaseHealthTotalsTest(
-    BaseFetchProjectReleaseHealthTotalsTest, TestCase, BaseMetricsTestCase
+    BaseFetchProjectReleaseHealthTotalsTest, BaseMetricsTestCase
 ):
     backend_class = MetricReleaseMonitorBackend

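With the base classes now deriving from `TestCase` and `SnubaTestCase` themselves, the metric-backed tests here only need to add `BaseMetricsTestCase`, and the session-backed tests in the next file need no extra bases at all.
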
+ 2 - 7
tests/sentry/release_health/release_monitor/test_sessions.py

@@ -1,5 +1,4 @@
 from sentry.release_health.release_monitor.sessions import SessionReleaseMonitorBackend
-from sentry.testutils.cases import SnubaTestCase, TestCase
 from sentry.testutils.silo import region_silo_test
 from tests.sentry.release_health.release_monitor import (
     BaseFetchProjectReleaseHealthTotalsTest,
@@ -8,14 +7,10 @@ from tests.sentry.release_health.release_monitor import (
 
 
 @region_silo_test(stable=True)
-class SessionFetchProjectsWithRecentSessionsTest(
-    BaseFetchProjectsWithRecentSessionsTest, TestCase, SnubaTestCase
-):
+class SessionFetchProjectsWithRecentSessionsTest(BaseFetchProjectsWithRecentSessionsTest):
     backend_class = SessionReleaseMonitorBackend
 
 
 @region_silo_test(stable=True)
-class SessionFetchProjectReleaseHealthTotalsTest(
-    BaseFetchProjectReleaseHealthTotalsTest, TestCase, SnubaTestCase
-):
+class SessionFetchProjectReleaseHealthTotalsTest(BaseFetchProjectReleaseHealthTotalsTest):
     backend_class = SessionReleaseMonitorBackend

+ 14 - 7
tests/sentry/replays/test_project_replay_recording_segment_details.py

@@ -10,13 +10,22 @@ from sentry.replays.lib.storage import (
     make_filename,
 )
 from sentry.replays.testutils import mock_replay
+from sentry.testutils.abstract import Abstract
 from sentry.testutils.cases import APITestCase, ReplaysSnubaTestCase
+from sentry.testutils.helpers.response import close_streaming_response
 from sentry.testutils.silo import region_silo_test
 
 
-class EnvironmentMixin:
+class EnvironmentBase(APITestCase):
+    __test__ = Abstract(__module__, __qualname__)  # type: ignore[name-defined]  # python/mypy#10570
+
     endpoint = "sentry-api-0-project-replay-recording-segment-details"
 
+    segment_filename: str
+
+    def init_environment(self) -> None:
+        raise NotImplementedError
+
     def setUp(self):
         super().setUp()
         self.replay_id = uuid.uuid4().hex
@@ -60,11 +69,11 @@ class EnvironmentMixin:
             )
             assert response.get("Content-Length") == str(self.segment_data_size)
             assert response.get("Content-Type") == "application/json"
-            assert self.segment_data == b"".join(response.streaming_content)
+            assert self.segment_data == close_streaming_response(response)
 
 
 @region_silo_test(stable=True)
-class FilestoreReplayRecordingSegmentDetailsTestCase(EnvironmentMixin, APITestCase):
+class FilestoreReplayRecordingSegmentDetailsTestCase(EnvironmentBase):
     def init_environment(self):
         metadata = RecordingSegmentStorageMeta(
             project_id=self.project.id,
@@ -78,9 +87,7 @@ class FilestoreReplayRecordingSegmentDetailsTestCase(EnvironmentMixin, APITestCa
 
 
 @region_silo_test(stable=True)
-class StorageReplayRecordingSegmentDetailsTestCase(
-    EnvironmentMixin, APITestCase, ReplaysSnubaTestCase
-):
+class StorageReplayRecordingSegmentDetailsTestCase(EnvironmentBase, ReplaysSnubaTestCase):
     def init_environment(self):
         metadata = RecordingSegmentStorageMeta(
             project_id=self.project.id,
@@ -94,7 +101,7 @@ class StorageReplayRecordingSegmentDetailsTestCase(
         self.store_replays(
             mock_replay(
                 datetime.datetime.now() - datetime.timedelta(seconds=22),
-                metadata.project_id,
+                str(metadata.project_id),
                 metadata.replay_id,
                 segment_id=metadata.segment_id,
                 retention_days=metadata.retention_days,

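`close_streaming_response` replaces the inline `b"".join(response.streaming_content)` so the streaming response is also closed after reading, avoiding unclosed-resource warnings. A plausible sketch of the helper, assuming it lives in `sentry.testutils.helpers.response` as imported above:

    from django.http import StreamingHttpResponse

    def close_streaming_response(response: StreamingHttpResponse) -> bytes:
        """Drain a streaming response fully, then release its underlying resources."""
        data = b"".join(response.streaming_content)
        response.close()
        return data

The `str(metadata.project_id)` change further down matches `mock_replay`'s signature, which evidently expects the project id as a string.
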
+ 41 - 40
tests/sentry/search/events/builder/test_metrics.py

@@ -23,9 +23,9 @@ from sentry.sentry_metrics.aggregation_option_registry import AggregationOption
 from sentry.sentry_metrics.use_case_id_registry import UseCaseID
 from sentry.sentry_metrics.utils import resolve_tag_value
 from sentry.snuba.dataset import Dataset
-from sentry.snuba.metrics import TransactionMRI
 from sentry.snuba.metrics.extraction import QUERY_HASH_KEY, OnDemandMetricSpec
 from sentry.snuba.metrics.naming_layer import TransactionMetricKey
+from sentry.snuba.metrics.naming_layer.mri import TransactionMRI
 from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
 
 pytestmark = pytest.mark.sentry_metrics
@@ -458,6 +458,7 @@ class MetricQueryBuilderTest(MetricBuilderBaseTest):
         MetricsQueryBuilder(self.params, limit=51)
         # None is ok, defaults to 50
         query = MetricsQueryBuilder(self.params)
+        assert query.limit is not None
         assert query.limit.limit == 50
         # anything higher should throw an error
         with pytest.raises(IncompatibleMetricsQuery):
@@ -1613,14 +1614,14 @@ class MetricQueryBuilderTest(MetricBuilderBaseTest):
 
 class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
     def test_get_query(self):
-        query = TimeseriesMetricQueryBuilder(
+        orig_query = TimeseriesMetricQueryBuilder(
             self.params,
             dataset=Dataset.PerformanceMetrics,
             interval=900,
             query="",
             selected_columns=["p50(transaction.duration)"],
         )
-        snql_query = query.get_snql_query()
+        snql_query = orig_query.get_snql_query()
         assert len(snql_query) == 1
         query = snql_query[0].query
         self.assertCountEqual(
@@ -1983,8 +1984,8 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
 
     def test_run_query_with_on_demand_count(self):
         field = "count()"
-        query = "transaction.duration:>0"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>0"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
 
         for hour in range(0, 5):
             self.store_transaction_metric(
@@ -2000,7 +2001,7 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
             self.params,
             dataset=Dataset.PerformanceMetrics,
             interval=3600,
-            query=query,
+            query=query_s,
             selected_columns=[field],
             config=QueryBuilderConfig(
                 on_demand_metrics_enabled=True,
@@ -2039,8 +2040,8 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
 
     def test_run_query_with_on_demand_distribution(self):
         field = "p75(measurements.fp)"
-        query = "transaction.duration:>0"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>0"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
 
         for hour in range(0, 5):
             self.store_transaction_metric(
@@ -2056,7 +2057,7 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
             self.params,
             dataset=Dataset.PerformanceMetrics,
             interval=3600,
-            query=query,
+            query=query_s,
             selected_columns=[field],
             config=QueryBuilderConfig(
                 on_demand_metrics_enabled=True,
@@ -2095,8 +2096,8 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
 
     def test_run_query_with_on_demand_failure_count(self):
         field = "failure_count()"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
         timestamp = self.start
         self.store_transaction_metric(
             value=1,
@@ -2110,7 +2111,7 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
             self.params,
             dataset=Dataset.PerformanceMetrics,
             interval=3600,
-            query=query,
+            query=query_s,
             selected_columns=[field],
             config=QueryBuilderConfig(on_demand_metrics_enabled=True),
         )
@@ -2123,8 +2124,8 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
 
     def test_run_query_with_on_demand_failure_rate(self):
         field = "failure_rate()"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
 
         for hour in range(0, 5):
             # 1 per hour failed
@@ -2152,7 +2153,7 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
             self.params,
             dataset=Dataset.PerformanceMetrics,
             interval=3600,
-            query=query,
+            query=query_s,
             selected_columns=[field],
             config=QueryBuilderConfig(
                 on_demand_metrics_enabled=True,
@@ -2191,8 +2192,8 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
 
     def test_run_query_with_on_demand_apdex(self):
         field = "apdex(10)"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
 
         for hour in range(0, 5):
             self.store_transaction_metric(
@@ -2218,7 +2219,7 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
             self.params,
             dataset=Dataset.PerformanceMetrics,
             interval=3600,
-            query=query,
+            query=query_s,
             selected_columns=[field],
             config=QueryBuilderConfig(
                 on_demand_metrics_enabled=True,
@@ -2258,8 +2259,8 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
     def test_run_query_with_on_demand_epm(self):
         """Test events per minute for 1 event within an hour."""
         field = "epm()"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
         timestamp = self.start
         self.store_transaction_metric(
             value=1,
@@ -2273,7 +2274,7 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
             self.params,
             dataset=Dataset.PerformanceMetrics,
             interval=3600,
-            query=query,
+            query=query_s,
             selected_columns=[field],
             config=QueryBuilderConfig(on_demand_metrics_enabled=True),
         )
@@ -2287,8 +2288,8 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
     def test_run_query_with_on_demand_eps(self):
         """Test event per second for 1 event within an hour."""
         field = "eps()"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
         timestamp = self.start
         self.store_transaction_metric(
             value=1,
@@ -2302,7 +2303,7 @@ class TimeseriesMetricQueryBuilderTest(MetricBuilderBaseTest):
             self.params,
             dataset=Dataset.PerformanceMetrics,
             interval=3600,
-            query=query,
+            query=query_s,
             selected_columns=[field],
             config=QueryBuilderConfig(on_demand_metrics_enabled=True),
         )
@@ -2421,8 +2422,8 @@ class HistogramMetricQueryBuilderTest(MetricBuilderBaseTest):
 class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
     def test_run_query_with_on_demand_distribution(self):
         field = "p75(measurements.fp)"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
 
         self.store_transaction_metric(
             value=200,
@@ -2436,7 +2437,7 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
         query = AlertMetricsQueryBuilder(
             self.params,
             granularity=3600,
-            query=query,
+            query=query_s,
             dataset=Dataset.PerformanceMetrics,
             selected_columns=[field],
             config=QueryBuilderConfig(
@@ -2455,8 +2456,8 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
 
     def test_run_query_with_on_demand_count(self):
         field = "count(measurements.fp)"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
 
         self.store_transaction_metric(
             value=100,
@@ -2470,7 +2471,7 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
         query = AlertMetricsQueryBuilder(
             self.params,
             granularity=3600,
-            query=query,
+            query=query_s,
             dataset=Dataset.PerformanceMetrics,
             selected_columns=[field],
             config=QueryBuilderConfig(
@@ -2489,8 +2490,8 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
 
     def test_run_query_with_on_demand_failure_rate(self):
         field = "failure_rate()"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
 
         self.store_transaction_metric(
             value=1,
@@ -2513,7 +2514,7 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
         query = AlertMetricsQueryBuilder(
             self.params,
             granularity=3600,
-            query=query,
+            query=query_s,
             dataset=Dataset.PerformanceMetrics,
             selected_columns=[field],
             config=QueryBuilderConfig(
@@ -2533,8 +2534,8 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
 
     def test_run_query_with_on_demand_apdex(self):
         field = "apdex(10)"
-        query = "transaction.duration:>=100"
-        spec = OnDemandMetricSpec(field=field, query=query)
+        query_s = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(field=field, query=query_s)
 
         self.store_transaction_metric(
             value=1,
@@ -2557,7 +2558,7 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
         query = AlertMetricsQueryBuilder(
             self.params,
             granularity=3600,
-            query=query,
+            query=query_s,
             dataset=Dataset.PerformanceMetrics,
             selected_columns=[field],
             config=QueryBuilderConfig(
@@ -2639,7 +2640,7 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
                                         Column("metric_id"),
                                         indexer.resolve(
                                             UseCaseID.TRANSACTIONS,
-                                            None,
+                                            1,
                                             "d:transactions/on_demand@none",
                                         ),
                                     ],
@@ -2654,7 +2655,7 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
             snql_query.select,
         )
 
-        query_hash_index = indexer.resolve(UseCaseID.TRANSACTIONS, None, QUERY_HASH_KEY)
+        query_hash_index = indexer.resolve(UseCaseID.TRANSACTIONS, 1, QUERY_HASH_KEY)
 
         query_hash_clause = Condition(
             lhs=Column(name=f"tags_raw[{query_hash_index}]"), op=Op.EQ, rhs="62b395db"
@@ -2692,7 +2693,7 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
                                 Column("metric_id"),
                                 indexer.resolve(
                                     UseCaseID.TRANSACTIONS,
-                                    None,
+                                    1,
                                     "c:transactions/on_demand@none",
                                 ),
                             ],
@@ -2704,7 +2705,7 @@ class AlertMetricsQueryBuilderTest(MetricBuilderBaseTest):
             snql_query.select,
         )
 
-        query_hash_index = indexer.resolve(UseCaseID.TRANSACTIONS, None, QUERY_HASH_KEY)
+        query_hash_index = indexer.resolve(UseCaseID.TRANSACTIONS, 1, QUERY_HASH_KEY)
 
         start_time_clause = Condition(lhs=Column(name="timestamp"), op=Op.GTE, rhs=self.start)
         end_time_clause = Condition(lhs=Column(name="timestamp"), op=Op.LT, rhs=self.end)

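Most of this file's churn renames the local `query` string to `query_s` wherever the name is later rebound to a query-builder object, because mypy rejects rebinding a variable to an incompatible type. A contrived sketch of the error (not code from this repo):

    query = "transaction.duration:>0"        # inferred as str
    query = MetricsQueryBuilder(params)      # error: Incompatible types in assignment
                                             # (expression has type "MetricsQueryBuilder",
                                             #  variable has type "str")

Alongside that, `TransactionMRI` is now imported from `sentry.snuba.metrics.naming_layer.mri`, the module that defines it; the `indexer.resolve(UseCaseID.TRANSACTIONS, 1, ...)` calls pass a real org id instead of `None` to match the resolver's non-optional parameter; and `assert query.limit is not None` narrows the optional limit before `.limit` is dereferenced.
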
+ 1 - 5
tests/sentry/sentry_apps/test_sentry_app_creator.py

@@ -130,11 +130,7 @@ class TestCreator(TestCase):
         assert AuditLogEntry.objects.filter(event=audit_log.get_event_id("SENTRY_APP_ADD")).exists()
 
     def test_blank_schema(self):
-        self.creator.schema = ""
-        assert self.creator.run(user=self.user)
-
-    def test_none_schema(self):
-        self.creator.schema = None
+        self.creator.schema = {}
         assert self.creator.run(user=self.user)
 
     def test_schema_with_no_elements(self):

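`SentryAppCreator.schema` is evidently typed as a dict, so the two untypeable variants (`""` and `None`) collapse into a single empty-schema test using `{}`.
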
+ 26 - 24
tests/sentry/sentry_metrics/consumers/test_slicing_router.py

@@ -42,25 +42,25 @@ def metrics_message(org_id: int) -> Message[RoutingPayload]:
 
 @pytest.fixture
 def setup_slicing(monkeypatch) -> None:
-    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "sliceable", {(0, 128): 0, (128, 256): 1})
+    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "generic_metrics", {(0, 128): 0, (128, 256): 1})
     monkeypatch.setitem(
         SLICED_KAFKA_TOPICS,
         (KAFKA_SNUBA_GENERIC_METRICS, 0),
-        {"topic": "sliced_topic_0", "cluster": "sliceable_0"},
+        {"topic": "sliced_topic_0", "cluster": "generic_metrics_0"},
     )
     monkeypatch.setitem(
         SLICED_KAFKA_TOPICS,
         (KAFKA_SNUBA_GENERIC_METRICS, 1),
-        {"topic": "sliced_topic_1", "cluster": "sliceable_1"},
+        {"topic": "sliced_topic_1", "cluster": "generic_metrics_1"},
     )
     monkeypatch.setitem(
         KAFKA_CLUSTERS,
-        "sliceable_0",
+        "generic_metrics_0",
         {"bootstrap.servers": "127.0.0.1:9092"},
     )
     monkeypatch.setitem(
         KAFKA_CLUSTERS,
-        "sliceable_1",
+        "generic_metrics_1",
         {"bootstrap.servers": "127.0.0.1:9092"},
     )
 
@@ -72,7 +72,7 @@ def test_with_slicing(metrics_message, setup_slicing) -> None:
     based on the org_id header.
     """
     org_id = metrics_message.payload.routing_header.get("org_id")
-    router = SlicingRouter("sliceable")
+    router = SlicingRouter("generic_metrics")
     route = router.get_route_for_message(metrics_message)
     if int(org_id) % SENTRY_SLICING_LOGICAL_PARTITION_COUNT < 128:
         assert route.topic.name == "sliced_topic_0"
@@ -103,7 +103,7 @@ def test_with_no_org_in_routing_header(setup_slicing) -> None:
         )
     )
     assert message.payload.routing_header.get("org_id") is None
-    router = SlicingRouter("sliceable")
+    router = SlicingRouter("generic_metrics")
     with pytest.raises(MissingOrgInRoutingHeader):
         _ = router.get_route_for_message(message)
 
@@ -126,7 +126,7 @@ def test_with_misconfiguration(metrics_message, monkeypatch):
     )
 
     with pytest.raises(SlicingConfigurationException):
-        _ = SlicingRouter("sliceable")
+        _ = SlicingRouter("generic_metrics")
 
 
 def test_validate_slicing_consumer_config(monkeypatch) -> None:
@@ -136,66 +136,68 @@ def test_validate_slicing_consumer_config(monkeypatch) -> None:
     with pytest.raises(
         SlicingConfigurationException, match=r"not defined in settings.SENTRY_SLICING_CONFIG"
     ):
-        _validate_slicing_consumer_config("sliceable")
+        _validate_slicing_consumer_config("generic_metrics")
 
     # Let the check for slicing config pass
-    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "sliceable", {(0, 128): 0, (128, 256): 1})
+    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "generic_metrics", {(0, 128): 0, (128, 256): 1})
 
     # Create the sliced kafka topics but omit defining the broker config in
     # KAFKA_CLUSTERS
     monkeypatch.setitem(
         SLICED_KAFKA_TOPICS,
-        ("sliceable", 0),
-        {"topic": "sliced_topic_0", "cluster": "sliceable_0"},
+        ("generic_metrics", 0),
+        {"topic": "sliced_topic_0", "cluster": "generic_metrics_0"},
     )
     monkeypatch.setitem(
         SLICED_KAFKA_TOPICS,
-        ("sliceable", 1),
-        {"topic": "sliced_topic_1", "cluster": "sliceable_1"},
+        ("generic_metrics", 1),
+        {"topic": "sliced_topic_1", "cluster": "generic_metrics_1"},
     )
     monkeypatch.setitem(
         KAFKA_CLUSTERS,
-        "sliceable_0",
+        "generic_metrics_0",
         {"bootstrap.servers": "127.0.0.1:9092"},
     )
     with pytest.raises(SlicingConfigurationException, match=r"Broker configuration missing"):
-        _validate_slicing_consumer_config("sliceable")
+        _validate_slicing_consumer_config("generic_metrics")
 
     # Now add the broker config for the second slice
     monkeypatch.setitem(
         KAFKA_CLUSTERS,
-        "sliceable_1",
+        "generic_metrics_1",
         {"bootstrap.servers": "127.0.0.1:9092"},
     )
 
     try:
-        _validate_slicing_consumer_config("sliceable")
+        _validate_slicing_consumer_config("generic_metrics")
     except SlicingConfigurationException as e:
         assert False, f"Should not raise exception: {e}"
 
 
 def test_validate_slicing_config(monkeypatch) -> None:
     # Valid setup(s)
-    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "sliceable", {(0, 128): 0, (128, 256): 1})
+    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "generic_metrics", {(0, 128): 0, (128, 256): 1})
     _validate_slicing_config()
 
     monkeypatch.setitem(
-        SENTRY_SLICING_CONFIG, "sliceable", {(0, 64): 0, (64, 66): 1, (66, 100): 0, (100, 256): 1}
+        SENTRY_SLICING_CONFIG,
+        "generic_metrics",
+        {(0, 64): 0, (64, 66): 1, (66, 100): 0, (100, 256): 1},
     )
     _validate_slicing_config()
 
     # Assign a given logical partition to two slices
-    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "sliceable", {(0, 129): 0, (128, 256): 1})
+    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "generic_metrics", {(0, 129): 0, (128, 256): 1})
     with pytest.raises(
         SlicingConfigurationException,
-        match=r"'sliceable' has two assignments to logical partition 128",
+        match=r"'generic_metrics' has two assignments to logical partition 128",
     ):
         _validate_slicing_config()
 
     # Fail to assign a logical partition to a slice
-    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "sliceable", {(0, 127): 0, (128, 256): 1})
+    monkeypatch.setitem(SENTRY_SLICING_CONFIG, "generic_metrics", {(0, 127): 0, (128, 256): 1})
     with pytest.raises(
         SlicingConfigurationException,
-        match=r"'sliceable' is missing logical partition assignments: \{127\}",
+        match=r"'generic_metrics' is missing logical partition assignments: \{127\}",
     ):
         _validate_slicing_config()

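The `"sliceable"` placeholder key is replaced with `"generic_metrics"` throughout so the fixtures agree with the typed settings, which presumably key `SENTRY_SLICING_CONFIG` by the known sliceable names. A sketch of that shape (the names and types here are illustrative assumptions, not the actual settings module):

    from typing import Literal

    Sliceable = Literal["generic_metrics"]

    SENTRY_SLICING_CONFIG: dict[Sliceable, dict[tuple[int, int], int]] = {
        # logical-partition range -> slice id
        "generic_metrics": {(0, 128): 0, (128, 256): 1},
    }

With a `Literal` key type like this, `monkeypatch.setitem(SENTRY_SLICING_CONFIG, "sliceable", ...)` would no longer type-check, which is what forced the rename.
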
Some files were not shown because too many files changed in this diff