
ref: fix some mypy issues in snuba tests (#53458)

anthony sottile · 1 year ago
parent commit fdaefe252c

+ 0 - 26
pyproject.toml

@@ -813,7 +813,6 @@ module = [
     "sentry.snuba.metrics.mqb_query_transformer",
     "sentry.snuba.metrics.query",
     "sentry.snuba.metrics.query_builder",
-    "sentry.snuba.metrics.utils",
     "sentry.snuba.metrics_enhanced_performance",
     "sentry.snuba.metrics_performance",
     "sentry.snuba.models",
@@ -1260,34 +1259,9 @@ module = [
     "tests.sentry_plugins.trello.test_plugin",
     "tests.sentry_plugins.twilio.test_plugin",
     "tests.sentry_plugins.victorops.test_plugin",
-    "tests.snuba.api.endpoints.test_discover_key_transactions",
-    "tests.snuba.api.endpoints.test_group_details",
-    "tests.snuba.api.endpoints.test_group_events",
-    "tests.snuba.api.endpoints.test_organization_events",
-    "tests.snuba.api.endpoints.test_organization_events_facets_performance",
-    "tests.snuba.api.endpoints.test_organization_events_histogram",
-    "tests.snuba.api.endpoints.test_organization_events_mep",
-    "tests.snuba.api.endpoints.test_organization_events_meta",
-    "tests.snuba.api.endpoints.test_organization_events_spans_performance",
-    "tests.snuba.api.endpoints.test_organization_events_stats",
-    "tests.snuba.api.endpoints.test_organization_events_trace",
-    "tests.snuba.api.endpoints.test_organization_events_vitals",
-    "tests.snuba.api.endpoints.test_organization_group_index",
-    "tests.snuba.api.endpoints.test_organization_group_index_stats",
-    "tests.snuba.api.endpoints.test_organization_metrics_meta",
-    "tests.snuba.api.endpoints.test_organization_sessions",
-    "tests.snuba.api.endpoints.test_project_event_details",
-    "tests.snuba.api.endpoints.test_project_group_index",
-    "tests.snuba.api.serializers.test_group",
-    "tests.snuba.api.serializers.test_group_stream",
-    "tests.snuba.incidents.test_tasks",
     "tests.snuba.rules.conditions.test_event_frequency",
     "tests.snuba.sessions.test_sessions",
-    "tests.snuba.sessions.test_sessions_v2",
     "tests.snuba.tagstore.test_tagstore_backend",
-    "tests.snuba.tasks.test_unmerge",
-    "tests.snuba.test_util",
-    "tests.snuba.tsdb.test_tsdb_backend",
 ]
 disable_error_code = [
     "arg-type",

+ 3 - 3
src/sentry/api/endpoints/organization_sessions.py

@@ -48,7 +48,7 @@ class OrganizationSessionsEndpoint(OrganizationEventsEndpointBase):
                         request, organization, offset=request_offset, limit=request_limit
                     )
 
-                return release_health.run_sessions_query(
+                return release_health.backend.run_sessions_query(
                     organization.id, query, span_op="sessions.endpoint"
                 )
 
@@ -73,11 +73,11 @@ class OrganizationSessionsEndpoint(OrganizationEventsEndpointBase):
 
         # HACK to prevent front-end crash when release health is sessions-based:
         query_params = MultiValueDict(request.GET)
-        if not release_health.is_metrics_based() and request.GET.get("interval") == "10s":
+        if not release_health.backend.is_metrics_based() and request.GET.get("interval") == "10s":
             query_params["interval"] = "1m"
 
         start, _ = get_date_range_from_params(query_params)
-        query_config = release_health.sessions_query_config(organization, start)
+        query_config = release_health.backend.sessions_query_config(organization, start)
 
         return QueryDefinition(
             query_params,

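Calling through release_health.backend rather than the module-level service wrapper is what makes these call sites checkable. A plausible reason (an inference, not stated in the diff) is that the wrapper forwards attribute access dynamically, so mypy sees every forwarded call as Any, while .backend exposes the concrete, annotated backend. A minimal sketch of that pattern with hypothetical class names:

    from __future__ import annotations

    from typing import Any


    class SessionsBackend:
        def run_sessions_query(self, org_id: int, query: object, span_op: str) -> dict[str, Any]:
            # Concrete, fully annotated entry point that mypy can check callers against.
            return {"org_id": org_id, "span_op": span_op, "groups": []}


    class LazyServiceWrapper:
        """Hypothetical delegator: plain attribute access is forwarded untyped."""

        def __init__(self, backend: SessionsBackend) -> None:
            self.backend = backend  # typed attribute

        def __getattr__(self, name: str) -> Any:
            # Only reached for names not defined on the wrapper itself,
            # so forwarded calls are invisible to the type checker.
            return getattr(self.backend, name)


    release_health = LazyServiceWrapper(SessionsBackend())

    release_health.run_sessions_query(1, {}, span_op="sessions.endpoint")          # unchecked: Any
    release_health.backend.run_sessions_query(1, {}, span_op="sessions.endpoint")  # checked by mypy
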
+ 2 - 2
src/sentry/api/serializers/models/group.py

@@ -172,8 +172,8 @@ class BaseGroupSerializerResponse(BaseGroupResponseOptional):
 
 class SeenStats(TypedDict):
     times_seen: int
-    first_seen: datetime
-    last_seen: datetime
+    first_seen: datetime | None
+    last_seen: datetime | None
     user_count: int
 
 

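Widening first_seen/last_seen to datetime | None pushes None-handling onto consumers of SeenStats. A small illustration of the narrowing callers now need (the formatting helper is invented for the example):

    from __future__ import annotations

    from datetime import datetime
    from typing import TypedDict


    class SeenStats(TypedDict):
        times_seen: int
        first_seen: datetime | None
        last_seen: datetime | None
        user_count: int


    def describe_last_seen(stats: SeenStats) -> str:
        last_seen = stats["last_seen"]
        # mypy requires the None check before calling datetime methods.
        return last_seen.isoformat() if last_seen is not None else "never"


    print(describe_last_seen({"times_seen": 0, "first_seen": None, "last_seen": None, "user_count": 0}))
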
+ 1 - 1
src/sentry/incidents/action_handlers.py

@@ -117,7 +117,7 @@ class EmailActionHandler(ActionHandler):
             )
             self.build_message(email_context, trigger_status, user_id).send_async(to=[email])
 
-    def build_message(self, context, status, user_id):
+    def build_message(self, context, status, user_id) -> MessageBuilder:
         display = self.status_display[status]
         return MessageBuilder(
             subject="[{}] {} - {}".format(

+ 2 - 2
src/sentry/issues/ongoing.py

@@ -20,8 +20,8 @@ def bulk_transition_group_to_ongoing(
 
     Group.objects.update_group_status(
         groups=groups_to_transistion,
-        status=GroupStatus.UNRESOLVED,  # type: ignore
-        substatus=GroupSubStatus.ONGOING,  # type: ignore
+        status=GroupStatus.UNRESOLVED,
+        substatus=GroupSubStatus.ONGOING,
         activity_type=ActivityType.AUTO_SET_ONGOING,
         activity_data=activity_data,
         send_activity_notification=False,

+ 3 - 3
src/sentry/models/group.py

@@ -410,8 +410,8 @@ class GroupManager(BaseManager):
     def update_group_status(
         self,
         groups: Sequence[Group],
-        status: GroupStatus,
-        substatus: GroupSubStatus | None,
+        status: int,
+        substatus: int | None,
         activity_type: ActivityType,
         activity_data: Optional[Mapping[str, Any]] = None,
         send_activity_notification: bool = True,
@@ -531,7 +531,7 @@ class Group(Model):
     score = BoundedIntegerField(default=0)
     # deprecated, do not use. GroupShare has superseded
     is_public = models.BooleanField(default=False, null=True)
-    data = GzippedDictField(blank=True, null=True)
+    data: models.Field[dict[str, Any], dict[str, Any]] = GzippedDictField(blank=True, null=True)
     short_id = BoundedBigIntegerField(null=True)
     type = BoundedPositiveIntegerField(default=ErrorGroupType.type_id, db_index=True)
 

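Two separate fixes land in this file. Typing status/substatus as plain ints is presumably why the `# type: ignore` comments in src/sentry/issues/ongoing.py above could be dropped, and the explicit models.Field[...] annotation tells django-stubs what GzippedDictField stores and returns. A compressed sketch of the first pattern, assuming the status classes are containers of int constants rather than enums:

    from __future__ import annotations


    class GroupStatus:
        # Assumed shape: plain int class attributes, not an Enum.
        UNRESOLVED = 0
        RESOLVED = 1


    class GroupSubStatus:
        ONGOING = 3


    def update_group_status(status: int, substatus: int | None) -> None:
        ...


    # With the parameters annotated as GroupStatus/GroupSubStatus, passing these int
    # constants needed `# type: ignore`; typing them as int lets the call check cleanly.
    update_group_status(GroupStatus.UNRESOLVED, GroupSubStatus.ONGOING)
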
+ 55 - 20
src/sentry/snuba/metrics/utils.py

@@ -1,3 +1,25 @@
+from __future__ import annotations
+
+import re
+from abc import ABC
+from datetime import datetime, timedelta, timezone
+from typing import (
+    Collection,
+    Dict,
+    Generator,
+    List,
+    Literal,
+    Mapping,
+    Optional,
+    Sequence,
+    Tuple,
+    TypedDict,
+    Union,
+    overload,
+)
+
+from sentry.snuba.dataset import EntityKey
+
 __all__ = (
     "MAX_POINTS",
     "GRANULARITY",
@@ -40,24 +62,6 @@ __all__ = (
     "NON_RESOLVABLE_TAG_VALUES",
 )
 
-import re
-from abc import ABC
-from datetime import datetime, timedelta, timezone
-from typing import (
-    Collection,
-    Dict,
-    Generator,
-    List,
-    Literal,
-    Mapping,
-    Optional,
-    Sequence,
-    Tuple,
-    TypedDict,
-    Union,
-)
-
-from sentry.snuba.dataset import EntityKey
 
 #: Max number of data points per time series:
 MAX_POINTS = 10000
@@ -81,6 +85,7 @@ MetricOperationType = Literal[
     "p90",
     "p95",
     "p99",
+    "percentage",
     "histogram",
     "rate",
     "count_web_vitals",
@@ -116,7 +121,15 @@ MetricUnit = Literal[
     "exabyte",
 ]
 #: The type of metric, which determines the snuba entity to query
-MetricType = Literal["counter", "set", "distribution", "numeric"]
+MetricType = Literal[
+    "counter",
+    "set",
+    "distribution",
+    "numeric",
+    "generic_counter",
+    "generic_set",
+    "generic_distribution",
+]
 
 MetricEntity = Literal[
     "metrics_counters",
@@ -350,9 +363,31 @@ class OrderByNotSupportedOverCompositeEntityException(NotSupportedOverCompositeE
     ...
 
 
+@overload
+def to_intervals(start: None, end: datetime, interval_seconds: int) -> tuple[None, None, int]:
+    ...
+
+
+@overload
+def to_intervals(start: datetime, end: None, interval_seconds: int) -> tuple[None, None, int]:
+    ...
+
+
+@overload
+def to_intervals(start: None, end: None, interval_seconds: int) -> tuple[None, None, int]:
+    ...
+
+
+@overload
+def to_intervals(
+    start: datetime, end: datetime, interval_seconds: int
+) -> tuple[datetime, datetime, int]:
+    ...
+
+
 def to_intervals(
     start: Optional[datetime], end: Optional[datetime], interval_seconds: int
-) -> Tuple[Optional[datetime], Optional[datetime], int]:
+) -> tuple[datetime, datetime, int] | tuple[None, None, int]:
     """
     Given a `start` date, `end` date and an alignment interval in seconds returns the aligned start, end and
     the number of total intervals in [start:end]

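The four @overload signatures let callers keep precise types: two real datetimes come back as non-optional datetimes, while any None input yields (None, None, count). The same technique in miniature, using a hypothetical align() helper rather than the real to_intervals:

    from __future__ import annotations

    from datetime import datetime, timedelta
    from typing import Optional, overload


    @overload
    def align(start: datetime, end: datetime) -> tuple[datetime, datetime]: ...
    @overload
    def align(start: None, end: Optional[datetime]) -> tuple[None, None]: ...
    @overload
    def align(start: Optional[datetime], end: None) -> tuple[None, None]: ...


    def align(start, end):
        # Single runtime implementation; the overloads above only narrow static types.
        if start is None or end is None:
            return None, None
        return start, end


    s, e = align(datetime(2023, 7, 1), datetime(2023, 7, 2))
    print(e - s == timedelta(days=1))  # mypy knows s and e are datetime, not Optional
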
+ 4 - 1
src/sentry/tasks/unmerge.py

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import logging
 from collections import defaultdict
 from functools import reduce
@@ -8,6 +10,7 @@ from django.db import router, transaction
 from sentry import eventstore, similarity, tsdb
 from sentry.constants import DEFAULT_LOGGER_NAME, LOG_LEVELS_MAP
 from sentry.event_manager import generate_culprit
+from sentry.eventstore.models import BaseEvent
 from sentry.models import (
     Activity,
     Environment,
@@ -165,7 +168,7 @@ def get_group_backfill_attributes(caches, group, events):
     }
 
 
-def get_fingerprint(event):
+def get_fingerprint(event: BaseEvent) -> str | None:
     # TODO: This *might* need to be protected from an IndexError?
     return event.get_primary_hash()
 

+ 1 - 1
src/sentry/testutils/cases.py

@@ -1673,7 +1673,7 @@ class MetricsEnhancedPerformanceTestCase(BaseMetricsLayerTestCase, TestCase):
 
     def store_transaction_metric(
         self,
-        value: List[int] | int,
+        value: List[float] | float,
         metric: str = "transaction.duration",
         internal_metric: Optional[str] = None,
         entity: Optional[str] = None,

+ 84 - 79
tests/sentry/utils/test_outcomes.py

@@ -1,24 +1,21 @@
-import copy
-from unittest.mock import Mock
+from unittest import mock
 
 import pytest
+from django.conf import settings
 
 from sentry.utils import json, kafka_config, outcomes
 from sentry.utils.outcomes import Outcome, track_outcome
 
 
 @pytest.fixture(autouse=True)
-def setup(monkeypatch, settings):
+def setup(monkeypatch):
     # Rely on the fact that the publisher is initialized lazily
-    monkeypatch.setattr(kafka_config, "get_kafka_producer_cluster_options", Mock())
-    monkeypatch.setattr(outcomes, "KafkaPublisher", Mock())
-
-    # Reset internals of the outcomes module
-    monkeypatch.setattr(outcomes, "outcomes_publisher", None)
-    monkeypatch.setattr(outcomes, "billing_publisher", None)
-
-    # Settings fixture does not restore nested mutable attributes
-    settings.KAFKA_TOPICS = copy.deepcopy(settings.KAFKA_TOPICS)
+    with mock.patch.object(kafka_config, "get_kafka_producer_cluster_options"):
+        with mock.patch.object(outcomes, "KafkaPublisher"):
+            # Reset internals of the outcomes module
+            with mock.patch.object(outcomes, "outcomes_publisher", None):
+                with mock.patch.object(outcomes, "billing_publisher", None):
+                    yield
 
 
 @pytest.mark.parametrize(
@@ -56,7 +53,7 @@ def test_parse_outcome(name, outcome):
     assert Outcome.parse(name) == outcome
 
 
-def test_track_outcome_default(settings):
+def test_track_outcome_default():
     """
     Asserts an outcomes serialization roundtrip with defaults.
 
@@ -66,40 +63,43 @@ def test_track_outcome_default(settings):
     """
 
     # Provide a billing cluster config that should be ignored
-    settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES_BILLING] = {"cluster": "different"}
-
-    track_outcome(
-        org_id=1,
-        project_id=2,
-        key_id=3,
-        outcome=Outcome.INVALID,
-        reason="project_id",
-    )
-
-    cluster_args, _ = kafka_config.get_kafka_producer_cluster_options.call_args
-    assert cluster_args == (settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]["cluster"],)
-
-    assert outcomes.outcomes_publisher
-    (topic_name, payload), _ = outcomes.outcomes_publisher.publish.call_args
-    assert topic_name == settings.KAFKA_OUTCOMES
-
-    data = json.loads(payload)
-    del data["timestamp"]
-    assert data == {
-        "org_id": 1,
-        "project_id": 2,
-        "key_id": 3,
-        "outcome": Outcome.INVALID.value,
-        "reason": "project_id",
-        "event_id": None,
-        "category": None,
-        "quantity": 1,
-    }
-
-    assert outcomes.billing_publisher is None
-
-
-def test_track_outcome_billing(settings):
+    with mock.patch.dict(
+        settings.KAFKA_TOPICS, {settings.KAFKA_OUTCOMES_BILLING: {"cluster": "different"}}
+    ):
+        track_outcome(
+            org_id=1,
+            project_id=2,
+            key_id=3,
+            outcome=Outcome.INVALID,
+            reason="project_id",
+        )
+
+        cluster_args, _ = kafka_config.get_kafka_producer_cluster_options.call_args
+        assert cluster_args == (
+            kafka_config.get_topic_definition(settings.KAFKA_OUTCOMES)["cluster"],
+        )
+
+        assert outcomes.outcomes_publisher
+        (topic_name, payload), _ = outcomes.outcomes_publisher.publish.call_args
+        assert topic_name == settings.KAFKA_OUTCOMES
+
+        data = json.loads(payload)
+        del data["timestamp"]
+        assert data == {
+            "org_id": 1,
+            "project_id": 2,
+            "key_id": 3,
+            "outcome": Outcome.INVALID.value,
+            "reason": "project_id",
+            "event_id": None,
+            "category": None,
+            "quantity": 1,
+        }
+
+        assert outcomes.billing_publisher is None
+
+
+def test_track_outcome_billing():
     """
     Checks that outcomes are routed to the SHARED topic within the same cluster
     in default configuration.
@@ -113,7 +113,7 @@ def test_track_outcome_billing(settings):
     )
 
     cluster_args, _ = kafka_config.get_kafka_producer_cluster_options.call_args
-    assert cluster_args == (settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]["cluster"],)
+    assert cluster_args == (kafka_config.get_topic_definition(settings.KAFKA_OUTCOMES)["cluster"],)
 
     assert outcomes.outcomes_publisher
     (topic_name, _), _ = outcomes.outcomes_publisher.publish.call_args
@@ -122,31 +122,35 @@ def test_track_outcome_billing(settings):
     assert outcomes.billing_publisher is None
 
 
-def test_track_outcome_billing_topic(settings):
+def test_track_outcome_billing_topic():
     """
     Checks that outcomes are routed to the DEDICATED billing topic within the
     same cluster in default configuration.
     """
 
-    settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES_BILLING] = {
-        "cluster": settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]["cluster"],
-    }
+    with mock.patch.dict(
+        settings.KAFKA_TOPICS,
+        {
+            settings.KAFKA_OUTCOMES_BILLING: {
+                "cluster": settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]["cluster"],
+            }
+        },
+    ):
+        track_outcome(
+            org_id=1,
+            project_id=1,
+            key_id=1,
+            outcome=Outcome.ACCEPTED,
+        )
 
-    track_outcome(
-        org_id=1,
-        project_id=1,
-        key_id=1,
-        outcome=Outcome.ACCEPTED,
-    )
+        cluster_args, _ = kafka_config.get_kafka_producer_cluster_options.call_args
+        assert cluster_args == (settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]["cluster"],)
 
-    cluster_args, _ = kafka_config.get_kafka_producer_cluster_options.call_args
-    assert cluster_args == (settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES]["cluster"],)
+        assert outcomes.outcomes_publisher
+        (topic_name, _), _ = outcomes.outcomes_publisher.publish.call_args
+        assert topic_name == settings.KAFKA_OUTCOMES_BILLING
 
-    assert outcomes.outcomes_publisher
-    (topic_name, _), _ = outcomes.outcomes_publisher.publish.call_args
-    assert topic_name == settings.KAFKA_OUTCOMES_BILLING
-
-    assert outcomes.billing_publisher is None
+        assert outcomes.billing_publisher is None
 
 
 def test_track_outcome_billing_cluster(settings):
@@ -154,20 +158,21 @@ def test_track_outcome_billing_cluster(settings):
     Checks that outcomes are routed to the dedicated cluster and topic.
     """
 
-    settings.KAFKA_TOPICS[settings.KAFKA_OUTCOMES_BILLING] = {"cluster": "different"}
+    with mock.patch.dict(
+        settings.KAFKA_TOPICS, {settings.KAFKA_OUTCOMES_BILLING: {"cluster": "different"}}
+    ):
+        track_outcome(
+            org_id=1,
+            project_id=1,
+            key_id=1,
+            outcome=Outcome.ACCEPTED,
+        )
 
-    track_outcome(
-        org_id=1,
-        project_id=1,
-        key_id=1,
-        outcome=Outcome.ACCEPTED,
-    )
-
-    cluster_args, _ = kafka_config.get_kafka_producer_cluster_options.call_args
-    assert cluster_args == ("different",)
+        cluster_args, _ = kafka_config.get_kafka_producer_cluster_options.call_args
+        assert cluster_args == ("different",)
 
-    assert outcomes.billing_publisher
-    (topic_name, _), _ = outcomes.billing_publisher.publish.call_args
-    assert topic_name == settings.KAFKA_OUTCOMES_BILLING
+        assert outcomes.billing_publisher
+        (topic_name, _), _ = outcomes.billing_publisher.publish.call_args
+        assert topic_name == settings.KAFKA_OUTCOMES_BILLING
 
-    assert outcomes.outcomes_publisher is None
+        assert outcomes.outcomes_publisher is None

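The old fixture deep-copied KAFKA_TOPICS because, as its removed comment notes, the settings fixture does not restore mutations to nested mutable values; switching to mock.patch.dict scopes each override and restores the mapping on exit. A standalone illustration of that behaviour, using a toy dictionary rather than the real settings object:

    from unittest import mock

    KAFKA_TOPICS = {"outcomes": {"cluster": "default"}}


    def test_override_is_reverted() -> None:
        with mock.patch.dict(KAFKA_TOPICS, {"outcomes-billing": {"cluster": "different"}}):
            # Inside the block the extra topic is visible...
            assert KAFKA_TOPICS["outcomes-billing"]["cluster"] == "different"
        # ...and on exit patch.dict restores the original mapping, added keys included.
        assert "outcomes-billing" not in KAFKA_TOPICS
        assert KAFKA_TOPICS["outcomes"]["cluster"] == "default"
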
Some files were not shown because too many files changed in this diff