Browse Source

ref(metrics): rename name param to mri (#73494)

Ogi 8 months ago
parent
commit
81a2c4fbb1

+ 3 - 9
src/sentry/sentry_metrics/client/snuba.py

@@ -58,14 +58,12 @@ class SnubaMetricsBackend(GenericMetricsBackend):
         """
         assert in_test_environment(), "This backend should only be used in testing environments"
         BaseMetricsTestCase.store_metric(
-            name=build_mri(metric_name, "c", use_case_id, unit),
+            mri=build_mri(metric_name, "c", use_case_id, unit),
             tags=tags,
             value=value,
             org_id=org_id,
             project_id=project_id,
-            type="counter",
             timestamp=int(datetime.now().timestamp()),
-            use_case_id=use_case_id,
         )
 
     def set(
@@ -86,14 +84,12 @@ class SnubaMetricsBackend(GenericMetricsBackend):
         assert in_test_environment(), "This backend should only be used in testing environments"
         for val in value:
             BaseMetricsTestCase.store_metric(
-                name=build_mri(metric_name, "s", use_case_id, unit),
+                mri=build_mri(metric_name, "s", use_case_id, unit),
                 tags=tags,
                 value=val,
                 org_id=org_id,
                 project_id=project_id,
-                type="set",
                 timestamp=int(datetime.now().timestamp()),
-                use_case_id=use_case_id,
             )
 
     def distribution(
@@ -114,14 +110,12 @@ class SnubaMetricsBackend(GenericMetricsBackend):
         assert in_test_environment(), "This backend should only be used in testing environments"
         for val in value:
             BaseMetricsTestCase.store_metric(
-                name=build_mri(metric_name, "d", use_case_id, unit),
+                mri=build_mri(metric_name, "d", use_case_id, unit),
                 tags=tags,
                 value=val,
                 org_id=org_id,
                 project_id=project_id,
-                type="distribution",
                 timestamp=int(datetime.now().timestamp()),
-                use_case_id=use_case_id,
             )
 
     def close(self) -> None:

+ 38 - 67
src/sentry/testutils/cases.py

@@ -12,7 +12,7 @@ from collections.abc import Mapping, Sequence
 from contextlib import contextmanager
 from datetime import UTC, datetime, timedelta
 from io import BytesIO
-from typing import Any, Literal, TypedDict, Union
+from typing import Any, TypedDict, Union
 from unittest import mock
 from urllib.parse import urlencode
 from uuid import uuid4
@@ -1627,6 +1627,13 @@ class BaseSpansTestCase(SnubaTestCase):
 
 
 class BaseMetricsTestCase(SnubaTestCase):
+    ENTITY_SHORTHANDS = {
+        "c": "counter",
+        "s": "set",
+        "d": "distribution",
+        "g": "gauge",
+    }
+
     snuba_endpoint = "/tests/entities/{entity}/insert"
 
     def store_session(self, session):
@@ -1648,11 +1655,10 @@ class BaseMetricsTestCase(SnubaTestCase):
         # This check is not yet reflected in relay, see https://getsentry.atlassian.net/browse/INGEST-464
         user_is_nil = user is None or user == "00000000-0000-0000-0000-000000000000"
 
-        def push(type, mri: str, tags, value):
+        def push(mri: str, tags, value):
             self.store_metric(
                 org_id,
                 project_id,
-                type,
                 mri,
                 {**tags, **base_tags},
                 int(
@@ -1661,33 +1667,31 @@ class BaseMetricsTestCase(SnubaTestCase):
                     else session["started"].timestamp()
                 ),
                 value,
-                use_case_id=UseCaseID.SESSIONS,
             )
 
         # seq=0 is equivalent to relay's session.init, init=True is transformed
         # to seq=0 in Relay.
         if session["seq"] == 0:  # init
-            push("counter", SessionMRI.RAW_SESSION.value, {"session.status": "init"}, +1)
+            push(SessionMRI.RAW_SESSION.value, {"session.status": "init"}, +1)
 
         status = session["status"]
 
         # Mark the session as errored, which includes fatal sessions.
         if session.get("errors", 0) > 0 or status not in ("ok", "exited"):
-            push("set", SessionMRI.RAW_ERROR.value, {}, session["session_id"])
+            push(SessionMRI.RAW_ERROR.value, {}, session["session_id"])
             if not user_is_nil:
-                push("set", SessionMRI.RAW_USER.value, {"session.status": "errored"}, user)
+                push(SessionMRI.RAW_USER.value, {"session.status": "errored"}, user)
         elif not user_is_nil:
-            push("set", SessionMRI.RAW_USER.value, {}, user)
+            push(SessionMRI.RAW_USER.value, {}, user)
 
         if status in ("abnormal", "crashed"):  # fatal
-            push("counter", SessionMRI.RAW_SESSION.value, {"session.status": status}, +1)
+            push(SessionMRI.RAW_SESSION.value, {"session.status": status}, +1)
             if not user_is_nil:
-                push("set", SessionMRI.RAW_USER.value, {"session.status": status}, user)
+                push(SessionMRI.RAW_USER.value, {"session.status": status}, user)
 
         if status == "exited":
             if session["duration"] is not None:
                 push(
-                    "distribution",
                     SessionMRI.RAW_DURATION.value,
                     {"session.status": status},
                     session["duration"],
@@ -1702,14 +1706,17 @@ class BaseMetricsTestCase(SnubaTestCase):
         cls,
         org_id: int,
         project_id: int,
-        type: Literal["counter", "set", "distribution", "gauge"],
-        name: str,
+        mri: str,
         tags: dict[str, str],
         timestamp: int,
         value: Any,
-        use_case_id: UseCaseID,
         aggregation_option: AggregationOption | None = None,
     ) -> None:
+
+        parsed = parse_mri(mri)
+        metric_type = parsed.entity
+        use_case_id = UseCaseID(parsed.namespace)
+
         mapping_meta = {}
 
         def metric_id(key: str):
@@ -1751,12 +1758,12 @@ class BaseMetricsTestCase(SnubaTestCase):
 
         assert not isinstance(value, list)
 
-        if type == "set":
+        if metric_type == "s":
             # Relay uses a different hashing algorithm, but that's ok
             value = [int.from_bytes(hashlib.md5(str(value).encode()).digest()[:4], "big")]
-        elif type == "distribution":
+        elif metric_type == "d":
             value = [value]
-        elif type == "gauge":
+        elif metric_type == "g":
             # In case we pass either an int or float, we will emit a gauge with all the same values.
             if not isinstance(value, dict):
                 value = {
@@ -1770,10 +1777,10 @@ class BaseMetricsTestCase(SnubaTestCase):
         msg = {
             "org_id": org_id,
             "project_id": project_id,
-            "metric_id": metric_id(name),
+            "metric_id": metric_id(mri),
             "timestamp": timestamp,
             "tags": {tag_key(key): tag_value(value) for key, value in tags.items()},
-            "type": {"counter": "c", "set": "s", "distribution": "d", "gauge": "g"}[type],
+            "type": metric_type,
             "value": value,
             "retention_days": 90,
             "use_case_id": use_case_id.value,
@@ -1790,9 +1797,9 @@ class BaseMetricsTestCase(SnubaTestCase):
             msg["aggregation_option"] = aggregation_option.value
 
         if METRIC_PATH_MAPPING[use_case_id] == UseCaseKey.PERFORMANCE:
-            entity = f"generic_metrics_{type}s"
+            entity = f"generic_metrics_{cls.ENTITY_SHORTHANDS[metric_type]}s"
         else:
-            entity = f"metrics_{type}s"
+            entity = f"metrics_{cls.ENTITY_SHORTHANDS[metric_type]}s"
 
         cls.__send_buckets([msg], entity)
 
@@ -1819,12 +1826,7 @@ class BaseMetricsTestCase(SnubaTestCase):
 
 
 class BaseMetricsLayerTestCase(BaseMetricsTestCase):
-    ENTITY_SHORTHANDS = {
-        "c": "counter",
-        "s": "set",
-        "d": "distribution",
-        "g": "gauge",
-    }
+
     # In order to avoid complexity and edge cases while working on tests, all children of this class should use
     # this mocked time, except in case in which a specific time is required. This is suggested because working
     # with time ranges in metrics is very error-prone and requires an in-depth knowledge of the underlying
@@ -1847,22 +1849,11 @@ class BaseMetricsLayerTestCase(BaseMetricsTestCase):
         """
         raise NotImplementedError
 
-    def _extract_entity_from_mri(self, mri_string: str) -> str | None:
-        """
-        Extracts the entity name from the MRI given a map of shorthands used to represent that entity in the MRI.
-        """
-        if (parsed_mri := parse_mri(mri_string)) is not None:
-            return self.ENTITY_SHORTHANDS[parsed_mri.entity]
-        else:
-            return None
-
     def _store_metric(
         self,
-        name: str,
+        mri: str,
         tags: dict[str, str],
         value: int | float | dict[str, int | float],
-        use_case_id: UseCaseID,
-        type: str | None = None,
         org_id: int | None = None,
         project_id: int | None = None,
         days_before_now: int = 0,
@@ -1881,8 +1872,7 @@ class BaseMetricsLayerTestCase(BaseMetricsTestCase):
         self.store_metric(
             org_id=self.organization.id if org_id is None else org_id,
             project_id=self.project.id if project_id is None else project_id,
-            type=self._extract_entity_from_mri(name) if type is None else type,
-            name=name,
+            mri=mri,
             tags=tags,
             timestamp=int(
                 (
@@ -1898,7 +1888,6 @@ class BaseMetricsLayerTestCase(BaseMetricsTestCase):
                 ).timestamp()
             ),
             value=value,
-            use_case_id=use_case_id,
             aggregation_option=aggregation_option,
         )
 
@@ -1948,13 +1937,11 @@ class BaseMetricsLayerTestCase(BaseMetricsTestCase):
         aggregation_option: AggregationOption | None = None,
     ):
         self._store_metric(
-            type=type,
-            name=name,
+            mri=name,
             tags=tags,
             value=value,
             org_id=org_id,
             project_id=project_id,
-            use_case_id=UseCaseID.TRANSACTIONS,
             days_before_now=days_before_now,
             hours_before_now=hours_before_now,
             minutes_before_now=minutes_before_now,
@@ -1976,13 +1963,11 @@ class BaseMetricsLayerTestCase(BaseMetricsTestCase):
         seconds_before_now: int = 0,
     ):
         self._store_metric(
-            type=type,
-            name=name,
+            mri=name,
             tags=tags,
             value=value,
             org_id=org_id,
             project_id=project_id,
-            use_case_id=UseCaseID.SESSIONS,
             days_before_now=days_before_now,
             hours_before_now=hours_before_now,
             minutes_before_now=minutes_before_now,
@@ -2004,13 +1989,11 @@ class BaseMetricsLayerTestCase(BaseMetricsTestCase):
         aggregation_option: AggregationOption | None = None,
     ):
         self._store_metric(
-            type=type,
-            name=name,
+            mri=name,
             tags=tags,
             value=value,
             org_id=org_id,
             project_id=project_id,
-            use_case_id=UseCaseID.CUSTOM,
             days_before_now=days_before_now,
             hours_before_now=hours_before_now,
             minutes_before_now=minutes_before_now,
@@ -2178,12 +2161,10 @@ class MetricsEnhancedPerformanceTestCase(BaseMetricsLayerTestCase, TestCase):
             self.store_metric(
                 org_id,
                 project,
-                self.TYPE_MAP[entity],
                 internal_metric,
                 tags,
                 int(metric_timestamp),
                 subvalue,
-                use_case_id=use_case_id,
                 aggregation_option=aggregation_option,
             )
 
@@ -2247,12 +2228,10 @@ class MetricsEnhancedPerformanceTestCase(BaseMetricsLayerTestCase, TestCase):
             self.store_metric(
                 org_id,
                 project,
-                self.TYPE_MAP[entity],
                 internal_metric,
                 tags,
                 int(metric_timestamp),
                 subvalue,
-                use_case_id=use_case_id,
             )
 
     def wait_for_metric_count(
@@ -3129,49 +3108,41 @@ class OrganizationMetricsIntegrationTestCase(MetricsAPIBaseTestCase):
         self.store_metric(
             org_id=org_id,
             project_id=self.project.id,
-            name="metric1",
+            mri="c:sessions/metric1@none",
             timestamp=now,
             tags={
                 "tag1": "value1",
                 "tag2": "value2",
             },
-            type="counter",
             value=1,
-            use_case_id=UseCaseID.SESSIONS,
         )
         self.store_metric(
             org_id=org_id,
             project_id=self.project.id,
-            name="metric1",
+            mri="c:sessions/metric1@none",
             timestamp=now,
             tags={"tag3": "value3"},
-            type="counter",
             value=1,
-            use_case_id=UseCaseID.SESSIONS,
         )
         self.store_metric(
             org_id=org_id,
             project_id=self.project.id,
-            name="metric2",
+            mri="s:sessions/metric2@none",
             timestamp=now,
             tags={
                 "tag4": "value3",
                 "tag1": "value2",
                 "tag2": "value1",
             },
-            type="set",
             value=123,
-            use_case_id=UseCaseID.SESSIONS,
         )
         self.store_metric(
             org_id=org_id,
             project_id=self.project.id,
-            name="metric3",
+            mri="s:sessions/metric3@none",
             timestamp=now,
             tags={},
-            type="set",
             value=123,
-            use_case_id=UseCaseID.SESSIONS,
         )
 
 

+ 11 - 16
tests/sentry/api/endpoints/test_organization_metrics_details.py

@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-from typing import Literal
 from unittest.mock import patch
 
 import pytest
@@ -187,23 +186,21 @@ class OrganizationMetricsDetailsTest(OrganizationMetricsIntegrationTestCase):
         block_metric("s:custom/user@none", [project_1])
         block_tags_of_metric("d:custom/page_load@millisecond", {"release"}, [project_2])
 
-        metrics: tuple[tuple[str, Literal["set", "counter", "distribution"], Project], ...] = (
-            ("s:custom/user@none", "set", project_1),
-            ("s:custom/user@none", "set", project_2),
-            ("c:custom/clicks@none", "counter", project_1),
-            ("d:custom/page_load@millisecond", "distribution", project_2),
-            ("g:custom/page_load@millisecond", "distribution", project_2),
+        metrics: tuple[tuple[str, Project], ...] = (
+            ("s:custom/user@none", project_1),
+            ("s:custom/user@none", project_2),
+            ("c:custom/clicks@none", project_1),
+            ("d:custom/page_load@millisecond", project_2),
+            ("g:custom/page_load@millisecond", project_2),
         )
-        for mri, entity, project in metrics:
+        for mri, project in metrics:
             self.store_metric(
                 project.organization.id,
                 project.id,
-                entity,
                 mri,
                 {"transaction": "/hello"},
                 int(self.now.timestamp()),
                 10,
-                UseCaseID.CUSTOM,
             )
 
         response = self.get_success_response(
@@ -328,20 +325,18 @@ class OrganizationMetricsDetailsTest(OrganizationMetricsIntegrationTestCase):
 
         block_metric("s:custom/user@none", [project_1])
 
-        metrics: tuple[tuple[str, Literal["set", "counter", "distribution"], Project], ...] = (
-            ("s:custom/user@none", "set", project_1),
-            ("c:custom/clicks@none", "counter", project_1),
+        metrics: tuple[tuple[str, Project], ...] = (
+            ("s:custom/user@none", project_1),
+            ("c:custom/clicks@none", project_1),
         )
-        for mri, entity, project in metrics:
+        for mri, project in metrics:
             self.store_metric(
                 project.organization.id,
                 project.id,
-                entity,
                 mri,
                 {"transaction": "/hello"},
                 int(self.now.timestamp()),
                 10,
-                UseCaseID.CUSTOM,
             )
 
         response = self.get_success_response(

+ 0 - 5
tests/sentry/api/endpoints/test_organization_metrics_metadata.py

@@ -2,7 +2,6 @@ from datetime import timedelta
 
 import pytest
 
-from sentry.sentry_metrics.use_case_id_registry import UseCaseID
 from sentry.snuba.metrics.naming_layer import TransactionMRI
 from sentry.testutils.cases import MetricsAPIBaseTestCase
 from sentry.testutils.helpers.datetime import freeze_time
@@ -47,7 +46,6 @@ class OrganizationMetricsTagValues(MetricsAPIBaseTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 TransactionMRI.DURATION.value,
                 {
                     "transaction": transaction,
@@ -57,7 +55,6 @@ class OrganizationMetricsTagValues(MetricsAPIBaseTestCase):
                 },
                 self.now().timestamp(),
                 value,
-                UseCaseID.TRANSACTIONS,
             )
         # Use Case: CUSTOM
         for value, release, tag_value, time in (
@@ -70,7 +67,6 @@ class OrganizationMetricsTagValues(MetricsAPIBaseTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 "d:custom/my_test_metric@percent",
                 {
                     "transaction": "/hello",
@@ -81,7 +77,6 @@ class OrganizationMetricsTagValues(MetricsAPIBaseTestCase):
                 },
                 self.now().timestamp(),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         self.prod_env = self.create_environment(name="prod", project=self.project)

+ 0 - 5
tests/sentry/api/endpoints/test_organization_metrics_tag_details_v2.py

@@ -2,7 +2,6 @@ from datetime import timedelta
 
 import pytest
 
-from sentry.sentry_metrics.use_case_id_registry import UseCaseID
 from sentry.snuba.metrics.naming_layer import TransactionMRI
 from sentry.testutils.cases import MetricsAPIBaseTestCase
 from sentry.testutils.helpers.datetime import freeze_time
@@ -47,7 +46,6 @@ class OrganizationMetricsTagValues(MetricsAPIBaseTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 TransactionMRI.DURATION.value,
                 {
                     "transaction": transaction,
@@ -57,7 +55,6 @@ class OrganizationMetricsTagValues(MetricsAPIBaseTestCase):
                 },
                 self.now().timestamp(),
                 value,
-                UseCaseID.TRANSACTIONS,
             )
         # Use Case: CUSTOM
         for value, release, tag_value, time in (
@@ -71,7 +68,6 @@ class OrganizationMetricsTagValues(MetricsAPIBaseTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 "d:custom/my_test_metric@percent",
                 {
                     "transaction": "/hello",
@@ -82,7 +78,6 @@ class OrganizationMetricsTagValues(MetricsAPIBaseTestCase):
                 },
                 self.now().timestamp(),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         self.prod_env = self.create_environment(name="prod", project=self.project)

+ 0 - 11
tests/sentry/api/endpoints/test_organization_metrics_tags.py

@@ -4,7 +4,6 @@ from unittest.mock import patch
 
 import pytest
 
-from sentry.sentry_metrics.use_case_id_registry import UseCaseID
 from sentry.snuba.metrics import SessionMRI, TransactionMRI
 from sentry.testutils.cases import MetricsAPIBaseTestCase
 from sentry.testutils.helpers.datetime import freeze_time
@@ -50,7 +49,6 @@ class OrganizationMetricsTagsIntegrationTest(MetricsAPIBaseTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 TransactionMRI.DURATION.value,
                 {
                     "transaction": transaction,
@@ -60,7 +58,6 @@ class OrganizationMetricsTagsIntegrationTest(MetricsAPIBaseTestCase):
                 },
                 timestamp.timestamp(),
                 value,
-                UseCaseID.TRANSACTIONS,
             )
         # Use Case: CUSTOM
         for value, release, tag_value, timestamp in (
@@ -73,7 +70,6 @@ class OrganizationMetricsTagsIntegrationTest(MetricsAPIBaseTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 "d:custom/my_test_metric@percent",
                 {
                     "transaction": "/hello",
@@ -84,7 +80,6 @@ class OrganizationMetricsTagsIntegrationTest(MetricsAPIBaseTestCase):
                 },
                 timestamp.timestamp(),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         self.prod_env = self.create_environment(name="prod", project=self.project)
@@ -213,12 +208,10 @@ class OrganizationMetricsTagsIntegrationTest(MetricsAPIBaseTestCase):
         self.store_metric(
             self.project.organization.id,
             self.project.id,
-            "gauge",
             mri,
             {"transaction": "/hello", "release": "1.0", "environment": "prod"},
             int(self.now().timestamp()),
             10,
-            UseCaseID.CUSTOM,
         )
 
         response = self.get_success_response(
@@ -248,12 +241,10 @@ class OrganizationMetricsTagsIntegrationTest(MetricsAPIBaseTestCase):
         self.store_metric(
             self.project.organization.id,
             self.project.id,
-            "distribution",
             mri,
             {"transaction": "/hello", "release": "1.0", "environment": "prod"},
             int(self.now().timestamp()),
             10,
-            UseCaseID.SPANS,
         )
 
         response = self.get_success_response(
@@ -266,12 +257,10 @@ class OrganizationMetricsTagsIntegrationTest(MetricsAPIBaseTestCase):
         self.store_metric(
             self.project.organization.id,
             self.project.id,
-            "distribution",
             mri,
             {},
             int(self.now().timestamp()),
             10,
-            UseCaseID.SPANS,
         )
 
         response = self.get_success_response(

+ 0 - 4
tests/sentry/api/endpoints/test_organization_release_health_data.py

@@ -2448,12 +2448,10 @@ class DerivedMetricsDataTest(MetricsAPIBaseTestCase):
             self.store_metric(
                 self.organization.id,
                 self.project.id,
-                "counter",
                 SessionMRI.RAW_SESSION.value,
                 {"session.status": "crashed"},
                 int(session["started"]),
                 +1,
-                use_case_id=UseCaseID.SESSIONS,
             )
 
         response = self.get_success_response(
@@ -2501,12 +2499,10 @@ class DerivedMetricsDataTest(MetricsAPIBaseTestCase):
             self.store_metric(
                 self.organization.id,
                 self.project.id,
-                "counter",
                 SessionMRI.RAW_SESSION.value,
                 {"session.status": "crashed"},
                 int(session["started"]),
                 +1,
-                use_case_id=UseCaseID.SESSIONS,
             )
 
         response = self.get_success_response(

+ 2 - 6
tests/sentry/issues/test_escalating.py

@@ -63,11 +63,9 @@ class BaseGroupCounts(BaseMetricsTestCase, TestCase):
             last_event = self.store_event(data=data, project_id=proj_id, assert_no_errors=False)
 
             self.store_metric(
-                type="counter",
-                use_case_id=UseCaseID.ESCALATING_ISSUES,
                 org_id=last_event.project.organization_id,
                 project_id=last_event.project.id,
-                name=build_mri("event_ingested", "c", UseCaseID.ESCALATING_ISSUES, None),
+                mri=build_mri("event_ingested", "c", UseCaseID.ESCALATING_ISSUES, None),
                 value=1,
                 tags={"group": str(last_event.group_id)},
                 timestamp=data["timestamp"],
@@ -117,11 +115,9 @@ class HistoricGroupCounts(
             insert_time=timestamp,
         )
         self.store_metric(
-            type="counter",
-            use_case_id=UseCaseID.ESCALATING_ISSUES,
             org_id=profile_error_event.project.organization_id,
             project_id=profile_error_event.project.id,
-            name=build_mri("event_ingested", "c", UseCaseID.ESCALATING_ISSUES, None),
+            mri=build_mri("event_ingested", "c", UseCaseID.ESCALATING_ISSUES, None),
             value=1,
             tags={"group": str(profile_error_event.group_id)},
             timestamp=profile_error_event.data["timestamp"],

+ 0 - 27
tests/sentry/sentry_metrics/querying/data/test_api.py

@@ -25,7 +25,6 @@ from sentry.sentry_metrics.querying.units import (
     UnitFamily,
     get_unit_family_and_unit,
 )
-from sentry.sentry_metrics.use_case_id_registry import UseCaseID
 from sentry.sentry_metrics.visibility import block_metric, block_tags_of_metric
 from sentry.snuba.metrics.naming_layer import TransactionMRI
 from sentry.testutils.cases import BaseMetricsTestCase, TestCase
@@ -68,7 +67,6 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 TransactionMRI.DURATION.value,
                 {
                     "transaction": transaction,
@@ -78,7 +76,6 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
                 },
                 self.ts(time),
                 value,
-                UseCaseID.TRANSACTIONS,
             )
 
         self.prod_env = self.create_environment(name="prod", project=self.project)
@@ -500,12 +497,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 TransactionMRI.MEASUREMENTS_FCP.value,
                 tags,
                 self.ts(time),
                 value,
-                UseCaseID.TRANSACTIONS,
             )
 
         query_1 = self.mql("sum", TransactionMRI.MEASUREMENTS_FCP.value, group_by="transaction")
@@ -959,12 +954,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "set",
                 mri,
                 {},
                 self.ts(self.now()),
                 user,
-                UseCaseID.CUSTOM,
             )
 
         query_1 = self.mql("count_unique", mri)
@@ -997,12 +990,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 project.id,
-                "distribution",
                 mri,
                 {},
                 self.ts(self.now()),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         query_1 = self.mql("sum", mri)
@@ -1035,12 +1026,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 project.id,
-                "distribution",
                 mri,
                 {},
                 self.ts(self.now()),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         query_1 = self.mql("sum", mri)
@@ -1073,12 +1062,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 project.id,
-                "distribution",
                 mri,
                 {},
                 self.ts(self.now()),
                 10.0,
-                UseCaseID.CUSTOM,
             )
 
         query_1 = self.mql("sum", mri_1)
@@ -1116,12 +1103,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 project.id,
-                "distribution",
                 mri,
                 {"transaction": "/hello"},
                 self.ts(self.now()),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         query_1 = self.mql("sum", mri)
@@ -1375,12 +1360,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 mri,
                 {},
                 self.ts(self.now()),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         for formula, expected_result, expected_unit_family in (
@@ -1434,12 +1417,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 mri,
                 {},
                 self.ts(self.now()),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         query_1 = self.mql("avg", mri_1)
@@ -1477,12 +1458,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 mri,
                 {},
                 self.ts(self.now()),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         query_1 = self.mql("avg", mri_1)
@@ -1520,12 +1499,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 mri,
                 {},
                 self.ts(self.now()),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         query_1 = self.mql("avg", mri_1)
@@ -1563,12 +1540,10 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 self.project.organization.id,
                 self.project.id,
-                "distribution",
                 mri,
                 {},
                 self.ts(self.now()),
                 value,
-                UseCaseID.CUSTOM,
             )
 
         for formula, expected_result, expected_unit_family, expected_unit in (
@@ -1620,7 +1595,6 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
             self.store_metric(
                 new_project.organization.id,
                 new_project.id,
-                "distribution",
                 TransactionMRI.DURATION.value,
                 {
                     "transaction": transaction,
@@ -1629,7 +1603,6 @@ class MetricsAPITestCase(TestCase, BaseMetricsTestCase):
                 },
                 self.ts(time),
                 value,
-                UseCaseID.TRANSACTIONS,
             )
         return new_project
 

+ 0 - 2
tests/sentry/snuba/metrics/test_datasource.py

@@ -43,12 +43,10 @@ class DatasourceTestCase(BaseMetricsLayerTestCase, TestCase):
         self.store_metric(
             self.project.organization.id,
             self.project.id,
-            "distribution",
             custom_mri,
             {},
             int(self.now.timestamp()),
             10,
-            UseCaseID.CUSTOM,
         )
 
         mris = get_stored_metrics_of_projects([self.project], [UseCaseID.TRANSACTIONS])

Some files were not shown because too many files changed in this diff