
feat(webvitals): Discover aggregate performance_score function (#60536)

Adds a `performance_score` function that calculates the aggregate
performance score of a single web vital.
Only implemented for the metrics dataset for now (not via the metrics layer).
edwardgou-sentry 1 year ago
Parent
Commit 8d21bb996a
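
The aggregate score is a weight-adjusted average: the sum of a vital's stored scores divided by the sum of its stored weights. A minimal Python sketch of that math (illustrative only; the PR builds the equivalent SnQL expression in `_resolve_web_vital_score_function` below):

    # Aggregate performance score for one web vital: total score over
    # total weight across all sampled transactions (illustrative sketch).
    def aggregate_performance_score(scores, weights):
        total_weight = sum(weights)
        if total_weight == 0:
            # The real query leaves division by zero to ClickHouse; see the
            # fid/ttfb expectations in the test below.
            return None
        return sum(scores) / total_weight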

+ 11 - 0
src/sentry/search/events/constants.py

@@ -269,6 +269,17 @@ METRICS_MAP = {
     MEASUREMENTS_FRAMES_FROZEN_RATE: "d:transactions/measurements.frames_frozen_rate@ratio",
     MEASUREMENTS_FRAMES_SLOW_RATE: "d:transactions/measurements.frames_slow_rate@ratio",
     MEASUREMENTS_STALL_PERCENTAGE: "d:transactions/measurements.stall_percentage@ratio",
+    "measurements.score.lcp": "d:transactions/measurements.score.lcp@ratio",
+    "measurements.score.fid": "d:transactions/measurements.score.fid@ratio",
+    "measurements.score.cls": "d:transactions/measurements.score.cls@ratio",
+    "measurements.score.fcp": "d:transactions/measurements.score.fcp@ratio",
+    "measurements.score.ttfb": "d:transactions/measurements.score.ttfb@ratio",
+    "measurements.score.total": "d:transactions/measurements.score.total@ratio",
+    "measurements.score.weight.lcp": "d:transactions/measurements.score.weight.lcp@ratio",
+    "measurements.score.weight.fid": "d:transactions/measurements.score.weight.fid@ratio",
+    "measurements.score.weight.cls": "d:transactions/measurements.score.weight.cls@ratio",
+    "measurements.score.weight.fcp": "d:transactions/measurements.score.weight.fcp@ratio",
+    "measurements.score.weight.ttfb": "d:transactions/measurements.score.weight.ttfb@ratio",
     "spans.browser": "d:transactions/breakdowns.span_ops.ops.browser@millisecond",
     "spans.db": "d:transactions/breakdowns.span_ops.ops.db@millisecond",
     "spans.http": "d:transactions/breakdowns.span_ops.ops.http@millisecond",

+ 59 - 0
src/sentry/search/events/datasets/metrics.py

@@ -581,6 +581,25 @@ class MetricsDatasetConfig(DatasetConfig):
                     snql_distribution=self._resolve_web_vital_function,
                     default_result_type="integer",
                 ),
+                fields.MetricsFunction(
+                    "performance_score",
+                    required_args=[
+                        fields.MetricArg(
+                            "column",
+                            allowed_columns=[
+                                "measurements.score.fcp",
+                                "measurements.score.lcp",
+                                "measurements.score.fid",
+                                "measurements.score.cls",
+                                "measurements.score.ttfb",
+                            ],
+                            allow_custom_measurements=False,
+                        )
+                    ],
+                    calculated_args=[resolve_metric_id],
+                    snql_distribution=self._resolve_web_vital_score_function,
+                    default_result_type="integer",
+                ),
                 fields.MetricsFunction(
                     "epm",
                     snql_distribution=self._resolve_epm,
@@ -1232,6 +1251,46 @@ class MetricsDatasetConfig(DatasetConfig):
             alias,
         )
 
+    def _resolve_web_vital_score_function(
+        self,
+        args: Mapping[str, Union[str, Column, SelectType, int, float]],
+        alias: str,
+    ) -> SelectType:
+        column = args["column"]
+        metric_id = args["metric_id"]
+
+        if column not in [
+            "measurements.score.lcp",
+            "measurements.score.fcp",
+            "measurements.score.fid",
+            "measurements.score.cls",
+            "measurements.score.ttfb",
+        ]:
+            raise InvalidSearchQuery("performance_score only supports measurements")
+
+        weight_metric_id = self.resolve_metric(column.replace("score", "score.weight"))
+
+        return Function(
+            "divide",
+            [
+                Function(
+                    "sumIf",
+                    [
+                        Column("value"),
+                        Function("equals", [Column("metric_id"), metric_id]),
+                    ],
+                ),
+                Function(
+                    "sumIf",
+                    [
+                        Column("value"),
+                        Function("equals", [Column("metric_id"), weight_metric_id]),
+                    ],
+                ),
+            ],
+            alias,
+        )
+
     def _resolve_total_transaction_duration(self, alias: str, scope: str) -> SelectType:
         """This calculates the total time, and based on the scope will return
         either the apps total time or whatever other local scope/filters are
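
The returned expression is a division of two conditional sums: the numerator sums `value` over rows whose `metric_id` matches the score metric, the denominator over rows matching the corresponding `score.weight` metric. A toy Python model of that computation (illustrative only; real `metric_id` values come from the indexer):

    # Toy model of the generated sumIf/sumIf division (not Sentry code).
    SCORE_ID, WEIGHT_ID = 1, 2  # hypothetical resolved metric IDs
    rows = [
        {"metric_id": SCORE_ID, "value": 0.03},
        {"metric_id": WEIGHT_ID, "value": 0.30},
    ]
    def sum_if(rows, metric_id):
        return sum(r["value"] for r in rows if r["metric_id"] == metric_id)
    print(sum_if(rows, SCORE_ID) / sum_if(rows, WEIGHT_ID))  # 0.03 / 0.30 = 0.1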

+ 11 - 0
src/sentry/testutils/cases.py

@@ -1802,6 +1802,17 @@ class MetricsEnhancedPerformanceTestCase(BaseMetricsLayerTestCase, TestCase):
         "measurements.cls": "metrics_distributions",
         "measurements.frames_frozen_rate": "metrics_distributions",
         "measurements.time_to_initial_display": "metrics_distributions",
+        "measurements.score.lcp": "metrics_distributions",
+        "measurements.score.fcp": "metrics_distributions",
+        "measurements.score.fid": "metrics_distributions",
+        "measurements.score.cls": "metrics_distributions",
+        "measurements.score.ttfb": "metrics_distributions",
+        "measurements.score.total": "metrics_distributions",
+        "measurements.score.weight.lcp": "metrics_distributions",
+        "measurements.score.weight.fcp": "metrics_distributions",
+        "measurements.score.weight.fid": "metrics_distributions",
+        "measurements.score.weight.cls": "metrics_distributions",
+        "measurements.score.weight.ttfb": "metrics_distributions",
         "spans.http": "metrics_distributions",
         "user": "metrics_sets",
     }

+ 108 - 0
tests/snuba/api/endpoints/test_organization_events_mep.py

@@ -2587,6 +2587,110 @@ class OrganizationEventsMetricsEnhancedPerformanceEndpointTest(MetricsEnhancedPe
         assert data[0]["device.class"] == level
         assert meta["fields"]["device.class"] == "string"
 
+    def test_performance_score(self):
+        self.store_transaction_metric(
+            0.03,
+            metric="measurements.score.lcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.30,
+            metric="measurements.score.weight.lcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.35,
+            metric="measurements.score.fcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.70,
+            metric="measurements.score.weight.fcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.38,
+            metric="measurements.score.total",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+
+        self.store_transaction_metric(
+            1.00,
+            metric="measurements.score.lcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            1.00,
+            metric="measurements.score.weight.lcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        # These fid and ttfb scenarios shouldn't really happen, but we can test them anyway
+        self.store_transaction_metric(
+            0.00,
+            metric="measurements.score.fid",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.00,
+            metric="measurements.score.weight.fid",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            1.00,
+            metric="measurements.score.ttfb",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.00,
+            metric="measurements.score.weight.ttfb",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            1.00,
+            metric="measurements.score.total",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+
+        response = self.do_request(
+            {
+                "field": [
+                    "transaction",
+                    "performance_score(measurements.score.lcp)",
+                    "performance_score(measurements.score.fcp)",
+                    "performance_score(measurements.score.fid)",
+                    "performance_score(measurements.score.ttfb)",
+                ],
+                "query": "event.type:transaction",
+                "dataset": "metrics",
+                "per_page": 50,
+            }
+        )
+        assert response.status_code == 200, response.content
+        assert len(response.data["data"]) == 1
+        data = response.data["data"]
+        meta = response.data["meta"]
+        field_meta = meta["fields"]
+
+        assert data[0]["performance_score(measurements.score.lcp)"] == 0.7923076923076923
+        assert data[0]["performance_score(measurements.score.fcp)"] == 0.5
+        assert data[0]["performance_score(measurements.score.fid)"] == 0
+        assert data[0]["performance_score(measurements.score.ttfb)"] is None
+
+        assert meta["isMetricsData"]
+        assert field_meta["performance_score(measurements.score.lcp)"] == "integer"
+
 
 class OrganizationEventsMetricsEnhancedPerformanceEndpointTestWithOnDemandMetrics(
     MetricsEnhancedPerformanceTestCase
@@ -2677,3 +2781,7 @@ class OrganizationEventsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
     @pytest.mark.xfail(reason="Not implemented")
     def test_device_class_filter(self):
         super().test_device_class_filter()
+
+    @pytest.mark.xfail(reason="Not implemented")
+    def test_performance_score(self):
+        super().test_performance_score()
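
The expected values in `test_performance_score` follow directly from the stored metrics, since scores and weights are summed per vital before dividing. A quick arithmetic check (plain Python, not part of the test):

    lcp = (0.03 + 1.00) / (0.30 + 1.00)  # 1.03 / 1.30 ≈ 0.7923076923076923
    fcp = 0.35 / 0.70                    # 0.5
    # fid (0.00 / 0.00) and ttfb (1.00 / 0.00) exercise division by zero,
    # which the endpoint surfaces as 0 and None respectively.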