Просмотр исходного кода

feat(webvitals): Better edge case handling for performance_score function (#60695)

Adds bounds for performance_score function and return 0 if sum of
weights is 0.
edwardgou-sentry 1 год назад
Родитель
Commit
7c598cd342

+ 50 - 11
src/sentry/search/events/datasets/metrics.py

@@ -1271,22 +1271,61 @@ class MetricsDatasetConfig(DatasetConfig):
         weight_metric_id = self.resolve_metric(column.replace("score", "score.weight"))
 
         return Function(
-            "divide",
+            "greatest",
             [
                 Function(
-                    "sumIf",
-                    [
-                        Column("value"),
-                        Function("equals", [Column("metric_id"), metric_id]),
-                    ],
-                ),
-                Function(
-                    "sumIf",
+                    "least",
                     [
-                        Column("value"),
-                        Function("equals", [Column("metric_id"), weight_metric_id]),
+                        Function(
+                            "if",
+                            [
+                                Function(
+                                    "greater",
+                                    [
+                                        Function(
+                                            "sumIf",
+                                            [
+                                                Column("value"),
+                                                Function(
+                                                    "equals",
+                                                    [Column("metric_id"), weight_metric_id],
+                                                ),
+                                            ],
+                                        ),
+                                        0.0,
+                                    ],
+                                ),
+                                Function(
+                                    "divide",
+                                    [
+                                        Function(
+                                            "sumIf",
+                                            [
+                                                Column("value"),
+                                                Function(
+                                                    "equals", [Column("metric_id"), metric_id]
+                                                ),
+                                            ],
+                                        ),
+                                        Function(
+                                            "sumIf",
+                                            [
+                                                Column("value"),
+                                                Function(
+                                                    "equals",
+                                                    [Column("metric_id"), weight_metric_id],
+                                                ),
+                                            ],
+                                        ),
+                                    ],
+                                ),
+                                0.0,
+                            ],
+                        ),
+                        1.0,
                     ],
                 ),
+                0.0,
             ],
             alias,
         )

+ 62 - 1
tests/snuba/api/endpoints/test_organization_events_mep.py

@@ -2686,7 +2686,64 @@ class OrganizationEventsMetricsEnhancedPerformanceEndpointTest(MetricsEnhancedPe
         assert data[0]["performance_score(measurements.score.lcp)"] == 0.7923076923076923
         assert data[0]["performance_score(measurements.score.fcp)"] == 0.5
         assert data[0]["performance_score(measurements.score.fid)"] == 0
-        assert data[0]["performance_score(measurements.score.ttfb)"] is None
+        assert data[0]["performance_score(measurements.score.ttfb)"] == 0
+
+        assert meta["isMetricsData"]
+        assert field_meta["performance_score(measurements.score.lcp)"] == "integer"
+
+    def test_performance_score_boundaries(self):
+        # Scores shouldn't exceed 1 or go below 0, but we can test these boundaries anyways
+        self.store_transaction_metric(
+            0.65,
+            metric="measurements.score.lcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.30,
+            metric="measurements.score.weight.lcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            -0.35,
+            metric="measurements.score.fcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.70,
+            metric="measurements.score.weight.fcp",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            0.3,
+            metric="measurements.score.total",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+
+        response = self.do_request(
+            {
+                "field": [
+                    "transaction",
+                    "performance_score(measurements.score.lcp)",
+                    "performance_score(measurements.score.fcp)",
+                ],
+                "query": "event.type:transaction",
+                "dataset": "metrics",
+                "per_page": 50,
+            }
+        )
+        assert response.status_code == 200, response.content
+        assert len(response.data["data"]) == 1
+        data = response.data["data"]
+        meta = response.data["meta"]
+        field_meta = meta["fields"]
+
+        assert data[0]["performance_score(measurements.score.lcp)"] == 1.0
+        assert data[0]["performance_score(measurements.score.fcp)"] == 0.0
 
         assert meta["isMetricsData"]
         assert field_meta["performance_score(measurements.score.lcp)"] == "integer"
@@ -2785,3 +2842,7 @@ class OrganizationEventsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
     @pytest.mark.xfail(reason="Not implemented")
     def test_performance_score(self):
         super().test_performance_score()
+
+    @pytest.mark.xfail(reason="Not implemented")
+    def test_performance_score_boundaries(self):
+        super().test_performance_score_boundaries()