
feat(app-start): Add count_starts to get number of cold/warm starts (#61288)

Adds a count_starts metrics function for fetching the number of cold/warm starts of a screen, for comparison.
Nar Saynorath · commit 0710b297bc
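
For context, a minimal sketch of how the new function is queried (the count_starts(...) field syntax mirrors the tests added below; the request shape follows the test helper's do_request and is otherwise an assumption):

    # Hypothetical query params for the events endpoint with dataset=metrics.
    # The count_starts(...) field syntax is taken from the tests in this commit.
    params = {
        "field": [
            "transaction",
            "count_starts(measurements.app_start_warm)",
            "count_starts(measurements.app_start_cold)",
        ],
        "query": "event.type:transaction",
        "dataset": "metrics",
    }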

+ 39 - 0
src/sentry/search/events/datasets/metrics.py

@@ -558,6 +558,22 @@ class MetricsDatasetConfig(DatasetConfig):
                     ),
                     default_result_type="integer",
                 ),
+                fields.MetricsFunction(
+                    "count_starts",
+                    required_args=[
+                        fields.MetricArg(
+                            "column",
+                            allowed_columns=[
+                                "measurements.app_start_warm",
+                                "measurements.app_start_cold",
+                            ],
+                            allow_custom_measurements=False,
+                        ),
+                    ],
+                    calculated_args=[resolve_metric_id],
+                    snql_distribution=self._resolve_count_starts_function,
+                    default_result_type="integer",
+                ),
                 fields.MetricsFunction(
                     "count_web_vitals",
                     required_args=[
@@ -1251,6 +1267,29 @@ class MetricsDatasetConfig(DatasetConfig):
     def _key_transaction_filter_converter(self, search_filter: SearchFilter) -> Optional[WhereType]:
         return filter_aliases.team_key_transaction_filter(self.builder, search_filter)
 
+    def _resolve_count_starts_function(
+        self,
+        args: Mapping[str, Union[str, Column, SelectType, int, float]],
+        alias: str,
+    ) -> SelectType:
+        column = args["column"]
+        metric_id = args["metric_id"]
+
+        if column not in [
+            "measurements.app_start_cold",
+            "measurements.app_start_warm",
+        ]:
+            raise InvalidSearchQuery("count_starts only supports cold or warm app start measurements")
+
+        return Function(
+            "countIf",
+            [
+                Column("value"),
+                Function("equals", [Column("metric_id"), metric_id]),
+            ],
+            alias,
+        )
+
     def _resolve_web_vital_function(
         self,
         args: Mapping[str, Union[str, Column, SelectType, int, float]],
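
For reference, a standalone sketch of the aggregate that _resolve_count_starts_function builds: a countIf over the distribution's value column, filtered to the indexer-resolved metric ID (the ID and alias below are made-up placeholders):

    # Sketch of the SnQL expression produced for
    # count_starts(measurements.app_start_cold), using snuba-sdk.
    from snuba_sdk import Column, Function

    metric_id = 12345  # placeholder: indexer-resolved ID of the measurement
    count_cold_starts = Function(
        "countIf",
        [
            Column("value"),
            Function("equals", [Column("metric_id"), metric_id]),
        ],
        "count_starts_measurements_app_start_cold",  # placeholder alias
    )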

+ 2 - 0
src/sentry/testutils/cases.py

@@ -1806,6 +1806,8 @@ class MetricsEnhancedPerformanceTestCase(BaseMetricsLayerTestCase, TestCase):
         "measurements.score.weight.fid": "metrics_distributions",
         "measurements.score.weight.cls": "metrics_distributions",
         "measurements.score.weight.ttfb": "metrics_distributions",
+        "measurements.app_start_cold": "metrics_distributions",
+        "measurements.app_start_warm": "metrics_distributions",
         "spans.http": "metrics_distributions",
         "user": "metrics_sets",
     }
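
These entries register the app start measurements as distribution metrics so the test helper can store them. For example (call shape taken from the test below):

    # Stores a single cold start value of 10 against foo_transaction; the new
    # mapping routes measurements.app_start_cold to metrics_distributions.
    self.store_transaction_metric(
        10,
        metric="measurements.app_start_cold",
        tags={"transaction": "foo_transaction"},
        timestamp=self.min_ago,
    )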

+ 46 - 0
tests/snuba/api/endpoints/test_organization_events_mep.py

@@ -2989,6 +2989,48 @@ class OrganizationEventsMetricsEnhancedPerformanceEndpointTest(MetricsEnhancedPerformanceTestCase):
 
         assert meta["isMetricsData"]
 
+    def test_count_starts(self):
+        self.store_transaction_metric(
+            200,
+            metric="measurements.app_start_warm",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            100,
+            metric="measurements.app_start_warm",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+        self.store_transaction_metric(
+            10,
+            metric="measurements.app_start_cold",
+            tags={"transaction": "foo_transaction"},
+            timestamp=self.min_ago,
+        )
+
+        response = self.do_request(
+            {
+                "field": [
+                    "transaction",
+                    "count_starts(measurements.app_start_warm)",
+                    "count_starts(measurements.app_start_cold)",
+                ],
+                "query": "event.type:transaction",
+                "dataset": "metrics",
+                "per_page": 50,
+            }
+        )
+        assert response.status_code == 200, response.content
+        assert len(response.data["data"]) == 1
+        data = response.data["data"]
+        meta = response.data["meta"]
+
+        assert data[0]["count_starts(measurements.app_start_warm)"] == 2
+        assert data[0]["count_starts(measurements.app_start_cold)"] == 1
+
+        assert meta["isMetricsData"]
+
 
 class OrganizationEventsMetricsEnhancedPerformanceEndpointTestWithOnDemandMetrics(
     MetricsEnhancedPerformanceTestCase
@@ -3107,3 +3149,7 @@ class OrganizationEventsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
     @pytest.mark.xfail(reason="Not implemented")
     def test_count_scores(self):
         super().test_count_scores()
+
+    @pytest.mark.xfail(reason="Not implemented")
+    def test_count_starts(self):
+        super().test_count_starts()