
fix(mep): Events-stats shouldn't fall back on metrics (#37055)

- Removes the metricsEnhanced query parameter entirely from the backend
- Fixes events-stats so it no longer falls back to discover when
  dataset=metrics
William Mak · 2 years ago · commit 7c0a1b39d7
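
For context, the client-facing change is a swap from a boolean flag to an explicit dataset name. A minimal sketch of the before/after request — the base URL, org slug, and token below are illustrative assumptions, not taken from this commit:

import requests

# Illustrative values -- endpoint host, org slug, and auth are assumptions.
BASE = "https://sentry.example.com/api/0/organizations/my-org/events-stats/"
HEADERS = {"Authorization": "Bearer <token>"}

# Before this commit: metrics-enhanced results were opted into via a flag,
# and incompatible queries silently fell back to discover.
legacy = requests.get(BASE, headers=HEADERS, params={"metricsEnhanced": "1", "yAxis": "epm()"})

# After this commit: the dataset is named explicitly. "metricsEnhanced" keeps
# the graceful fallback; "metrics" does not, so incompatible queries now 400.
strict = requests.get(BASE, headers=HEADERS, params={"dataset": "metrics", "yAxis": "epm()"})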

+ 4 - 14
src/sentry/api/endpoints/organization_events.py

@@ -131,13 +131,8 @@ class OrganizationEventsV2Endpoint(OrganizationEventsV2EndpointBase):
             "organizations:performance-dry-run-mep", organization=organization, actor=request.user
         )
 
-        # This param will be deprecated in favour of dataset
-        if "metricsEnhanced" in request.GET:
-            metrics_enhanced = request.GET.get("metricsEnhanced") == "1" and use_metrics
-            dataset = discover if not metrics_enhanced else metrics_enhanced_performance
-        else:
-            dataset = self.get_dataset(request) if use_metrics else discover
-            metrics_enhanced = dataset != discover
+        dataset = self.get_dataset(request) if use_metrics else discover
+        metrics_enhanced = dataset != discover
 
         sentry_sdk.set_tag("performance.metrics_enhanced", metrics_enhanced)
         allow_metric_aggregates = request.GET.get("preventMetricAggregates") != "1"
@@ -294,13 +289,8 @@ class OrganizationEventsEndpoint(OrganizationEventsV2EndpointBase):
             "organizations:performance-dry-run-mep", organization=organization, actor=request.user
         )
 
-        # This param will be deprecated in favour of dataset
-        if "metricsEnhanced" in request.GET:
-            metrics_enhanced = request.GET.get("metricsEnhanced") == "1" and use_metrics
-            dataset = discover if not metrics_enhanced else metrics_enhanced_performance
-        else:
-            dataset = self.get_dataset(request) if use_metrics else discover
-            metrics_enhanced = dataset != discover
+        dataset = self.get_dataset(request) if use_metrics else discover
+        metrics_enhanced = dataset != discover
 
         sentry_sdk.set_tag("performance.metrics_enhanced", metrics_enhanced)
         allow_metric_aggregates = request.GET.get("preventMetricAggregates") != "1"
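
Both hunks in this file (and the matching one in events-stats below) collapse the deprecated metricsEnhanced branch into a single get_dataset call. A minimal sketch of the resulting selection logic, assuming get_dataset resolves the dataset query parameter against a name-to-module mapping — the mapping and function names here are illustrative, not the endpoint base class's actual code:

from sentry.snuba import discover, metrics_enhanced_performance, metrics_performance

# Illustrative stand-in for the lookup the endpoint base class performs.
DATASET_OPTIONS = {
    "discover": discover,
    "metricsEnhanced": metrics_enhanced_performance,
    "metrics": metrics_performance,
}

def resolve_dataset(request, use_metrics):
    # Without the metrics feature flag, everything routes to discover.
    if not use_metrics:
        return discover, False
    dataset = DATASET_OPTIONS.get(request.GET.get("dataset", "discover"), discover)
    # metrics_enhanced is now derived from the resolved dataset, not a flag.
    return dataset, dataset != discover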

+ 2 - 7
src/sentry/api/endpoints/organization_events_stats.py

@@ -146,13 +146,8 @@ class OrganizationEventsStatsEndpoint(OrganizationEventsV2EndpointBase):  # type
                 "organizations:performance-dry-run-mep", False
             )
 
-            # This param will be deprecated in favour of dataset
-            if "metricsEnhanced" in request.GET:
-                metrics_enhanced = request.GET.get("metricsEnhanced") == "1" and use_metrics
-                dataset = discover if not metrics_enhanced else metrics_enhanced_performance
-            else:
-                dataset = self.get_dataset(request) if use_metrics else discover
-                metrics_enhanced = dataset != discover
+            dataset = self.get_dataset(request) if use_metrics else discover
+            metrics_enhanced = dataset != discover
 
             allow_metric_aggregates = request.GET.get("preventMetricAggregates") != "1"
             sentry_sdk.set_tag("performance.metrics_enhanced", metrics_enhanced)

+ 41 - 77
src/sentry/snuba/metrics_performance.py

@@ -5,7 +5,6 @@ import sentry_sdk
 from snuba_sdk import AliasedExpression
 
 from sentry.discover.arithmetic import categorize_columns
-from sentry.exceptions import IncompatibleMetricsQuery, InvalidSearchQuery
 from sentry.search.events.builder import (
     HistogramMetricQueryBuilder,
     MetricsQueryBuilder,
@@ -133,86 +132,51 @@ def timeseries_query(
         metrics_compatible = True
 
     if metrics_compatible or dry_run:
-        try:
-            with sentry_sdk.start_span(op="mep", description="TimeseriesMetricQueryBuilder"):
-                metrics_query = TimeseriesMetricQueryBuilder(
-                    params,
-                    rollup,
-                    dataset=Dataset.PerformanceMetrics,
-                    query=query,
-                    selected_columns=columns,
-                    functions_acl=functions_acl,
-                    allow_metric_aggregates=allow_metric_aggregates,
-                    dry_run=dry_run,
-                )
-                if dry_run:
-                    metrics_referrer = referrer + ".dry-run"
-                else:
-                    metrics_referrer = referrer + ".metrics-enhanced"
-                result = metrics_query.run_query(metrics_referrer)
-                if dry_run:
-                    # Query has to reach here to be considered compatible
-                    sentry_sdk.set_tag("query.mep_compatible", True)
-                    return
-            with sentry_sdk.start_span(op="mep", description="query.transform_results"):
-                result = discover.transform_results(
-                    result, metrics_query.function_alias_map, {}, None
-                )
-                result["data"] = (
-                    discover.zerofill(
-                        result["data"],
-                        params["start"],
-                        params["end"],
-                        rollup,
-                        "time",
-                    )
-                    if zerofill_results
-                    else result["data"]
-                )
-                sentry_sdk.set_tag("performance.dataset", "metrics")
-                result["meta"]["isMetricsData"] = True
-                return SnubaTSResult(
-                    {
-                        "data": result["data"],
-                        "isMetricsData": True,
-                        "meta": result["meta"],
-                    },
+        with sentry_sdk.start_span(op="mep", description="TimeseriesMetricQueryBuilder"):
+            metrics_query = TimeseriesMetricQueryBuilder(
+                params,
+                rollup,
+                dataset=Dataset.PerformanceMetrics,
+                query=query,
+                selected_columns=columns,
+                functions_acl=functions_acl,
+                allow_metric_aggregates=allow_metric_aggregates,
+                dry_run=dry_run,
+            )
+            if dry_run:
+                metrics_referrer = referrer + ".dry-run"
+            else:
+                metrics_referrer = referrer + ".metrics-enhanced"
+            result = metrics_query.run_query(metrics_referrer)
+            if dry_run:
+                # Query has to reach here to be considered compatible
+                sentry_sdk.set_tag("query.mep_compatible", True)
+                return
+        with sentry_sdk.start_span(op="mep", description="query.transform_results"):
+            result = discover.transform_results(result, metrics_query.function_alias_map, {}, None)
+            result["data"] = (
+                discover.zerofill(
+                    result["data"],
                     params["start"],
                     params["end"],
                     rollup,
+                    "time",
                 )
-        # raise Invalid Queries since the same thing will happen with discover
-        except InvalidSearchQuery as error:
-            if not dry_run:
-                raise error
-            else:
-                sentry_sdk.set_tag("performance.mep_incompatible", str(error))
-        # any remaining errors mean we should try again with discover
-        except IncompatibleMetricsQuery as error:
-            sentry_sdk.set_tag("performance.mep_incompatible", str(error))
-            metrics_compatible = False
-        except Exception as error:
-            if dry_run:
-                return
-            else:
-                raise error
-
-    if dry_run:
-        return {}
-
-    # This isn't a query we can enhance with metrics
-    if not metrics_compatible:
-        sentry_sdk.set_tag("performance.dataset", "discover")
-        return discover.timeseries_query(
-            selected_columns,
-            query,
-            params,
-            rollup,
-            referrer,
-            zerofill_results,
-            comparison_delta,
-            functions_acl,
-        )
+                if zerofill_results
+                else result["data"]
+            )
+            sentry_sdk.set_tag("performance.dataset", "metrics")
+            result["meta"]["isMetricsData"] = True
+            return SnubaTSResult(
+                {
+                    "data": result["data"],
+                    "isMetricsData": True,
+                    "meta": result["meta"],
+                },
+                params["start"],
+                params["end"],
+                rollup,
+            )
     return SnubaTSResult()
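
With the try/except fallback deleted above, IncompatibleMetricsQuery now propagates out of timeseries_query instead of rerouting the query to discover (InvalidSearchQuery already re-raised before). A sketch of the caller-side contract this implies — the wrapper and the ParseError translation are assumptions for illustration; the new test below only pins the observable 400:

from rest_framework.exceptions import ParseError

from sentry.exceptions import IncompatibleMetricsQuery
from sentry.snuba import metrics_performance

def run_strict_metrics_timeseries(**kwargs):
    # Before this commit, timeseries_query caught this exception itself and
    # silently re-ran the query against discover.
    try:
        return metrics_performance.timeseries_query(**kwargs)
    except IncompatibleMetricsQuery as err:
        # Failing loudly is the point of the fix: dataset=metrics should not
        # return discover-backed results.
        raise ParseError(detail=str(err))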
 
 

+ 29 - 13
tests/snuba/api/endpoints/test_organization_events_stats_mep.py

@@ -58,7 +58,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                     "interval": "1h",
                     "yAxis": axis,
                     "project": self.project.id,
-                    "metricsEnhanced": "1",
+                    "dataset": "metricsEnhanced",
                 },
             )
             assert response.status_code == 200, response.content
@@ -87,7 +87,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                     "interval": "24h",
                     "yAxis": axis,
                     "project": self.project.id,
-                    "metricsEnhanced": "1",
+                    "dataset": "metricsEnhanced",
                 },
             )
             assert response.status_code == 200, response.content
@@ -114,7 +114,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                     "interval": "1h",
                     "yAxis": axis,
                     "project": self.project.id,
-                    "metricsEnhanced": "1",
+                    "dataset": "metricsEnhanced",
                 },
             )
             assert response.status_code == 200, response.content
@@ -143,7 +143,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                     "interval": "1m",
                     "yAxis": axis,
                     "project": self.project.id,
-                    "metricsEnhanced": "1",
+                    "dataset": "metricsEnhanced",
                 },
             )
             assert response.status_code == 200, response.content
@@ -171,7 +171,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                 "interval": "1h",
                 "yAxis": ["failure_rate()"],
                 "project": self.project.id,
-                "metricsEnhanced": "1",
+                "dataset": "metricsEnhanced",
             },
         )
         assert response.status_code == 200, response.content
@@ -200,7 +200,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                 "interval": "1h",
                 "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                 "project": self.project.id,
-                "metricsEnhanced": "1",
+                "dataset": "metricsEnhanced",
             },
         )
         assert response.status_code == 200, response.content
@@ -224,7 +224,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                 "end": iso_format(self.day_ago + timedelta(hours=2)),
                 "interval": "1h",
                 "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
-                "metricsEnhanced": "1",
+                "dataset": "metricsEnhanced",
             },
         )
 
@@ -243,7 +243,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                 "end": iso_format(self.day_ago + timedelta(hours=2)),
                 "interval": "1h",
                 "yAxis": "count_unique(user)",
-                "metricsEnhanced": "1",
+                "dataset": "metricsEnhanced",
             },
         )
         assert response.status_code == 200, response.content
@@ -262,7 +262,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                     "interval": "1h",
                     "query": query,
                     "yAxis": ["epm()"],
-                    "metricsEnhanced": "1",
+                    "dataset": "metricsEnhanced",
                 },
             )
             assert response.status_code == 200, response.content
@@ -289,7 +289,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                 "interval": "1h",
                 "query": "p95():<5s",
                 "yAxis": ["epm()"],
-                "metricsEnhanced": "1",
+                "dataset": "metricsEnhanced",
                 "preventMetricAggregates": "1",
             },
         )
@@ -326,7 +326,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                 "end": iso_format(self.day_ago + timedelta(hours=2)),
                 "interval": "1h",
                 "yAxis": "sum(transaction.duration)",
-                "metricsEnhanced": "1",
+                "dataset": "metricsEnhanced",
             },
         )
         assert response.status_code == 200, response.content
@@ -371,7 +371,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                 "end": iso_format(self.day_ago + timedelta(hours=2)),
                 "interval": "1h",
                 "yAxis": "sum(measurements.datacenter_memory)",
-                "metricsEnhanced": "1",
+                "dataset": "metricsEnhanced",
             },
         )
         assert response.status_code == 200, response.content
@@ -419,7 +419,7 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
                     "sum(measurements.datacenter_memory)",
                     "p50(measurements.datacenter_memory)",
                 ],
-                "metricsEnhanced": "1",
+                "dataset": "metricsEnhanced",
             },
         )
         assert response.status_code == 200, response.content
@@ -461,3 +461,19 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
             "sum_measurements_datacenter_memory": "pebibyte",
             "p50_measurements_datacenter_memory": "pebibyte",
         }
+
+    def test_dataset_metrics_does_not_fallback(self):
+        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
+        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
+        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
+        response = self.do_request(
+            data={
+                "start": iso_format(self.day_ago),
+                "end": iso_format(self.day_ago + timedelta(hours=2)),
+                "interval": "1h",
+                "query": "transaction.duration:<5s",
+                "yAxis": "sum(transaction.duration)",
+                "dataset": "metrics",
+            },
+        )
+        assert response.status_code == 400, response.content