
feat(metrics-extraction): Add preflight request to split widgets (#65997)

### Summary
On-demand extraction needs a temporary preflight request to check which
side of the Errors & Transactions dataset split a widget's data lies on.
The preflight runs only the first time a widget id is queried after its
dashboard is saved; afterwards the widget relies on the stored dataset
decision for further queries.
Kev committed 1fe0569d35 · 1 year ago
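
In rough terms, the preflight flow described above reduces to the sketch below. This is a minimal sketch, not the shipped code: `run_query(dataset, query)` is a hypothetical stand-in for the dataset query helpers in the diffs that follow, and the integer enum values merely mirror the role of `DashboardWidgetTypes`.

```python
# Minimal sketch of the widget-split preflight; run_query is a hypothetical
# stand-in for the real _data_fn helpers in the diffs below.
ERROR_EVENTS, TRANSACTION_LIKE = 100, 101  # illustrative stand-ins for DashboardWidgetTypes

def run_widget_query(widget, query, run_query):
    if widget.discover_widget_split is not None:
        # Cached decision: later requests skip the preflight entirely.
        if widget.discover_widget_split == ERROR_EVENTS:
            return run_query("discover", f"({query}) AND event.type:error")
        return run_query("scoped", query)  # TRANSACTION_LIKE stays on the scoped dataset

    # First run after the dashboard is saved: probe the errors side once.
    has_errors = len(run_query("discover", f"({query}) AND event.type:error")["data"]) > 0
    results = run_query("discover" if has_errors else "scoped", query)
    has_other_data = len(results["data"]) > 0

    # Persist the decision so the next request takes the cached branch above.
    widget.discover_widget_split = (
        TRANSACTION_LIKE if has_other_data and not has_errors else ERROR_EVENTS
    )
    widget.save()
    return results
```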

+ 10 - 0
src/sentry/api/bases/organization_events.py

@@ -22,6 +22,7 @@ from sentry.api.serializers.snuba import BaseSnubaSerializer, SnubaTSResultSeria
 from sentry.api.utils import handle_query_errors
 from sentry.discover.arithmetic import is_equation, strip_equation
 from sentry.exceptions import InvalidSearchQuery
+from sentry.models.dashboard_widget import DashboardWidgetTypes
 from sentry.models.group import Group
 from sentry.models.organization import Organization
 from sentry.models.project import Project
@@ -223,6 +224,15 @@ class OrganizationEventsV2EndpointBase(OrganizationEventsEndpointBase):
 
         return use_on_demand_metrics, on_demand_metric_type
 
+    def get_split_decision(self, has_errors, has_other_data):
+        """This can be removed once the discover dataset has been fully split"""
+        if has_errors and not has_other_data:
+            return DashboardWidgetTypes.ERROR_EVENTS
+        if not has_errors and has_other_data:
+            return DashboardWidgetTypes.TRANSACTION_LIKE
+        # Covers the ambiguous cases of neither (!A && !B) and both (A && B); default to errors
+        return DashboardWidgetTypes.ERROR_EVENTS
+
     def handle_unit_meta(
         self, meta: dict[str, str]
     ) -> tuple[dict[str, str], dict[str, str | None]]:
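
As a quick sanity check of `get_split_decision`, the four input combinations reduce to the table below. A hedged sketch: instantiating the endpoint class directly is illustrative only; DRF normally constructs it as part of request handling.

```python
from sentry.api.bases.organization_events import OrganizationEventsV2EndpointBase
from sentry.models.dashboard_widget import DashboardWidgetTypes

endpoint = OrganizationEventsV2EndpointBase()  # illustrative instantiation

# Only errors -> errors; only other data -> transaction-like.
assert endpoint.get_split_decision(True, False) == DashboardWidgetTypes.ERROR_EVENTS
assert endpoint.get_split_decision(False, True) == DashboardWidgetTypes.TRANSACTION_LIKE
# Ambiguous cases (no data at all, or both kinds) default to errors.
assert endpoint.get_split_decision(False, False) == DashboardWidgetTypes.ERROR_EVENTS
assert endpoint.get_split_decision(True, True) == DashboardWidgetTypes.ERROR_EVENTS
```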

+ 61 - 3
src/sentry/api/endpoints/organization_events.py

@@ -1,5 +1,6 @@
 import logging
 from collections.abc import Mapping
+from typing import Any
 
 import sentry_sdk
 from drf_spectacular.utils import OpenApiResponse, extend_schema
@@ -18,6 +19,7 @@ from sentry.apidocs.examples.discover_performance_examples import DiscoverAndPer
 from sentry.apidocs.parameters import GlobalParams, OrganizationParams, VisibilityParams
 from sentry.apidocs.utils import inline_sentry_response_serializer
 from sentry.exceptions import InvalidParams
+from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
 from sentry.models.organization import Organization
 from sentry.ratelimits.config import RateLimitConfig
 from sentry.snuba import discover, metrics_enhanced_performance, metrics_performance
@@ -274,16 +276,17 @@ class OrganizationEventsEndpoint(OrganizationEventsV2EndpointBase):
 
         sentry_sdk.set_tag("performance.metrics_enhanced", metrics_enhanced)
         allow_metric_aggregates = request.GET.get("preventMetricAggregates") != "1"
+
         # Force the referrer to "api.auth-token.events" for events requests authorized through a bearer token
         if request.auth:
             referrer = API_TOKEN_REFERRER
         elif referrer not in ALLOWED_EVENTS_REFERRERS:
             referrer = Referrer.API_ORGANIZATION_EVENTS.value
 
-        def data_fn(offset, limit):
-            return dataset.query(
+        def _data_fn(scoped_dataset, offset, limit, query) -> dict[str, Any]:
+            return scoped_dataset.query(
                 selected_columns=self.get_field_list(organization, request),
-                query=request.GET.get("query"),
+                query=query,
                 params=params,
                 snuba_params=snuba_params,
                 equations=self.get_equation_list(organization, request),
@@ -303,6 +306,61 @@ class OrganizationEventsEndpoint(OrganizationEventsV2EndpointBase):
                 on_demand_metrics_type=on_demand_metrics_type,
             )
 
+        def data_fn_factory(scoped_dataset):
+            """
+            This factory closes over the query and dataset so it can make an additional request to the errors dataset
+            when the request comes from a dashboard widget whose discover dataset we are trying to split.
+
+            This should be removed once the discover dataset is completely split in dashboards.
+            """
+            scoped_query = request.GET.get("query")
+            dashboard_widget_id = request.GET.get("dashboardWidgetId", None)
+
+            def fn(offset, limit) -> dict[str, Any]:
+                if not (metrics_enhanced and dashboard_widget_id):
+                    return _data_fn(scoped_dataset, offset, limit, scoped_query)
+
+                widget = DashboardWidget.objects.get(id=dashboard_widget_id)
+                does_widget_have_split = widget.discover_widget_split is not None
+
+                if does_widget_have_split:
+                    # The split decision is effectively cached, so we skip the preflight check
+                    split_query = scoped_query
+                    if widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS:
+                        split_dataset = discover
+                        split_query = f"({scoped_query}) AND event.type:error"
+                    elif widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE:
+                        split_dataset = scoped_dataset
+                    else:
+                        split_dataset = discover
+
+                    return _data_fn(split_dataset, offset, limit, split_query)
+
+                # The widget has not been split yet, so run a preflight against the errors dataset first.
+                error_results = _data_fn(
+                    discover, offset, limit, f"({scoped_query}) AND event.type:error"
+                )
+                has_errors = len(error_results["data"]) > 0
+
+                if has_errors:
+                    # If we see errors, always fall back to discover with the scoped_query for the user.
+                    all_results = _data_fn(discover, offset, limit, scoped_query)
+                else:
+                    all_results = _data_fn(scoped_dataset, offset, limit, scoped_query)
+
+                has_other_data = len(all_results["data"]) > 0
+                new_discover_widget_split = self.get_split_decision(has_errors, has_other_data)
+
+                if widget.discover_widget_split != new_discover_widget_split:
+                    widget.discover_widget_split = new_discover_widget_split
+                    widget.save()
+
+                return all_results
+
+            return fn
+
+        data_fn = data_fn_factory(dataset)
+
         with handle_query_errors():
             # Don't include cursor headers if the client won't be using them
             if request.GET.get("noPagination"):
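
For context, the preflight branch above only engages when the request is metrics-enhanced and carries a `dashboardWidgetId`. A request along these lines would exercise it (host, org slug, token, and widget id are placeholders, not real values):

```python
import requests

# Illustrative request against the events endpoint; all identifiers are placeholders.
resp = requests.get(
    "https://sentry.example.com/api/0/organizations/my-org/events/",
    headers={"Authorization": "Bearer <token>"},
    params={
        "field": ["count()"],
        "query": "transaction.duration:>=100",
        "dataset": "metricsEnhanced",
        "dashboardWidgetId": "123",  # triggers the split preflight on first run
    },
)
print(resp.status_code)
```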

+ 150 - 4
src/sentry/api/endpoints/organization_events_stats.py

@@ -1,5 +1,6 @@
 from collections.abc import Mapping, Sequence
 from datetime import datetime, timedelta
+from typing import Any
 
 import sentry_sdk
 from rest_framework.exceptions import ValidationError
@@ -11,6 +12,7 @@ from sentry.api.api_publish_status import ApiPublishStatus
 from sentry.api.base import region_silo_endpoint
 from sentry.api.bases import OrganizationEventsV2EndpointBase
 from sentry.constants import MAX_TOP_EVENTS
+from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
 from sentry.models.organization import Organization
 from sentry.snuba import (
     discover,
@@ -23,7 +25,7 @@ from sentry.snuba import (
 )
 from sentry.snuba.metrics.extraction import MetricSpecType
 from sentry.snuba.referrer import Referrer
-from sentry.utils.snuba import SnubaTSResult
+from sentry.utils.snuba import SnubaError, SnubaTSResult
 
 METRICS_ENHANCED_REFERRERS: set[str] = {
     Referrer.API_PERFORMANCE_HOMEPAGE_WIDGET_CHART.value,
@@ -214,7 +216,8 @@ class OrganizationEventsStatsEndpoint(OrganizationEventsV2EndpointBase):
 
         force_metrics_layer = request.GET.get("forceMetricsLayer") == "true"
 
-        def get_event_stats(
+        def _get_event_stats(
+            scoped_dataset: Any,
             query_columns: Sequence[str],
             query: str,
             params: dict[str, str],
@@ -223,7 +226,7 @@ class OrganizationEventsStatsEndpoint(OrganizationEventsV2EndpointBase):
             comparison_delta: datetime | None,
         ) -> SnubaTSResult:
             if top_events > 0:
-                return dataset.top_events_timeseries(
+                return scoped_dataset.top_events_timeseries(
                     timeseries_columns=query_columns,
                     selected_columns=self.get_field_list(organization, request),
                     equations=self.get_equation_list(organization, request),
@@ -241,7 +244,7 @@ class OrganizationEventsStatsEndpoint(OrganizationEventsV2EndpointBase):
                     include_other=include_other,
                 )
 
-            return dataset.timeseries_query(
+            return scoped_dataset.timeseries_query(
                 selected_columns=query_columns,
                 query=query,
                 params=params,
@@ -266,6 +269,149 @@ class OrganizationEventsStatsEndpoint(OrganizationEventsV2EndpointBase):
                 on_demand_metrics_type=on_demand_metrics_type,
             )
 
+        def get_event_stats_factory(scoped_dataset):
+            """
+            This factory closes over the dataset so it can make an additional request to the errors dataset
+            when the request comes from a dashboard widget whose discover dataset we are trying to split.
+
+            This should be removed once the discover dataset is completely split in dashboards.
+            """
+            dashboard_widget_id = request.GET.get("dashboardWidgetId", None)
+
+            def fn(
+                query_columns: Sequence[str],
+                query: str,
+                params: dict[str, str],
+                rollup: int,
+                zerofill_results: bool,
+                comparison_delta: datetime | None,
+            ) -> SnubaTSResult:
+
+                if not (metrics_enhanced and dashboard_widget_id):
+                    return _get_event_stats(
+                        scoped_dataset,
+                        query_columns,
+                        query,
+                        params,
+                        rollup,
+                        zerofill_results,
+                        comparison_delta,
+                    )
+
+                try:
+                    widget = DashboardWidget.objects.get(id=dashboard_widget_id)
+                    does_widget_have_split = widget.discover_widget_split is not None
+
+                    if does_widget_have_split:
+                        # The split decision is effectively cached, so we skip the preflight check
+                        split_query = query
+                        if widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS:
+                            split_dataset = discover
+                            split_query = f"({query}) AND event.type:error"
+                        elif widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE:
+                            split_dataset = scoped_dataset
+                        else:
+                            # This is a fallback for the ambiguous case.
+                            split_dataset = scoped_dataset
+
+                        return _get_event_stats(
+                            split_dataset,
+                            query_columns,
+                            split_query,
+                            params,
+                            rollup,
+                            zerofill_results,
+                            comparison_delta,
+                        )
+
+                    # The widget has not been split yet, so run a preflight against the errors dataset first.
+                    errors_only_query = f"({query}) AND event.type:error"
+                    try:
+                        error_results = _get_event_stats(
+                            discover,
+                            query_columns,
+                            errors_only_query,
+                            params,
+                            rollup,
+                            zerofill_results,
+                            comparison_delta,
+                        )
+                        sum_error_results = error_results.data["data"]
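+                        # Zerofilled buckets typically carry only the "time" column, so a key
+                        # with parentheses (an aggregate such as count()) signals a real error row.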
+                        has_errors = any(
+                            any("(" in column and ")" in column for column in row.keys())
+                            for row in sum_error_results
+                        )
+                    except SnubaError as e:
+                        sentry_sdk.capture_exception(e)
+                        has_errors = True
+
+                    if has_errors:
+                        # If we see errors, fall back to the scoped dataset with the original query for the user.
+                        all_results = _get_event_stats(
+                            scoped_dataset,
+                            query_columns,
+                            query,
+                            params,
+                            rollup,
+                            zerofill_results,
+                            comparison_delta,
+                        )
+                    else:
+                        all_results = _get_event_stats(
+                            discover,
+                            query_columns,
+                            query,
+                            params,
+                            rollup,
+                            zerofill_results,
+                            comparison_delta,
+                        )
+
+                    if isinstance(all_results, SnubaTSResult):
+                        other_data = all_results.data["data"]
+                    else:
+                        other_data = sum(
+                            [
+                                timeseries_result.data["data"]
+                                for timeseries_result in all_results.values()
+                            ],
+                            [],
+                        )
+
+                    has_other_data = any(
+                        any(
+                            column_name != "time"
+                            and isinstance(column_value, (int, float))
+                            and column_value != 0
+                            for (column_name, column_value) in row.items()
+                        )
+                        for row in other_data
+                    )
+
+                    new_discover_widget_split = self.get_split_decision(has_errors, has_other_data)
+
+                    if widget.discover_widget_split != new_discover_widget_split:
+                        widget.discover_widget_split = new_discover_widget_split
+                        widget.save()
+                    return all_results
+
+                except Exception as e:
+                    # Swallow exceptions from the dashboard-split logic and retry once with the original dataset and query.
+                    sentry_sdk.capture_exception(e)
+                    return _get_event_stats(
+                        scoped_dataset,
+                        query_columns,
+                        query,
+                        params,
+                        rollup,
+                        zerofill_results,
+                        comparison_delta,
+                    )
+
+            return fn
+
+        get_event_stats = get_event_stats_factory(dataset)
+
         try:
             return Response(
                 self.get_event_stats_data(
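
The `has_other_data` scan in the stats preflight treats any non-`time` numeric column with a nonzero value as evidence of real data. A standalone sketch of that check (the sample rows are made up):

```python
def timeseries_has_data(rows):
    # Mirror of the has_other_data check above: ignore the time column and
    # look for any nonzero numeric value in any row.
    return any(
        any(
            name != "time" and isinstance(value, (int, float)) and value != 0
            for name, value in row.items()
        )
        for row in rows
    )

assert timeseries_has_data([{"time": 1700000000, "count": 3.0}])
assert not timeseries_has_data([{"time": 1700000000, "count": 0}, {"time": 1700003600}])
```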

+ 5 - 1
src/sentry/api/serializers/rest_framework/dashboard.py

@@ -250,7 +250,7 @@ class DashboardWidgetQuerySerializer(CamelSnakeSerializer[Dashboard]):
 
         try:
             builder.resolve_orderby(orderby)
-        except (InvalidSearchQuery) as err:
+        except InvalidSearchQuery as err:
             data["discover_query_error"] = {"orderby": f"Invalid orderby: {err}"}
 
         return data
@@ -593,6 +593,7 @@ class DashboardDetailsSerializer(CamelSnakeSerializer[Dashboard]):
             thresholds=widget_data.get("thresholds", None),
             interval=widget_data.get("interval", "5m"),
             widget_type=widget_data.get("widget_type", DashboardWidgetTypes.DISCOVER),
+            discover_widget_split=widget_data.get("discover_widget_split", None),
             order=order,
             limit=widget_data.get("limit", None),
             detail={"layout": widget_data.get("layout")},
@@ -632,6 +633,9 @@ class DashboardDetailsSerializer(CamelSnakeSerializer[Dashboard]):
         widget.display_type = data.get("display_type", widget.display_type)
         widget.interval = data.get("interval", widget.interval)
         widget.widget_type = data.get("widget_type", widget.widget_type)
+        widget.discover_widget_split = data.get(
+            "discover_widget_split", widget.discover_widget_split
+        )
         widget.order = order
         widget.limit = data.get("limit", widget.limit)
         widget.detail = {"layout": data.get("layout", prev_layout)}
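
Because the dashboard serializers are camel/snake aware, a client would presumably send the new field as `discoverWidgetSplit` in the widget payload. An illustrative sketch only: the field values are made up, and in practice the server sets the split itself after the preflight.

```python
# Hypothetical widget payload for a dashboard create/update request.
# CamelSnakeSerializer maps discoverWidgetSplit -> discover_widget_split.
widget_payload = {
    "title": "Errors by release",
    "displayType": "line",
    "widgetType": "discover",
    "interval": "5m",
    "discoverWidgetSplit": None,  # left unset; the preflight fills it in server-side
    "queries": [{"fields": ["count()"], "conditions": "event.type:error", "orderby": ""}],
}
```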

+ 2 - 0
src/sentry/testutils/helpers/on_demand.py

@@ -19,6 +19,7 @@ def create_widget(
     columns: Sequence[str] | None = None,
     dashboard: Dashboard | None = None,
     widget: DashboardWidget | None = None,
+    discover_widget_split: int | None = None,
 ) -> tuple[DashboardWidgetQuery, DashboardWidget, Dashboard]:
     columns = columns or []
     dashboard = dashboard or Dashboard.objects.create(
@@ -32,6 +33,7 @@ def create_widget(
         order=order,
         widget_type=DashboardWidgetTypes.DISCOVER,
         display_type=DashboardWidgetDisplayTypes.LINE_CHART,
+        discover_widget_split=discover_widget_split,
     )
 
     if id:

+ 84 - 0
tests/snuba/api/endpoints/test_organization_events_mep.py

@@ -6,6 +6,7 @@ from django.urls import reverse
 from rest_framework.response import Response
 
 from sentry.discover.models import TeamKeyTransaction
+from sentry.models.dashboard_widget import DashboardWidgetTypes
 from sentry.models.projectteam import ProjectTeam
 from sentry.models.transaction_threshold import (
     ProjectTransactionThreshold,
@@ -26,6 +27,7 @@ from sentry.snuba.utils import DATASET_OPTIONS
 from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
 from sentry.testutils.helpers.datetime import before_now, iso_format
 from sentry.testutils.helpers.discover import user_misery_formula
+from sentry.testutils.helpers.on_demand import create_widget
 from sentry.testutils.silo import region_silo_test
 from sentry.utils.samples import load_data
 
@@ -3257,6 +3259,88 @@ class OrganizationEventsMetricsEnhancedPerformanceEndpointTestWithOnDemandMetric
         self._assert_on_demand_response(response, expected_on_demand_query=True)
         assert response.data["data"] == [{user_misery_field: user_misery_formula(1, 2)}]
 
+    def test_on_demand_user_misery_discover_split_with_widget_id_unsaved(self) -> None:
+        user_misery_field = "user_misery(300)"
+        query = "transaction.duration:>=100"
+
+        _, widget, __ = create_widget(
+            ["epm()"],
+            "transaction.duration:>=100",
+            self.project,
+            title="Dashboard 123",
+            columns=["user.id", "release", "count()"],
+            discover_widget_split=None,
+        )
+
+        # We store data for both specs; however, when the query builders try to
+        # query for the data they will not query on-demand data
+        for spec_version in OnDemandMetricSpecVersioning.get_spec_versions():
+            spec = OnDemandMetricSpec(
+                field=user_misery_field,
+                query=query,
+                spec_type=MetricSpecType.DYNAMIC_QUERY,
+                # We only allow querying the function in the latest spec version,
+                # otherwise, the data returned by the endpoint would be 0.05
+                spec_version=spec_version,
+            )
+            tags = {"satisfaction": "miserable"}
+            self.store_on_demand_metric(1, spec=spec, additional_tags=tags, timestamp=self.min_ago)
+            self.store_on_demand_metric(2, spec=spec, timestamp=self.min_ago)
+
+        params = {"field": [user_misery_field], "project": self.project.id, "query": query}
+        self._create_specs(params)
+
+        params["dashboardWidgetId"] = widget.id
+
+        # Since we're using the extra feature flag, we expect user_misery to be an on-demand metric
+        with mock.patch.object(widget, "save") as mock_widget_save:
+            response = self._make_on_demand_request(params, {SPEC_VERSION_TWO_FLAG: True})
+            mock_widget_save.assert_called_once()
+
+        self._assert_on_demand_response(response, expected_on_demand_query=True)
+        assert response.data["data"] == [{user_misery_field: user_misery_formula(1, 2)}]
+
+    def test_on_demand_user_misery_discover_split_with_widget_id_saved(self) -> None:
+        user_misery_field = "user_misery(300)"
+        query = "transaction.duration:>=100"
+
+        _, widget, __ = create_widget(
+            ["epm()"],
+            "transaction.duration:>=100",
+            self.project,
+            title="Dashboard 123",
+            columns=["user.id", "release", "count()"],
+            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,  # Transaction-like tries to use on-demand
+        )
+
+        # We store data for both specs; however, when the query builders try to
+        # query for the data they will not query on-demand data
+        for spec_version in OnDemandMetricSpecVersioning.get_spec_versions():
+            spec = OnDemandMetricSpec(
+                field=user_misery_field,
+                query=query,
+                spec_type=MetricSpecType.DYNAMIC_QUERY,
+                # We only allow querying the function in the latest spec version,
+                # otherwise, the data returned by the endpoint would be 0.05
+                spec_version=spec_version,
+            )
+            tags = {"satisfaction": "miserable"}
+            self.store_on_demand_metric(1, spec=spec, additional_tags=tags, timestamp=self.min_ago)
+            self.store_on_demand_metric(2, spec=spec, timestamp=self.min_ago)
+
+        params = {"field": [user_misery_field], "project": self.project.id, "query": query}
+        self._create_specs(params)
+
+        params["dashboardWidgetId"] = widget.id
+
+        # Since we're using the extra feature flag, we expect user_misery to be an on-demand metric
+        with mock.patch.object(widget, "save") as mock_widget_save:
+            response = self._make_on_demand_request(params, {SPEC_VERSION_TWO_FLAG: True})
+            mock_widget_save.assert_not_called()
+
+        self._assert_on_demand_response(response, expected_on_demand_query=True)
+        assert response.data["data"] == [{user_misery_field: user_misery_formula(1, 2)}]
+
     def test_on_demand_count_unique(self):
         field = "count_unique(user)"
         query = "transaction.duration:>0"

+ 176 - 0
tests/snuba/api/endpoints/test_organization_events_stats_mep.py

@@ -7,11 +7,13 @@ from unittest import mock
 import pytest
 from django.urls import reverse
 
+from sentry.models.dashboard_widget import DashboardWidgetTypes
 from sentry.models.environment import Environment
 from sentry.sentry_metrics.use_case_id_registry import UseCaseID
 from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
 from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
 from sentry.testutils.helpers.datetime import before_now, iso_format
+from sentry.testutils.helpers.on_demand import create_widget
 from sentry.testutils.silo import region_silo_test
 
 pytestmark = pytest.mark.sentry_metrics
@@ -1174,6 +1176,180 @@ class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandW
             assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
             assert response.data[group]["isMetricsExtractedData"]
 
+    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved(self):
+        field = "count()"
+        field_two = "count_web_vitals(measurements.lcp, good)"
+        groupbys = ["customtag1", "customtag2"]
+        query = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(
+            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
+        )
+        spec_two = OnDemandMetricSpec(
+            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
+        )
+
+        _, widget, __ = create_widget(
+            ["epm()"],
+            "transaction.duration:>=100",
+            self.project,
+            title="Dashboard 123",
+            columns=["user.id", "release", "count()"],
+            discover_widget_split=None,
+        )
+
+        for hour in range(0, 5):
+            self.store_on_demand_metric(
+                hour * 62 * 24,
+                spec=spec,
+                additional_tags={
+                    "customtag1": "foo",
+                    "customtag2": "red",
+                    "environment": "production",
+                },
+                timestamp=self.day_ago + timedelta(hours=hour),
+            )
+            self.store_on_demand_metric(
+                hour * 60 * 24,
+                spec=spec_two,
+                additional_tags={
+                    "customtag1": "bar",
+                    "customtag2": "blue",
+                    "environment": "production",
+                },
+                timestamp=self.day_ago + timedelta(hours=hour),
+            )
+
+        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
+
+        with mock.patch.object(widget, "save") as mock_widget_save:
+            response = self.do_request(
+                data={
+                    "project": self.project.id,
+                    "start": iso_format(self.day_ago),
+                    "end": iso_format(self.day_ago + timedelta(hours=2)),
+                    "interval": "1h",
+                    "orderby": ["-count()"],
+                    "query": query,
+                    "yAxis": yAxis,
+                    "field": [
+                        "count()",
+                        "count_web_vitals(measurements.lcp, good)",
+                        "customtag1",
+                        "customtag2",
+                    ],
+                    "topEvents": 5,
+                    "dataset": "metricsEnhanced",
+                    "useOnDemandMetrics": "true",
+                    "onDemandType": "dynamic_query",
+                    "dashboardWidgetId": widget.id,
+                },
+            )
+            mock_widget_save.assert_called_once()
+
+        assert response.status_code == 200, response.content
+
+        groups = [
+            ("foo,red", "count()", 0.0, 1488.0),
+            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
+            ("bar,blue", "count()", 0.0, 0.0),
+            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
+        ]
+        assert len(response.data.keys()) == 2
+        for group_count in groups:
+            group, agg, row1, row2 = group_count
+            row_data = response.data[group][agg]["data"][:2]
+            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
+
+            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
+            assert response.data[group]["isMetricsExtractedData"]
+
+    def test_top_events_with_transaction_on_demand_passing_widget_id_saved(self):
+        field = "count()"
+        field_two = "count_web_vitals(measurements.lcp, good)"
+        groupbys = ["customtag1", "customtag2"]
+        query = "transaction.duration:>=100"
+        spec = OnDemandMetricSpec(
+            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
+        )
+        spec_two = OnDemandMetricSpec(
+            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
+        )
+
+        _, widget, __ = create_widget(
+            ["epm()"],
+            "transaction.duration:>=100",
+            self.project,
+            title="Dashboard 123",
+            columns=["user.id", "release", "count()"],
+            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,  # Transaction-like uses on-demand
+        )
+
+        for hour in range(0, 5):
+            self.store_on_demand_metric(
+                hour * 62 * 24,
+                spec=spec,
+                additional_tags={
+                    "customtag1": "foo",
+                    "customtag2": "red",
+                    "environment": "production",
+                },
+                timestamp=self.day_ago + timedelta(hours=hour),
+            )
+            self.store_on_demand_metric(
+                hour * 60 * 24,
+                spec=spec_two,
+                additional_tags={
+                    "customtag1": "bar",
+                    "customtag2": "blue",
+                    "environment": "production",
+                },
+                timestamp=self.day_ago + timedelta(hours=hour),
+            )
+
+        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
+
+        with mock.patch.object(widget, "save") as mock_widget_save:
+            response = self.do_request(
+                data={
+                    "project": self.project.id,
+                    "start": iso_format(self.day_ago),
+                    "end": iso_format(self.day_ago + timedelta(hours=2)),
+                    "interval": "1h",
+                    "orderby": ["-count()"],
+                    "query": query,
+                    "yAxis": yAxis,
+                    "field": [
+                        "count()",
+                        "count_web_vitals(measurements.lcp, good)",
+                        "customtag1",
+                        "customtag2",
+                    ],
+                    "topEvents": 5,
+                    "dataset": "metricsEnhanced",
+                    "useOnDemandMetrics": "true",
+                    "onDemandType": "dynamic_query",
+                    "dashboardWidgetId": widget.id,
+                },
+            )
+            mock_widget_save.assert_not_called()
+
+        assert response.status_code == 200, response.content
+
+        groups = [
+            ("foo,red", "count()", 0.0, 1488.0),
+            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
+            ("bar,blue", "count()", 0.0, 0.0),
+            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
+        ]
+        assert len(response.data.keys()) == 2
+        for group_count in groups:
+            group, agg, row1, row2 = group_count
+            row_data = response.data[group][agg]["data"][:2]
+            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
+
+            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
+            assert response.data[group]["isMetricsExtractedData"]
+
     def test_timeseries_on_demand_with_multiple_percentiles(self):
         field = "p75(measurements.fcp)"
         field_two = "p75(measurements.lcp)"