|
@@ -14,6 +14,7 @@ from sentry_relay.processing import validate_sampling_condition
|
|
|
from sentry import features, options
|
|
|
from sentry.api.endpoints.project_transaction_threshold import DEFAULT_THRESHOLD
|
|
|
from sentry.api.utils import get_date_range_from_params
|
|
|
+from sentry.features.rollout import in_random_rollout
|
|
|
from sentry.incidents.models.alert_rule import AlertRule, AlertRuleStatus
|
|
|
from sentry.models.dashboard_widget import (
|
|
|
ON_DEMAND_ENABLED_KEY,
|
|
@@ -203,6 +204,20 @@ def _get_alert_metric_specs(
|
|
|
return specs
|
|
|
|
|
|
|
|
|
+def _bulk_cache_query_key(project: Project) -> str:
|
|
|
+ return f"on-demand.bulk-query-cache.{project.organization.id}"
|
|
|
+
|
|
|
+
|
|
|
+def _get_bulk_cached_query(project: Project) -> dict[str, Any]:
|
|
|
+ query_bulk_cache_key = _bulk_cache_query_key(project)
|
|
|
+ return cache.get(query_bulk_cache_key, None)
|
|
|
+
|
|
|
+
|
|
|
+def _set_bulk_cached_query(project: Project, query_cache: dict[str, Any]) -> None:
|
|
|
+ query_bulk_cache_key = _bulk_cache_query_key(project)
|
|
|
+ cache.set(query_bulk_cache_key, query_cache, timeout=5400)
|
|
|
+
|
|
|
+
|
|
|
@metrics.wraps("on_demand_metrics._get_widget_metric_specs")
|
|
|
def _get_widget_metric_specs(
|
|
|
project: Project, enabled_features: set[str], prefilling: bool
|
|
@@ -230,6 +245,10 @@ def _get_widget_metric_specs(
|
|
|
"on_demand_metrics.widgets_to_process", amount=len(widget_queries), sample_rate=1.0
|
|
|
)
|
|
|
|
|
|
+ organization_bulk_query_cache = _get_bulk_cached_query(project)
|
|
|
+ save_organization_bulk_cache = not bool(organization_bulk_query_cache)
|
|
|
+    organization_bulk_query_cache = organization_bulk_query_cache or {}
|
|
|
+
|
|
|
ignored_widget_ids: dict[int, bool] = {}
|
|
|
specs_for_widget: dict[int, list[HashedMetricSpec]] = defaultdict(list)
|
|
|
widget_query_for_spec_hash: dict[str, DashboardWidgetQuery] = {}
|
|
@@ -239,7 +258,9 @@ def _get_widget_metric_specs(
|
|
|
|
|
|
with metrics.timer("on_demand_metrics.widget_spec_convert"):
|
|
|
for widget_query in widget_queries:
|
|
|
- widget_specs = convert_widget_query_to_metric(project, widget_query, prefilling)
|
|
|
+ widget_specs = convert_widget_query_to_metric(
|
|
|
+ project, widget_query, prefilling, organization_bulk_query_cache
|
|
|
+ )
|
|
|
|
|
|
if not widget_specs:
|
|
|
# Skip checking any widget queries that don't have specs,
|
|
@@ -285,6 +306,9 @@ def _get_widget_metric_specs(
|
|
|
|
|
|
_update_state_with_spec_limit(trimmed_specs, widget_query_for_spec_hash)
|
|
|
metrics.incr("on_demand_metrics.widget_query_specs", amount=len(specs))
|
|
|
+ if in_random_rollout("on_demand_metrics.cache_should_use_on_demand"):
|
|
|
+ if save_organization_bulk_cache:
|
|
|
+ _set_bulk_cached_query(project, organization_bulk_query_cache)
|
|
|
return specs
|
|
|
|
|
|
|
|
@@ -410,7 +434,10 @@ def _convert_snuba_query_to_metrics(
|
|
|
|
|
|
|
|
|
def convert_widget_query_to_metric(
|
|
|
- project: Project, widget_query: DashboardWidgetQuery, prefilling: bool
|
|
|
+ project: Project,
|
|
|
+ widget_query: DashboardWidgetQuery,
|
|
|
+ prefilling: bool,
|
|
|
+ organization_bulk_query_cache: dict[str, Any] | None = None,
|
|
|
) -> list[HashedMetricSpec]:
|
|
|
"""
|
|
|
Converts a passed metrics widget query to one or more MetricSpecs.
|
|
@@ -426,7 +453,7 @@ def convert_widget_query_to_metric(
|
|
|
|
|
|
for aggregate in aggregates:
|
|
|
metrics_specs += _generate_metric_specs(
|
|
|
- aggregate, widget_query, project, prefilling, groupbys
|
|
|
+ aggregate, widget_query, project, prefilling, groupbys, organization_bulk_query_cache
|
|
|
)
|
|
|
|
|
|
return metrics_specs
|
|
@@ -438,6 +465,7 @@ def _generate_metric_specs(
|
|
|
project: Project,
|
|
|
prefilling: bool,
|
|
|
groupbys: Sequence[str] | None = None,
|
|
|
+ organization_bulk_query_cache: dict[str, Any] | None = None,
|
|
|
) -> list[HashedMetricSpec]:
|
|
|
metrics_specs = []
|
|
|
metrics.incr("on_demand_metrics.before_widget_spec_generation")
|
|
@@ -452,6 +480,7 @@ def _generate_metric_specs(
|
|
|
prefilling,
|
|
|
groupbys=groupbys,
|
|
|
spec_type=MetricSpecType.DYNAMIC_QUERY,
|
|
|
+ organization_bulk_query_cache=organization_bulk_query_cache,
|
|
|
):
|
|
|
for spec in results:
|
|
|
_log_on_demand_metric_spec(
|
|
@@ -708,6 +737,7 @@ def _convert_aggregate_and_query_to_metrics(
|
|
|
prefilling: bool,
|
|
|
spec_type: MetricSpecType = MetricSpecType.SIMPLE_QUERY,
|
|
|
groupbys: Sequence[str] | None = None,
|
|
|
+ organization_bulk_query_cache: dict[str, Any] | None = None,
|
|
|
) -> Sequence[HashedMetricSpec] | None:
|
|
|
"""
|
|
|
Converts an aggregate and a query to a metric spec with its hash value.
|
|
@@ -719,7 +749,9 @@ def _convert_aggregate_and_query_to_metrics(
|
|
|
# We can avoid injection of the environment in the query, since it's supported by standard, thus it won't change
|
|
|
# the supported state of a query, since if it's standard, and we added environment it will still be standard
|
|
|
# and if it's on demand, it will always be on demand irrespectively of what we add.
|
|
|
- if not should_use_on_demand_metrics(dataset, aggregate, query, groupbys, prefilling):
|
|
|
+ if not should_use_on_demand_metrics(
|
|
|
+ dataset, aggregate, query, groupbys, prefilling, organization_bulk_query_cache
|
|
|
+ ):
|
|
|
return None
|
|
|
|
|
|
metric_specs_and_hashes = []
|