Browse Source

feat(projectconfig): Limit experimental config build times (#66949)

Set a 15s time limit on experimental feature computation in project
configs.

For considered alternatives, see
https://github.com/getsentry/team-ingest/issues/286#issuecomment-1988053152.

Notes:
- All features share the default timeout of 15s. Allowing custom timeouts
per feature would make it easy to work around them, while giving the
false impression that timeouts are being properly enforced.
- 15s is half the time the project config build task is allowed to run.
I expect this limit to be hit only rarely, so it should have little to no
impact on the current state in prod, while allowing us to collect data
and tune it in the future (see https://github.com/getsentry/team-ingest/issues/285).

### mypy errors

When the function passed to `add_experimental_config` doesn't accept an
initial timeout argument, mypy reports an error like the following
(example) message:
```txt
src/sentry/relay/config/__init__.py:444: error: Argument 3 to "add_experimental_config" has incompatible type "Callable[[Project], Mapping[str, Any] | None]"; expected "ExperimentalConfigBuilder"  [arg-type]
```
Iker Barriocanal 11 months ago
parent
commit
bb9215a584

+ 11 - 27
src/sentry/relay/config/__init__.py

@@ -1,6 +1,6 @@
 import logging
 import uuid
-from collections.abc import Callable, Mapping, MutableMapping, Sequence
+from collections.abc import Mapping, MutableMapping, Sequence
 from datetime import datetime, timezone
 from typing import Any, Literal, NotRequired, TypedDict
 
@@ -37,6 +37,7 @@ from sentry.ingest.transaction_clusterer.rules import (
 from sentry.interfaces.security import DEFAULT_DISALLOWED_SOURCES
 from sentry.models.project import Project
 from sentry.models.projectkey import ProjectKey
+from sentry.relay.config.experimental import TimeChecker, add_experimental_config
 from sentry.relay.config.metric_extraction import (
     get_metric_conditional_tagging_rules,
     get_metric_extraction_config,
@@ -246,7 +247,7 @@ class CardinalityLimit(TypedDict):
     namespace: str | None
 
 
-def get_metrics_config(project: Project) -> Mapping[str, Any] | None:
+def get_metrics_config(timeout: TimeChecker, project: Project) -> Mapping[str, Any] | None:
     metrics_config = {}
 
     if features.has("organizations:relay-cardinality-limiter", project.organization):
@@ -256,6 +257,7 @@ def get_metrics_config(project: Project) -> Mapping[str, Any] | None:
 
         cardinality_limits: list[CardinalityLimit] = []
         for namespace, option_name in USE_CASE_ID_CARDINALITY_LIMIT_QUOTA_OPTIONS.items():
+            timeout.check()
             option = options.get(option_name)
             if not option or not len(option) == 1:
                 # Multiple quotas are not supported
@@ -281,6 +283,7 @@ def get_metrics_config(project: Project) -> Mapping[str, Any] | None:
 
     if features.has("organizations:metrics-blocking", project.organization):
         metrics_blocking_state = get_metrics_blocking_state_for_relay_config(project)
+        timeout.check()
         if metrics_blocking_state is not None:
             metrics_config.update(metrics_blocking_state)  # type:ignore
 
@@ -312,7 +315,7 @@ def get_project_config(
             return _get_project_config(project, full_config=full_config, project_keys=project_keys)
 
 
-def get_dynamic_sampling_config(project: Project) -> Mapping[str, Any] | None:
+def get_dynamic_sampling_config(timeout: TimeChecker, project: Project) -> Mapping[str, Any] | None:
     if features.has("organizations:dynamic-sampling", project.organization):
         # For compatibility reasons we want to return an empty list of old rules. This has been done in order to make
         # old Relays use empty configs which will result in them forwarding sampling decisions to upstream Relays.
@@ -336,11 +339,14 @@ class TransactionNameRule(TypedDict):
     redaction: TransactionNameRuleRedaction
 
 
-def get_transaction_names_config(project: Project) -> Sequence[TransactionNameRule] | None:
+def get_transaction_names_config(
+    timeout: TimeChecker, project: Project
+) -> Sequence[TransactionNameRule] | None:
     if not features.has("organizations:transaction-name-normalize", project.organization):
         return None
 
     cluster_rules = get_sorted_rules(ClustererNamespace.TRANSACTIONS, project)
+    timeout.check()
     if not cluster_rules:
         return None
 
@@ -377,28 +383,6 @@ class SpanDescriptionRule(TypedDict):
     redaction: SpanDescriptionRuleRedaction
 
 
-def add_experimental_config(
-    config: MutableMapping[str, Any],
-    key: str,
-    function: Callable[..., Any],
-    *args: Any,
-    **kwargs: Any,
-) -> None:
-    """Try to set `config[key] = function(*args, **kwargs)`.
-    If the result of the function call is None, the key is not set.
-    If the function call raises an exception, we log it to sentry and the key remains unset.
-    NOTE: Only use this function if you expect Relay to behave reasonably
-    if ``key`` is missing from the config.
-    """
-    try:
-        subconfig = function(*args, **kwargs)
-    except Exception:
-        logger.exception("Exception while building Relay project config field")
-    else:
-        if subconfig is not None:
-            config[key] = subconfig
-
-
 def _should_extract_abnormal_mechanism(project: Project) -> bool:
     return sample_modulo(
         "sentry-metrics.releasehealth.abnormal-mechanism-extraction-rate", project.organization_id
@@ -996,7 +980,7 @@ def _should_extract_transaction_metrics(project: Project) -> bool:
 
 
 def get_transaction_metrics_settings(
-    project: Project, breakdowns_config: Mapping[str, Any] | None
+    timeout: TimeChecker, project: Project, breakdowns_config: Mapping[str, Any] | None
 ) -> TransactionMetricsSettings:
     """This function assumes that the corresponding feature flag has been checked.
     See _should_extract_transaction_metrics.

+ 78 - 0
src/sentry/relay/config/experimental.py

@@ -0,0 +1,78 @@
+import logging
+from collections.abc import MutableMapping
+from datetime import datetime, timedelta, timezone
+from typing import Any, Protocol
+
+import sentry_sdk
+
+logger = logging.getLogger(__name__)
+
+
+class TimeoutException(Exception):
+    def __init__(self, elapsed: timedelta, timeout: timedelta, *args: object) -> None:
+        super().__init__(*args)
+        self._elapsed = elapsed
+        self._timeout = timeout
+
+
+class TimeChecker:
+    """Interface to check whether a timeout has been hit.
+
+    The class is initialized with the provided hard timeout. If the timedelta is
+    not bigger than `0`, no checks are performed.  Calling `check` checks the
+    timeout, and raises a `TimeoutException` if it's hit. The timeout starts at
+    the moment the class is initialized.
+    """
+
+    def __init__(self, hard_timeout: timedelta) -> None:
+        self._hard_timeout = hard_timeout
+        self._start = datetime.now(timezone.utc)
+
+    def check(self) -> None:
+        if self._hard_timeout <= timedelta(0):
+            return
+
+        now = datetime.now(timezone.utc)
+        elapsed = now - self._start
+        if elapsed >= self._hard_timeout:
+            raise TimeoutException(elapsed, self._hard_timeout)
+
+
+class ExperimentalConfigBuilder(Protocol):
+    def __call__(self, timeout: TimeChecker, *args, **kwargs) -> Any:
+        pass
+
+
+#: Timeout for an experimental feature build.
+_FEATURE_BUILD_TIMEOUT = timedelta(seconds=15)
+
+
+def add_experimental_config(
+    config: MutableMapping[str, Any],
+    key: str,
+    function: ExperimentalConfigBuilder,
+    *args: Any,
+    **kwargs: Any,
+) -> None:
+    """Try to set `config[key] = function(*args, **kwargs)`.
+    If the result of the function call is None, the key is not set.
+    If the function call raises an exception, we log it to sentry and the key remains unset.
+    NOTE: Only use this function if you expect Relay to behave reasonably
+    if ``key`` is missing from the config.
+    """
+    timeout = TimeChecker(_FEATURE_BUILD_TIMEOUT)
+
+    with sentry_sdk.start_span(op=f"project_config.experimental_config.{key}"):
+        try:
+            subconfig = function(timeout, *args, **kwargs)
+        except TimeoutException as e:
+            logger.exception(
+                "Project config feature build timed out: %s",
+                key,
+                extra={"hard_timeout": e._timeout, "elapsed": e._elapsed},
+            )
+        except Exception:
+            logger.exception("Exception while building Relay project config field")
+        else:
+            if subconfig is not None:
+                config[key] = subconfig

+ 10 - 1
src/sentry/relay/config/metric_extraction.py

@@ -28,6 +28,7 @@ from sentry.models.transaction_threshold import (
     ProjectTransactionThresholdOverride,
     TransactionMetric,
 )
+from sentry.relay.config.experimental import TimeChecker
 from sentry.search.events import fields
 from sentry.search.events.builder import QueryBuilder
 from sentry.search.events.types import ParamsType, QueryBuilderConfig
@@ -89,7 +90,9 @@ def get_max_widget_specs(organization: Organization) -> int:
 
 
 @metrics.wraps("on_demand_metrics.get_metric_extraction_config")
-def get_metric_extraction_config(project: Project) -> MetricExtractionConfig | None:
+def get_metric_extraction_config(
+    timeout: TimeChecker, project: Project
+) -> MetricExtractionConfig | None:
     """
     Returns generic metric extraction config for the given project.
 
@@ -102,16 +105,21 @@ def get_metric_extraction_config(project: Project) -> MetricExtractionConfig | N
     sentry_sdk.set_tag("organization_id", project.organization_id)
     with sentry_sdk.start_span(op="on_demand_metrics_feature_flags"):
         enabled_features = on_demand_metrics_feature_flags(project.organization)
+    timeout.check()
 
     prefilling = "organizations:on-demand-metrics-prefill" in enabled_features
 
     with sentry_sdk.start_span(op="get_alert_metric_specs"):
         alert_specs = _get_alert_metric_specs(project, enabled_features, prefilling)
+    timeout.check()
     with sentry_sdk.start_span(op="get_widget_metric_specs"):
         widget_specs = _get_widget_metric_specs(project, enabled_features, prefilling)
+    timeout.check()
 
     with sentry_sdk.start_span(op="merge_metric_specs"):
         metric_specs = _merge_metric_specs(alert_specs, widget_specs)
+    timeout.check()
+
     if not metric_specs:
         return None
 
@@ -824,6 +832,7 @@ _DEFAULT_THRESHOLD = _DefaultThreshold(
 
 
 def get_metric_conditional_tagging_rules(
+    timeout: Any,
     project: Project,
 ) -> Sequence[MetricConditionalTaggingRule]:
     rules: list[MetricConditionalTaggingRule] = []

+ 42 - 0
tests/sentry/relay/config/test_experimental.py

@@ -0,0 +1,42 @@
+from datetime import timedelta
+from time import sleep
+from unittest.mock import patch
+
+import pytest
+
+from sentry.relay.config.experimental import TimeChecker, TimeoutException, add_experimental_config
+
+
+def test_time_checker_throws_on_timeout_hit():
+    checker = TimeChecker(timedelta(seconds=1))
+    sleep(1)
+    with pytest.raises(TimeoutException):
+        checker.check()
+
+
+def test_time_checker_no_throw_on_timeout_no_hit():
+    checker = TimeChecker(timedelta(seconds=5))
+    checker.check()
+
+
+@pytest.mark.parametrize("timeout", (-1, 0))
+def test_time_checker_noop_on_invalid_timeout(timeout):
+    checker = TimeChecker(timedelta(seconds=timeout))
+    checker.check()
+
+
+@patch("sentry.relay.config.experimental._FEATURE_BUILD_TIMEOUT", timedelta(seconds=1))
+@patch("sentry.relay.config.experimental.logger.exception")
+def test_add_experimental_config_catches_timeout(mock_logger):
+    def dummy(timeout: TimeChecker, *args, **kwargs):
+        sleep(1)
+        timeout.check()
+
+    add_experimental_config({}, "test-key", dummy, 1, 1)  # Does not raise
+
+    # Assert logger message.
+    # These many asserts is a workaround to exclude `elapsed` from the assertion
+    assert mock_logger.call_args[0] == ("Project config feature build timed out: %s", "test-key")
+    extra = mock_logger.call_args[1]["extra"]
+    assert extra.pop("elapsed") > timedelta(seconds=1)
+    assert extra == {"hard_timeout": timedelta(seconds=1)}

+ 69 - 54
tests/sentry/relay/config/test_metric_extraction.py

@@ -10,6 +10,7 @@ from sentry.models.dashboard_widget import DashboardWidgetQuery, DashboardWidget
 from sentry.models.environment import Environment
 from sentry.models.project import Project
 from sentry.models.transaction_threshold import ProjectTransactionThreshold, TransactionMetric
+from sentry.relay.config.experimental import TimeChecker
 from sentry.relay.config.metric_extraction import (
     get_current_widget_specs,
     get_metric_extraction_config,
@@ -78,14 +79,14 @@ def create_project_threshold(
 @django_db_all
 def test_get_metric_extraction_config_empty_no_alerts(default_project: Project) -> None:
     with Feature(ON_DEMAND_METRICS):
-        assert not get_metric_extraction_config(default_project)
+        assert not get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
 
 @django_db_all
 def test_get_metric_extraction_config_empty_feature_flag_off(default_project: Project) -> None:
     create_alert("count()", "transaction.duration:>=1000", default_project)
 
-    assert not get_metric_extraction_config(default_project)
+    assert not get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
 
 @django_db_all
@@ -94,7 +95,7 @@ def test_get_metric_extraction_config_empty_standard_alerts(default_project: Pro
         # standard alerts are not included in the config
         create_alert("count()", "", default_project)
 
-        assert not get_metric_extraction_config(default_project)
+        assert not get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
 
 @django_db_all
@@ -102,7 +103,7 @@ def test_get_metric_extraction_config_single_alert(default_project: Project) ->
     with Feature(ON_DEMAND_METRICS):
         create_alert("count()", "transaction.duration:>=1000", default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -130,7 +131,7 @@ def test_get_metric_extraction_config_with_double_write_env_alert(
             environment=default_environment,
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -174,7 +175,7 @@ def test_get_metric_extraction_config_single_alert_with_mri(default_project: Pro
             default_project,
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config is None
 
@@ -185,7 +186,7 @@ def test_get_metric_extraction_config_multiple_alerts(default_project: Project)
         create_alert("count()", "transaction.duration:>=1000", default_project)
         create_alert("count()", "transaction.duration:>=2000", default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert len(config["metrics"]) == 2
@@ -205,10 +206,12 @@ def test_get_metric_extraction_config_multiple_alerts_above_max_limit(
         create_alert("count()", "transaction.duration:>=1000", default_project)
         create_alert("count()", "transaction.duration:>=2000", default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         with mock.patch("sentry_sdk.capture_exception") as capture_exception:
-            config = get_metric_extraction_config(default_project)
+            config = get_metric_extraction_config(
+                TimeChecker(timedelta(seconds=0)), default_project
+            )
             assert config
 
             assert capture_exception.call_count == 2
@@ -229,7 +232,7 @@ def test_get_metric_extraction_config_multiple_alerts_duplicated(default_project
         create_alert("count()", "transaction.duration:>=1000", default_project)
         create_alert("count()", "transaction.duration:>=1000", default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert len(config["metrics"]) == 1
@@ -246,7 +249,7 @@ def test_get_metric_extraction_config_environment(
             "count()", "transaction.duration:>0", default_project, environment=default_environment
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         # assert that the deduplication works with environments
@@ -265,7 +268,7 @@ def test_get_metric_extraction_config_single_standard_widget(default_project: Pr
     with Feature({ON_DEMAND_METRICS_WIDGETS: True}):
         create_widget(["count()"], "", default_project)
 
-        assert not get_metric_extraction_config(default_project)
+        assert not get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
 
 @django_db_all
@@ -273,7 +276,7 @@ def test_get_metric_extraction_config_single_widget(default_project: Project) ->
     with Feature({ON_DEMAND_METRICS_WIDGETS: True}):
         create_widget(["count()"], "transaction.duration:>=1000", default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -310,7 +313,7 @@ def test_get_metric_extraction_config_single_widget_multiple_aggregates(
             ["count()", "avg(transaction.duration)"], "transaction.duration:>=1000", default_project
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -370,7 +373,7 @@ def test_get_metric_extraction_config_single_widget_multiple_count_if(
         ]
         create_widget(aggregates, "transaction.duration:>=1000", default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -473,7 +476,7 @@ def test_get_metric_extraction_config_multiple_aggregates_single_field(
             default_project,
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -509,7 +512,7 @@ def test_get_metric_extraction_config_multiple_widgets_duplicated(default_projec
         )
         create_widget(["count()"], "transaction.duration:>=1000", default_project, "Dashboard 2")
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -566,7 +569,9 @@ def test_get_metric_extraction_config_multiple_widgets_above_max_limit(
         create_widget(["count()"], "transaction.duration:>=1000", default_project, "Dashboard 2")
 
         with mock.patch("sentry_sdk.capture_exception") as capture_exception:
-            config = get_metric_extraction_config(default_project)
+            config = get_metric_extraction_config(
+                TimeChecker(timedelta(seconds=0)), default_project
+            )
             assert config
 
             assert capture_exception.call_count == 2
@@ -591,7 +596,9 @@ def test_get_metric_extraction_config_multiple_widgets_not_above_max_limit_ident
         create_widget(["count()"], "transaction.duration:>=1000", default_project, "Dashboard 2")
 
         with mock.patch("sentry_sdk.capture_exception") as capture_exception:
-            config = get_metric_extraction_config(default_project)
+            config = get_metric_extraction_config(
+                TimeChecker(timedelta(seconds=0)), default_project
+            )
             assert config
 
             assert capture_exception.call_count == 0
@@ -617,7 +624,9 @@ def test_get_metric_extraction_config_multiple_widgets_above_max_limit_ordered_s
         process_widget_specs([widget_query.id])
 
         with mock.patch("sentry_sdk.capture_exception") as capture_exception:
-            config = get_metric_extraction_config(default_project)
+            config = get_metric_extraction_config(
+                TimeChecker(timedelta(seconds=0)), default_project
+            )
 
             assert config
             assert len(config["metrics"]) == 8  # 4 * 2 spec versions
@@ -660,7 +669,9 @@ def test_get_metric_extraction_config_multiple_widgets_not_using_extended_specs(
         create_widget(["count()"], "transaction.duration:>=1000", default_project, "Dashboard 2")
 
         with mock.patch("sentry_sdk.capture_exception") as capture_exception:
-            config = get_metric_extraction_config(default_project)
+            config = get_metric_extraction_config(
+                TimeChecker(timedelta(seconds=0)), default_project
+            )
             assert config
 
             assert capture_exception.call_count == 2
@@ -690,7 +701,9 @@ def test_get_metric_extraction_config_multiple_widgets_above_extended_max_limit(
         create_widget(["count()"], "transaction.duration:>=1000", default_project, "Dashboard 2")
 
         with mock.patch("sentry_sdk.capture_exception") as capture_exception:
-            config = get_metric_extraction_config(default_project)
+            config = get_metric_extraction_config(
+                TimeChecker(timedelta(seconds=0)), default_project
+            )
             assert config
 
             assert capture_exception.call_count == 2
@@ -719,7 +732,7 @@ def test_get_metric_extraction_config_multiple_widgets_under_extended_max_limit(
         create_widget(["count()"], "transaction.duration:>=1100", default_project)
         create_widget(["count()"], "transaction.duration:>=1000", default_project, "Dashboard 2")
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         # Revert to 2 after {"include_environment_tag"} becomes the default
@@ -733,7 +746,7 @@ def test_get_metric_extraction_config_alerts_and_widgets_off(default_project: Pr
         create_alert("count()", "transaction.duration:>=1000", default_project)
         create_widget(["count()"], "transaction.duration:>=1000", default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -758,7 +771,7 @@ def test_get_metric_extraction_config_alerts_and_widgets(default_project: Projec
             ["count()", "avg(transaction.duration)"], "transaction.duration:>=1000", default_project
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -810,7 +823,7 @@ def test_get_metric_extraction_config_with_failure_count(default_project: Projec
     with Feature({ON_DEMAND_METRICS_WIDGETS: True}):
         create_widget(["failure_count()"], "transaction.duration:>=1000", default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -870,7 +883,7 @@ def test_get_metric_extraction_config_with_apdex(default_project: Project) -> No
         # preferred.
         create_project_threshold(default_project, 200, TransactionMetric.DURATION.value)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert len(config["metrics"]) == 1
@@ -922,7 +935,7 @@ def test_get_metric_extraction_config_with_count_unique(
         assert widget_query.conditions == query
         assert widget_query.columns == []
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         assert config
         # Let's only assert the current version of the spec
         spec = config["metrics"][0]
@@ -948,7 +961,7 @@ def test_get_metric_extraction_config_with_count_web_vitals(
             default_project,
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         vital = measurement.split(".")[1]
 
@@ -1177,7 +1190,7 @@ def test_get_metric_extraction_config_with_user_misery(default_project: Project)
             default_project,
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -1233,7 +1246,7 @@ def test_get_metric_extraction_config_user_misery_with_tag_columns(
             columns=["lcp.element", "custom"],
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -1288,7 +1301,7 @@ def test_get_metric_extraction_config_epm_with_non_tag_columns(default_project:
             columns=["user.id", "user", "release"],
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -1333,7 +1346,7 @@ def test_get_metric_extraction_config_with_high_cardinality(default_project: Pro
             columns=["user.id", "release", "count()"],
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert not config
 
@@ -1372,7 +1385,7 @@ def test_get_metric_extraction_config_multiple_widgets_with_high_cardinality(
             title="Widget3",
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         # Revert to 2 after {"include_environment_tag"} becomes the default
@@ -1401,7 +1414,7 @@ def test_get_metric_extraction_config_with_extraction_enabled(default_project: P
             columns=["user.id", "release", "count()"],
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
 
@@ -1435,7 +1448,7 @@ def test_stateful_get_metric_extraction_config_with_extraction_disabled(
             columns=["user.id", "release", "count()"],
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert not config
 
@@ -1479,7 +1492,7 @@ def test_stateful_get_metric_extraction_config_multiple_widgets_with_extraction_
             title="Widget3",
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         # Revert to 2 after {"include_environment_tag"} becomes the default
@@ -1520,7 +1533,7 @@ def test_stateful_get_metric_extraction_config_enabled_with_multiple_versions(
             "enabled:enrolled",
         ]
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         # Check that the first version being enabled outputs both specs.
         assert config
@@ -1533,7 +1546,7 @@ def test_stateful_get_metric_extraction_config_enabled_with_multiple_versions(
             extraction_row_default.extraction_state = "disabled:manual"
             extraction_row_default.save()
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         # In the future with separate version decisions, assert that there is only one spec in config here.
         assert not config
@@ -1558,7 +1571,7 @@ def test_stateful_get_metric_extraction_config_with_low_cardinality(
             columns=["user.id", "release", "count()"],
         )
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
 
@@ -1569,7 +1582,7 @@ def test_get_metric_extraction_config_with_unicode_character(default_project: Pr
         # This will cause the Unicode bug to be raised for the current version
         create_widget(["count()"], "user.name:Armén", default_project)
         create_widget(["count()"], "user.name:Kevan", default_project, title="Dashboard Foo")
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -1631,7 +1644,7 @@ def test_get_metric_extraction_config_epm_eps(
     with Feature({ON_DEMAND_METRICS_WIDGETS: True}):
         create_widget([metric], query, default_project)
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         # epm() and eps() are supported by standard metrics when there's no query
         if query == "":
@@ -1687,7 +1700,7 @@ def test_get_metrics_extraction_config_features_combinations(
 
     features = {feature: True for feature in enabled_features}
     with Feature(features):
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         if number_of_metrics == 0:
             assert config is None
         else:
@@ -1706,7 +1719,7 @@ def test_get_metric_extraction_config_with_transactions_dataset(default_project:
 
     # We test with prefilling, and we expect that both alerts are fetched since we support both datasets.
     with Feature({ON_DEMAND_METRICS_PREFILL: True}):
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -1732,7 +1745,7 @@ def test_get_metric_extraction_config_with_transactions_dataset(default_project:
 
     # We test without prefilling, and we expect that only alerts for performance metrics are fetched.
     with Feature({ON_DEMAND_METRICS: True}):
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert config["metrics"] == [
@@ -1758,7 +1771,7 @@ def test_get_metric_extraction_config_with_no_spec(default_project: Project) ->
     )
 
     with Feature({ON_DEMAND_METRICS: True}):
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
 
         assert config
         assert len(config["metrics"]) == 1
@@ -1813,7 +1826,7 @@ def test_include_environment_for_widgets(default_project: Project) -> None:
 
     with Feature([ON_DEMAND_METRICS, ON_DEMAND_METRICS_WIDGETS]):
         widget, _, _ = create_widget([aggr], query, default_project)
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         # Because we have two specs we will have two metrics.
         # The second spec includes the environment tag as part of the query hash.
         assert config and config["metrics"] == [
@@ -1869,10 +1882,10 @@ def test_include_environment_for_widgets_with_multiple_env(default_project: Proj
 
     with Feature([ON_DEMAND_METRICS, ON_DEMAND_METRICS_WIDGETS]):
         widget_query, _, _ = create_widget(aggrs, query, default_project, columns=columns)
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         assert config
 
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         process_widget_specs([widget_query.id])
         assert config
         assert [
@@ -1920,7 +1933,7 @@ def test_alert_and_widget_colliding(default_project: Project) -> None:
 
     with Feature([ON_DEMAND_METRICS, ON_DEMAND_METRICS_WIDGETS]):
         widget, _, _ = create_widget([aggr], query, default_project)
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         # Because we have two specs we will have two metrics.
         assert config and config["metrics"] == [
             widget_to_metric_spec("f1353b0f", condition),
@@ -1930,7 +1943,7 @@ def test_alert_and_widget_colliding(default_project: Project) -> None:
         # Once we deprecate the current spec version, the widget will not create
         # the f1353b0f, thus, there will be no more duplicated specs
         alert = create_alert(aggr, query, default_project)
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         # Now that we iterate over the widgets first, we will pick the spec generated by the widget
         # which includes the environment as a tag
         assert config and config["metrics"] == [
@@ -1988,7 +2001,7 @@ def test_event_type(
 
     with Feature([ON_DEMAND_METRICS, ON_DEMAND_METRICS_WIDGETS]):
         widget, _, _ = create_widget([aggr], query, default_project)
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         if not config_assertion:
             assert config is None
         else:
@@ -2007,7 +2020,7 @@ def test_level_field(default_project: Project) -> None:
 
     with Feature(ON_DEMAND_METRICS_WIDGETS):
         create_widget([aggr], query, default_project)
-        config = get_metric_extraction_config(default_project)
+        config = get_metric_extraction_config(TimeChecker(timedelta(seconds=0)), default_project)
         assert config is None
 
 
@@ -2030,7 +2043,9 @@ def test_widget_modifed_after_on_demand(default_project: Project) -> None:
         with mock.patch("sentry_sdk.capture_exception") as capture_exception:
 
             process_widget_specs([widget_query.id])
-            config = get_metric_extraction_config(default_project)
+            config = get_metric_extraction_config(
+                TimeChecker(timedelta(seconds=0)), default_project
+            )
 
             assert config and config["metrics"]
 

+ 1 - 1
tests/sentry/relay/test_config.py

@@ -140,7 +140,7 @@ SOME_EXCEPTION = RuntimeError("foo")
 @django_db_all
 @region_silo_test
 @mock.patch("sentry.relay.config.generate_rules", side_effect=SOME_EXCEPTION)
-@mock.patch("sentry.relay.config.logger")
+@mock.patch("sentry.relay.config.experimental.logger")
 def test_get_experimental_config_dyn_sampling(mock_logger, _, default_project):
     keys = ProjectKey.objects.filter(project=default_project)
     with Feature({"organizations:dynamic-sampling": True}):