
fix(tests): fix the set_sentry_option test helper (#43492)

Fixes flakiness introduced by the combination of the `region_silo_test`
decorators in https://github.com/getsentry/sentry/pull/43329 and the
`set_sentry_option` test helper, which did not correctly follow Python's
`ContextManager` protocol.

Basically, any context manager's `__enter__` and `__exit__` must behave like a
stack -- within a given thread, the most recently entered context manager
should be exited first (LIFO). Unfortunately, `set_sentry_option` would enter
in the middle of a test but only exit at the end of the test. This interacts
badly with `region_silo_test`, which itself invokes `override_settings` to
manage the `SiloMode` state within tests -- the silo mode "push" happens before
`set_sentry_option` enters, but its "pop" also happens before
`set_sentry_option` exits. Because Django's `override_settings`
implementation reaches into the `settings._wrapped` object to manage its
stack state (later restoring it in precisely the same order as the exit
calls), this interleaving results in a corrupt, inconsistent stack.

This was "flaky" because the corruption isn't a problem right away: it causes
future tests to start in the wrong silo mode. Depending on which tests
happened to run next and how much they asserted, they could end up running
hybrid cloud code despite not being annotated or prepared for
hybrid-cloud-specific logic.

The solution is simply to make the `set_sentry_option` fixture return a plain
context manager, which allows these fixtures and the `region_silo_test`
decorators to cooperate.
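
A minimal sketch of the fixed pattern (names such as `override_option` and
`set_option` are placeholders, not the real helpers; the actual change is in
the diff below):

```python
import contextlib
import pytest

@contextlib.contextmanager
def override_option(key, value):
    # placeholder for sentry.testutils.helpers.options.override_options
    yield

@pytest.fixture
def set_option():
    # Return the context manager rather than entering it here and relying on
    # request.addfinalizer; the test owns the scope, so enter/exit nest
    # correctly inside any decorator-level overrides.
    def inner(key, value):
        return override_option(key, value)
    return inner

def test_example(set_option):
    with set_option("some-key", 1.0):
        ...  # runs with the option applied; exits before any outer overrides unwind
```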

In addition, to prevent such bad states in the future, which result in flaky
tests and hard-to-debug conditions, we now validate that the `SiloMode` has
been cleaned up at the start and end of every test.
Zach Collins · 2 years ago · parent commit fe751041a8

+ 0 - 2
src/sentry/testutils/cases.py

@@ -122,7 +122,6 @@ from sentry.search.events.constants import (
 )
 from sentry.sentry_metrics import indexer
 from sentry.sentry_metrics.configuration import UseCaseKey
-from sentry.silo import single_process_silo_mode_state
 from sentry.snuba.metrics.datasource import get_series
 from sentry.tagstore.snuba import SnubaTagStorage
 from sentry.testutils.factories import get_fixture_path
@@ -315,7 +314,6 @@ class BaseTestCase(Fixtures):
         GroupMeta.objects.clear_local_cache()
 
     def _post_teardown(self):
-        single_process_silo_mode_state.mode = None
         super()._post_teardown()
 
     def options(self, options):

+ 4 - 6
src/sentry/utils/pytest/fixtures.py

@@ -426,22 +426,20 @@ def reset_snuba(call_snuba):
 
 
 @pytest.fixture
-def set_sentry_option(request):
+def set_sentry_option():
     """
     A pytest-style wrapper around override_options.
 
     ```python
     def test_basic(set_sentry_option):
-        set_sentry_option("key", 1.0)
+        with set_sentry_option("key", 1.0):
+            do stuff
     ```
     """
     from sentry.testutils.helpers.options import override_options
 
     def inner(key, value):
-        ctx_mgr = override_options({key: value})
-        ctx_mgr.__enter__()
-
-        request.addfinalizer(lambda: ctx_mgr.__exit__(None, None, None))
+        return override_options({key: value})
 
     return inner
 

+ 19 - 0
tests/conftest.py

@@ -3,6 +3,8 @@ import os
 
 import pytest
 
+from sentry.silo import SiloMode
+
 pytest_plugins = ["sentry.utils.pytest"]
 
 
@@ -142,3 +144,20 @@ def setup_default_hybrid_cloud_stubs():
         for stub in stubs:
             stack.enter_context(stub)
         yield
+
+
+@pytest.fixture(autouse=True)
+def validate_silo_mode():
+    # NOTE!  Hybrid cloud uses many mechanisms to simulate multiple different configurations of the application
+    # during tests.  It depends upon `override_settings` using the correct contextmanager behaviors and correct
+    # thread handling in acceptance tests.  If you hit one of these, it's possible either that cleanup logic has
+    # a bug, or you may be using a contextmanager incorrectly.  Let us know and we can help!
+    if SiloMode.get_current_mode() != SiloMode.MONOLITH:
+        raise Exception(
+            "Possible test leak bug!  SiloMode was not reset to Monolith between tests.  Please read the comment for validate_silo_mode() in tests/conftest.py."
+        )
+    yield
+    if SiloMode.get_current_mode() != SiloMode.MONOLITH:
+        raise Exception(
+            "Possible test leak bug!  SiloMode was not reset to Monolith between tests.  Please read the comment for validate_silo_mode() in tests/conftest.py."
+        )

+ 13 - 13
tests/sentry/api/endpoints/test_relay_projectconfigs_v2.py

@@ -391,18 +391,18 @@ def test_session_metrics_extraction(call_endpoint, task_runner, drop_sessions):
 def test_session_metrics_abnormal_mechanism_tag_extraction(
     call_endpoint, task_runner, set_sentry_option, abnormal_mechanism_rollout
 ):
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.releasehealth.abnormal-mechanism-extraction-rate",
         abnormal_mechanism_rollout,
-    )
-    with Feature({"organizations:metrics-extraction": True}):
-        with task_runner():
-            result, status_code = call_endpoint(full_config=True)
-            assert status_code < 400
-
-        for config in result["configs"].values():
-            config = config["config"]
-            assert config["sessionMetrics"] == {
-                "version": 2 if abnormal_mechanism_rollout else 1,
-                "drop": False,
-            }
+    ):
+        with Feature({"organizations:metrics-extraction": True}):
+            with task_runner():
+                result, status_code = call_endpoint(full_config=True)
+                assert status_code < 400
+
+            for config in result["configs"].values():
+                config = config["config"]
+                assert config["sessionMetrics"] == {
+                    "version": 2 if abnormal_mechanism_rollout else 1,
+                    "drop": False,
+                }

+ 14 - 11
tests/sentry/relay/test_config.py

@@ -508,19 +508,22 @@ def test_project_config_with_breakdown(default_project, insta_snapshot, transact
 def test_project_config_with_organizations_metrics_extraction(
     default_project, set_sentry_option, abnormal_mechanism_rollout, has_metrics_extraction
 ):
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.releasehealth.abnormal-mechanism-extraction-rate",
         abnormal_mechanism_rollout,
-    )
-    with Feature({"organizations:metrics-extraction": has_metrics_extraction}):
-        cfg = get_project_config(default_project, full_config=True)
-
-    cfg = cfg.to_dict()
-    session_metrics = get_path(cfg, "config", "sessionMetrics")
-    if has_metrics_extraction:
-        assert session_metrics == {"drop": False, "version": 2 if abnormal_mechanism_rollout else 1}
-    else:
-        assert session_metrics is None
+    ):
+        with Feature({"organizations:metrics-extraction": has_metrics_extraction}):
+            cfg = get_project_config(default_project, full_config=True)
+
+        cfg = cfg.to_dict()
+        session_metrics = get_path(cfg, "config", "sessionMetrics")
+        if has_metrics_extraction:
+            assert session_metrics == {
+                "drop": False,
+                "version": 2 if abnormal_mechanism_rollout else 1,
+            }
+        else:
+            assert session_metrics is None
 
 
 @pytest.mark.django_db

+ 88 - 93
tests/sentry/sentry_metrics/limiters/test_cardinality_limiter.py

@@ -17,7 +17,8 @@ from sentry.sentry_metrics.indexer.limiters.cardinality import TimeseriesCardina
 
 @pytest.fixture(autouse=True)
 def rollout_all_orgs(set_sentry_option):
-    set_sentry_option("sentry-metrics.cardinality-limiter.orgs-rollout-rate", 1.0)
+    with set_sentry_option("sentry-metrics.cardinality-limiter.orgs-rollout-rate", 1.0):
+        yield
 
 
 class MockCardinalityLimiter(CardinalityLimiter):
@@ -65,65 +66,64 @@ class MockCardinalityLimiter(CardinalityLimiter):
 
 
 def test_reject_all(set_sentry_option):
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.cardinality-limiter.limits.releasehealth.per-org",
         [{"window_seconds": 3600, "granularity_seconds": 60, "limit": 0}],
-    )
-    backend = MockCardinalityLimiter()
-    backend.assert_quota = Quota(window_seconds=3600, granularity_seconds=60, limit=0)
-    backend.grant_hashes = 0
-    limiter = TimeseriesCardinalityLimiter("", backend)
+    ):
+        backend = MockCardinalityLimiter()
+        backend.assert_quota = Quota(window_seconds=3600, granularity_seconds=60, limit=0)
+        backend.grant_hashes = 0
+        limiter = TimeseriesCardinalityLimiter("", backend)
 
-    result = limiter.check_cardinality_limits(
-        UseCaseKey.RELEASE_HEALTH,
-        {
-            PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
-            PartitionIdxOffset(0, 1): {"org_id": 1, "name": "bar", "tags": {}},
-        },
-    )
+        result = limiter.check_cardinality_limits(
+            UseCaseKey.RELEASE_HEALTH,
+            {
+                PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
+                PartitionIdxOffset(0, 1): {"org_id": 1, "name": "bar", "tags": {}},
+            },
+        )
 
-    assert result.keys_to_remove == [PartitionIdxOffset(0, 0), PartitionIdxOffset(0, 1)]
+        assert result.keys_to_remove == [PartitionIdxOffset(0, 0), PartitionIdxOffset(0, 1)]
 
 
 def test_reject_partial(set_sentry_option):
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.cardinality-limiter.limits.releasehealth.per-org",
         [{"window_seconds": 3600, "granularity_seconds": 60, "limit": 1}],
-    )
+    ):
+        backend = MockCardinalityLimiter()
+        backend.assert_quota = Quota(window_seconds=3600, granularity_seconds=60, limit=1)
+        backend.grant_hashes = 1
+        limiter = TimeseriesCardinalityLimiter("", backend)
 
-    backend = MockCardinalityLimiter()
-    backend.assert_quota = Quota(window_seconds=3600, granularity_seconds=60, limit=1)
-    backend.grant_hashes = 1
-    limiter = TimeseriesCardinalityLimiter("", backend)
+        result = limiter.check_cardinality_limits(
+            UseCaseKey.RELEASE_HEALTH,
+            {
+                PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
+                PartitionIdxOffset(0, 1): {"org_id": 1, "name": "bar", "tags": {}},
+                PartitionIdxOffset(0, 2): {"org_id": 1, "name": "baz", "tags": {}},
+            },
+        )
 
-    result = limiter.check_cardinality_limits(
-        UseCaseKey.RELEASE_HEALTH,
-        {
-            PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
-            PartitionIdxOffset(0, 1): {"org_id": 1, "name": "bar", "tags": {}},
-            PartitionIdxOffset(0, 2): {"org_id": 1, "name": "baz", "tags": {}},
-        },
-    )
-
-    assert result.keys_to_remove == [PartitionIdxOffset(0, 1), PartitionIdxOffset(0, 2)]
+        assert result.keys_to_remove == [PartitionIdxOffset(0, 1), PartitionIdxOffset(0, 2)]
 
 
 def test_accept_all(set_sentry_option):
-    set_sentry_option("sentry-metrics.cardinality-limiter.limits.releasehealth.per-org", [])
-    backend = MockCardinalityLimiter()
-    backend.grant_hashes = 1000
-    limiter = TimeseriesCardinalityLimiter("", backend)
+    with set_sentry_option("sentry-metrics.cardinality-limiter.limits.releasehealth.per-org", []):
+        backend = MockCardinalityLimiter()
+        backend.grant_hashes = 1000
+        limiter = TimeseriesCardinalityLimiter("", backend)
 
-    result = limiter.check_cardinality_limits(
-        UseCaseKey.RELEASE_HEALTH,
-        {
-            PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
-            PartitionIdxOffset(0, 1): {"org_id": 1, "name": "bar", "tags": {}},
-            PartitionIdxOffset(0, 2): {"org_id": 1, "name": "baz", "tags": {}},
-        },
-    )
+        result = limiter.check_cardinality_limits(
+            UseCaseKey.RELEASE_HEALTH,
+            {
+                PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
+                PartitionIdxOffset(0, 1): {"org_id": 1, "name": "bar", "tags": {}},
+                PartitionIdxOffset(0, 2): {"org_id": 1, "name": "baz", "tags": {}},
+            },
+        )
 
-    assert not result.keys_to_remove
+        assert not result.keys_to_remove
 
 
 def test_sample_rate_zero(set_sentry_option):
@@ -131,59 +131,54 @@ def test_sample_rate_zero(set_sentry_option):
     Assert that with a rollout rate of zero, no quotas are applied.
     """
 
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.cardinality-limiter.limits.releasehealth.per-org",
         [{"window_seconds": 3600, "granularity_seconds": 60, "limit": 0}],
-    )
-
-    set_sentry_option("sentry-metrics.cardinality-limiter.orgs-rollout-rate", 0.0)
-
-    backend = MockCardinalityLimiter()
-    backend.grant_hashes = 0
-    backend.assert_requests = []
-    limiter = TimeseriesCardinalityLimiter("", backend)
-
-    result = limiter.check_cardinality_limits(
-        UseCaseKey.RELEASE_HEALTH,
-        {
-            PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
-            PartitionIdxOffset(0, 1): {"org_id": 1, "name": "bar", "tags": {}},
-            PartitionIdxOffset(0, 2): {"org_id": 1, "name": "baz", "tags": {}},
-        },
-    )
-
-    assert not result.keys_to_remove
-    # Assert that we are not just passing the rate limiter, but also do not
-    # check any quotas. If there are no quotas, there are no requests, and
-    # therefore no grants.
-    #
-    # Right now we do call the limiter with an empty list of requests. If
-    # we didn't, `_grants` would be `None` instead of `[]`. Either behavior
-    # would be fine, in neither case we are hitting redis.
-    assert result._grants == []
+    ), set_sentry_option("sentry-metrics.cardinality-limiter.orgs-rollout-rate", 0.0):
+        backend = MockCardinalityLimiter()
+        backend.grant_hashes = 0
+        backend.assert_requests = []
+        limiter = TimeseriesCardinalityLimiter("", backend)
+
+        result = limiter.check_cardinality_limits(
+            UseCaseKey.RELEASE_HEALTH,
+            {
+                PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
+                PartitionIdxOffset(0, 1): {"org_id": 1, "name": "bar", "tags": {}},
+                PartitionIdxOffset(0, 2): {"org_id": 1, "name": "baz", "tags": {}},
+            },
+        )
+
+        assert not result.keys_to_remove
+        # Assert that we are not just passing the rate limiter, but also do not
+        # check any quotas. If there are no quotas, there are no requests, and
+        # therefore no grants.
+        #
+        # Right now we do call the limiter with an empty list of requests. If
+        # we didn't, `_grants` would be `None` instead of `[]`. Either behavior
+        # would be fine, in neither case we are hitting redis.
+        assert result._grants == []
 
 
 def test_sample_rate_half(set_sentry_option):
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.cardinality-limiter.limits.releasehealth.per-org",
         [{"window_seconds": 3600, "granularity_seconds": 60, "limit": 0}],
-    )
-
-    set_sentry_option("sentry-metrics.cardinality-limiter.orgs-rollout-rate", 0.5)
-
-    backend = MockCardinalityLimiter()
-    backend.grant_hashes = 0
-    backend.assert_quota = Quota(window_seconds=3600, granularity_seconds=60, limit=0)
-    limiter = TimeseriesCardinalityLimiter("", backend)
-
-    result = limiter.check_cardinality_limits(
-        UseCaseKey.RELEASE_HEALTH,
-        {
-            PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
-            PartitionIdxOffset(0, 1): {"org_id": 99, "name": "bar", "tags": {}},
-        },
-    )
-
-    # We are sampling org_id=1 into cardinality limiting. Because our quota is
-    # zero, only that org's metrics are dropped.
-    assert result.keys_to_remove == [PartitionIdxOffset(0, 0)]
+    ), set_sentry_option("sentry-metrics.cardinality-limiter.orgs-rollout-rate", 0.5):
+
+        backend = MockCardinalityLimiter()
+        backend.grant_hashes = 0
+        backend.assert_quota = Quota(window_seconds=3600, granularity_seconds=60, limit=0)
+        limiter = TimeseriesCardinalityLimiter("", backend)
+
+        result = limiter.check_cardinality_limits(
+            UseCaseKey.RELEASE_HEALTH,
+            {
+                PartitionIdxOffset(0, 0): {"org_id": 1, "name": "foo", "tags": {}},
+                PartitionIdxOffset(0, 1): {"org_id": 99, "name": "bar", "tags": {}},
+            },
+        )
+
+        # We are sampling org_id=1 into cardinality limiting. Because our quota is
+        # zero, only that org's metrics are dropped.
+        assert result.keys_to_remove == [PartitionIdxOffset(0, 0)]

+ 92 - 84
tests/sentry/sentry_metrics/limiters/test_writes_limiter.py

@@ -17,82 +17,87 @@ def get_writes_limiter(namespace: str):
 
 
 def test_writes_limiter_no_limits(set_sentry_option):
-    set_sentry_option("sentry-metrics.writes-limiter.limits.performance.per-org", [])
-    set_sentry_option("sentry-metrics.writes-limiter.limits.performance.global", [])
-    writes_limiter = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
+    with set_sentry_option(
+        "sentry-metrics.writes-limiter.limits.performance.per-org", []
+    ), set_sentry_option("sentry-metrics.writes-limiter.limits.performance.global", []):
+        writes_limiter = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
 
-    key_collection = KeyCollection(
-        {
-            1: {"a", "b", "c"},
-            2: {"a", "b", "c"},
-        }
-    )
+        key_collection = KeyCollection(
+            {
+                1: {"a", "b", "c"},
+                2: {"a", "b", "c"},
+            }
+        )
 
-    with writes_limiter.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
-        assert not state.dropped_strings
-        assert state.accepted_keys.as_tuples() == key_collection.as_tuples()
+        with writes_limiter.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
+            assert not state.dropped_strings
+            assert state.accepted_keys.as_tuples() == key_collection.as_tuples()
 
 
 def test_writes_limiter_doesnt_limit(set_sentry_option):
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.writes-limiter.limits.performance.per-org",
         [{"window_seconds": 10, "granularity_seconds": 10, "limit": 3}],
-    )
-    set_sentry_option("sentry-metrics.writes-limiter.limits.performance.global", [])
-    writes_limiter = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
+    ), set_sentry_option("sentry-metrics.writes-limiter.limits.performance.global", []):
+        writes_limiter = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
 
-    key_collection = KeyCollection(
-        {
-            1: {"a", "b", "c"},
-            2: {"a", "b", "c"},
-        }
-    )
+        key_collection = KeyCollection(
+            {
+                1: {"a", "b", "c"},
+                2: {"a", "b", "c"},
+            }
+        )
 
-    with writes_limiter.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
-        assert not state.dropped_strings
-        assert state.accepted_keys.as_tuples() == key_collection.as_tuples()
+        with writes_limiter.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
+            assert not state.dropped_strings
+            assert state.accepted_keys.as_tuples() == key_collection.as_tuples()
 
 
 def test_writes_limiter_org_limit(set_sentry_option):
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.writes-limiter.limits.performance.per-org",
         [{"window_seconds": 10, "granularity_seconds": 10, "limit": 2}],
-    )
-    set_sentry_option("sentry-metrics.writes-limiter.limits.performance.global", [])
-    writes_limiter = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
-
-    key_collection = KeyCollection(
-        {
-            1: {"a", "b", "c"},
-            2: {"a", "b", "c"},
-        }
-    )
-
-    with writes_limiter.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
-        assert len(state.dropped_strings) == 2
-        assert sorted(ds.key_result.org_id for ds in state.dropped_strings) == [1, 2]
-        assert sorted(org_id for org_id, string in state.accepted_keys.as_tuples()) == [1, 1, 2, 2]
+    ), set_sentry_option("sentry-metrics.writes-limiter.limits.performance.global", []):
+        writes_limiter = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
+
+        key_collection = KeyCollection(
+            {
+                1: {"a", "b", "c"},
+                2: {"a", "b", "c"},
+            }
+        )
+
+        with writes_limiter.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
+            assert len(state.dropped_strings) == 2
+            assert sorted(ds.key_result.org_id for ds in state.dropped_strings) == [1, 2]
+            assert sorted(org_id for org_id, string in state.accepted_keys.as_tuples()) == [
+                1,
+                1,
+                2,
+                2,
+            ]
 
 
 def test_writes_limiter_global_limit(set_sentry_option):
-    set_sentry_option("sentry-metrics.writes-limiter.limits.performance.per-org", [])
-    set_sentry_option(
+    with set_sentry_option(
+        "sentry-metrics.writes-limiter.limits.performance.per-org", []
+    ), set_sentry_option(
         "sentry-metrics.writes-limiter.limits.performance.global",
         [{"window_seconds": 10, "granularity_seconds": 10, "limit": 4}],
-    )
-    writes_limiter = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
+    ):
+        writes_limiter = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
 
-    # edgecase: each organization's quota fits into the global quota
-    # individually, but not in total.
-    key_collection = KeyCollection(
-        {
-            1: {"a", "b", "c"},
-            2: {"a", "b", "c"},
-        }
-    )
+        # edgecase: each organization's quota fits into the global quota
+        # individually, but not in total.
+        key_collection = KeyCollection(
+            {
+                1: {"a", "b", "c"},
+                2: {"a", "b", "c"},
+            }
+        )
 
-    with writes_limiter.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
-        assert len(state.dropped_strings) == 2
+        with writes_limiter.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
+            assert len(state.dropped_strings) == 2
 
 
 def test_writes_limiter_respects_namespaces(set_sentry_option):
@@ -104,34 +109,37 @@ def test_writes_limiter_respects_namespaces(set_sentry_option):
     dropping all strings for subsequent calls to check_write_limits, while
     a different namespace is unaffected (no strings are dropped).
     """
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.writes-limiter.limits.performance.per-org",
         [{"window_seconds": 20, "granularity_seconds": 20, "limit": 2}],
-    )
-    set_sentry_option("sentry-metrics.writes-limiter.limits.performance.global", [])
-    writes_limiter_perf = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
-
-    key_collection = KeyCollection(
-        {
-            1: {"a", "b", "c"},
-            2: {"a", "b", "c"},
-        }
-    )
-
-    with writes_limiter_perf.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
-        assert len(state.dropped_strings) == 2
-
-    key_collection = KeyCollection(
-        {
-            1: {"d", "e"},
-            2: {"d", "e"},
-        }
-    )
-
-    with writes_limiter_perf.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
-        assert len(state.dropped_strings) == 4
-
-    writes_limiter_rh = get_writes_limiter(RELEASE_HEALTH_PG_NAMESPACE)
-
-    with writes_limiter_rh.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
-        assert not state.dropped_strings
+    ), set_sentry_option("sentry-metrics.writes-limiter.limits.performance.global", []):
+        writes_limiter_perf = get_writes_limiter(PERFORMANCE_PG_NAMESPACE)
+
+        key_collection = KeyCollection(
+            {
+                1: {"a", "b", "c"},
+                2: {"a", "b", "c"},
+            }
+        )
+
+        with writes_limiter_perf.check_write_limits(
+            UseCaseKey.PERFORMANCE, key_collection
+        ) as state:
+            assert len(state.dropped_strings) == 2
+
+        key_collection = KeyCollection(
+            {
+                1: {"d", "e"},
+                2: {"d", "e"},
+            }
+        )
+
+        with writes_limiter_perf.check_write_limits(
+            UseCaseKey.PERFORMANCE, key_collection
+        ) as state:
+            assert len(state.dropped_strings) == 4
+
+        writes_limiter_rh = get_writes_limiter(RELEASE_HEALTH_PG_NAMESPACE)
+
+        with writes_limiter_rh.check_write_limits(UseCaseKey.PERFORMANCE, key_collection) as state:
+            assert not state.dropped_strings

+ 30 - 31
tests/sentry/sentry_metrics/test_multiprocess_steps.py

@@ -504,48 +504,47 @@ def test_process_messages_cardinality_limited(
     settings.SENTRY_METRICS_INDEXER_DEBUG_LOG_SAMPLE_RATE = 1.0
 
     # set any limit at all to ensure we actually use the underlying rate limiter
-    set_sentry_option(
+    with set_sentry_option(
         "sentry-metrics.cardinality-limiter.limits.releasehealth.per-org",
         [{"window_seconds": 3600, "granularity_seconds": 60, "limit": 0}],
-    )
-    set_sentry_option("sentry-metrics.cardinality-limiter.orgs-rollout-rate", 1.0)
+    ), set_sentry_option("sentry-metrics.cardinality-limiter.orgs-rollout-rate", 1.0):
 
-    class MockCardinalityLimiter(CardinalityLimiter):
-        def check_within_quotas(self, requested_quotas):
-            # Grant nothing, limit everything
-            return 123, []
+        class MockCardinalityLimiter(CardinalityLimiter):
+            def check_within_quotas(self, requested_quotas):
+                # Grant nothing, limit everything
+                return 123, []
 
-        def use_quotas(self, grants, timestamp):
-            pass
+            def use_quotas(self, grants, timestamp):
+                pass
 
-    monkeypatch.setitem(
-        cardinality_limiter_factory.rate_limiters,
-        "releasehealth",
-        TimeseriesCardinalityLimiter("releasehealth", MockCardinalityLimiter()),
-    )
+        monkeypatch.setitem(
+            cardinality_limiter_factory.rate_limiters,
+            "releasehealth",
+            TimeseriesCardinalityLimiter("releasehealth", MockCardinalityLimiter()),
+        )
 
-    message_payloads = [counter_payload, distribution_payload, set_payload]
-    message_batch = [
-        Message(
-            BrokerValue(
-                KafkaPayload(None, json.dumps(payload).encode("utf-8"), []),
-                Partition(Topic("topic"), 0),
-                i + 1,
-                datetime.now(),
+        message_payloads = [counter_payload, distribution_payload, set_payload]
+        message_batch = [
+            Message(
+                BrokerValue(
+                    KafkaPayload(None, json.dumps(payload).encode("utf-8"), []),
+                    Partition(Topic("topic"), 0),
+                    i + 1,
+                    datetime.now(),
+                )
             )
-        )
-        for i, payload in enumerate(message_payloads)
-    ]
+            for i, payload in enumerate(message_payloads)
+        ]
 
-    last = message_batch[-1]
-    outer_message = Message(Value(message_batch, last.committable))
+        last = message_batch[-1]
+        outer_message = Message(Value(message_batch, last.committable))
 
-    with caplog.at_level(logging.ERROR):
-        new_batch = MESSAGE_PROCESSOR.process_messages(outer_message=outer_message)
+        with caplog.at_level(logging.ERROR):
+            new_batch = MESSAGE_PROCESSOR.process_messages(outer_message=outer_message)
 
-    compare_message_batches_ignoring_metadata(new_batch, [])
+        compare_message_batches_ignoring_metadata(new_batch, [])
 
-    assert "dropped_message" in caplog.text
+        assert "dropped_message" in caplog.text
 
 
 def test_valid_metric_name() -> None: