Просмотр исходного кода

test(backup): Remove @targets decorator, improve assert msg (#62511)

Retries https://github.com/getsentry/sentry/pull/61719.
Alex Zaslavsky 1 год назад
Родитель
Commit
bf24a1a74d

+ 110 - 92
tests/sentry/backup/__init__.py

@@ -1,6 +1,7 @@
 from __future__ import annotations
 
-from typing import Callable, Literal, Type
+from functools import wraps
+from typing import Callable, Literal, Set, Type
 
 from django.db import models
 
@@ -12,114 +13,131 @@ from sentry.backup.dependencies import (
     sorted_dependencies,
 )
 from sentry.backup.helpers import DatetimeSafeDjangoJSONEncoder, get_exportable_sentry_models
+from sentry.utils.json import JSONData
 
 
-def targets(expected_models: list[Type[models.Model]]):
-    """A helper decorator that checks that every model that a test "targeted" was actually seen in
-    the output, ensuring that we're actually testing the thing we think we are. Additionally, this
-    decorator is easily legible to static analysis, which allows for static checks to ensure that
-    all `__relocation_scope__ != RelocationScope.Excluded` models are being tested.
+def verify_models_in_output(
+    expected_models: list[Type[models.Model]], actual_json: JSONData
+) -> None:
+    """
+    A helper context manager that checks that every model that a test "targeted" was actually seen
+    in the output, ensuring that we're actually testing the thing we think we are. Additionally,
+    this context manager is easily legible to static analysis, which allows for static checks to
+    ensure that all `__relocation_scope__ != RelocationScope.Excluded` models are being tested.
 
     To be considered a proper "testing" of a given target type, the resulting output must contain at
-    least one instance of that type with all of its fields present and set to non-default values."""
-
-    def decorator(func):
-        def wrapped(*args, **kwargs):
-            actual = func(*args, **kwargs)
-            if actual is None:
-                raise AssertionError(f"The test {func.__name__} did not return its actual JSON")
-
-            # Do a quick scan to ensure that at least one instance of each expected model is
-            # present.
-            actual_model_names = {entry["model"] for entry in actual}
-            expected_model_types = {
-                "sentry." + type.__name__.lower(): type for type in expected_models
-            }
-            expected_model_names = set(expected_model_types.keys())
-            notfound = sorted(expected_model_names - actual_model_names)
-            if len(notfound) > 0:
-                raise AssertionError(f"Some `@targets_models` entries were not found: {notfound}")
-
-            # Now do a more thorough check: for every `expected_models` entry, make sure that we
-            # have at least one instance of that model that sets all of its fields to some
-            # non-default value.
-            mistakes_by_model: dict[str, list[str]] = {}
-            encoder = DatetimeSafeDjangoJSONEncoder()
-            for model in actual:
-                name = model["model"]
-                if name not in expected_model_names:
-                    continue
-
-                data = model["fields"]
-                type = expected_model_types[name]
-                fields = type._meta.get_fields()
-                mistakes = []
-                for f in fields:
-                    field_name = f.name
-
-                    # IDs are synonymous with primary keys, and should not be included in JSON field
-                    # output.
-                    if field_name == "id":
-                        continue
-
-                    # The model gets a `ManyToOneRel` or `ManyToManyRel` from all other models where
-                    # it is referenced by foreign key. Those do not explicitly need to be set - we
-                    # don't care that models that reference this model exist, just that this model
-                    # exists in its most filled-out form.
-                    if isinstance(f, models.ManyToOneRel) or isinstance(f, models.ManyToManyRel):
-                        continue
-
-                    # TODO(getsentry/team-ospo#156): For some reason we currently don't always
-                    # serialize some `ManyToManyField`s with the `through` property set. I'll
-                    # investigate, but will skip over these for now.
-                    if isinstance(f, models.ManyToManyField):
-                        continue
-
-                    if not isinstance(f, models.Field):
-                        continue
-                    if field_name not in data:
-                        mistakes.append(f"Must include field: `{field_name}`")
-                        continue
-                    if f.has_default():
-                        default_value = f.get_default()
-                        serialized = encoder.encode(default_value)
-                        if serialized == data:
-                            mistakes.append(f"Must use non-default data: `{field_name}`")
-                            return
-
-                # If one model instance has N mistakes, and another has N - 1 mistakes, we want to
-                # keep the shortest list, to give the user the smallest number of fixes to make when
-                # reporting the mistake.
-                if name not in mistakes_by_model or (len(mistakes) < len(mistakes_by_model[name])):
-                    mistakes_by_model[name] = mistakes
-            for name, mistakes in mistakes_by_model.items():
-                num = len(mistakes)
-                if num > 0:
-                    raise AssertionError(f"Model {name} has {num} mistakes: {mistakes}")
-
-            return None
-
-        return wrapped
-
-    return decorator
-
+    least one instance of that type with all of its fields present and set to non-default values.
+    """
 
-def mark(group: set[NormalizedModelName], *marking: Type | Literal["__all__"]):
+    # Do a quick scan to ensure that at least one instance of each expected model is present.
+    actual_model_names = {entry["model"] for entry in actual_json}
+    expected_model_types = {"sentry." + type.__name__.lower(): type for type in expected_models}
+    expected_model_names = set(expected_model_types.keys())
+    notfound = sorted(expected_model_names - actual_model_names)
+    if len(notfound) > 0:
+        raise AssertionError(
+            f"""Some `expected_models` entries were not found: {notfound}
+
+            If you are seeing this in CI, it means that this test produced an `export.json` backup
+            file that was missing the above models. This check is in place to ensure that ALL models
+            of a certain category are covered by this particular test - by omitting a certain kind
+            of model from the backup output entirely, we end up in a situation where backing up the
+            model in question to JSON is untested.
+
+            To fix this, you'll need to modify the body of the test to add at least one of these
+            models to the database before exporting. The process for doing so is test-specific, but
+            if the test body contains a fixture factory like `self.create_exhaustive_...`, that
+            function will be a good place to start. If it does not, you can just write the model to
+            the database at the appropriate point in the test: `MyNewModel.objects.create(...)`.
+            """
+        )
+
+    # Now do a more thorough check: for every `expected_models` entry, make sure that we have at
+    # least one instance of that model that sets all of its fields to some non-default value.
+    mistakes_by_model: dict[str, list[str]] = {}
+    encoder = DatetimeSafeDjangoJSONEncoder()
+    for model in actual_json:
+        name = model["model"]
+        if name not in expected_model_names:
+            continue
+
+        data = model["fields"]
+        type = expected_model_types[name]
+        fields = type._meta.get_fields()
+        mistakes = []
+        for f in fields:
+            field_name = f.name
+
+            # IDs are synonymous with primary keys, and should not be included in JSON field output.
+            if field_name == "id":
+                continue
+
+            # The model gets a `ManyToOneRel` or `ManyToManyRel` from all other models where it is
+            # referenced by foreign key. Those do not explicitly need to be set - we don't care that
+            # models that reference this model exist, just that this model exists in its most
+            # filled-out form.
+            if isinstance(f, models.ManyToOneRel) or isinstance(f, models.ManyToManyRel):
+                continue
+
+            # TODO(getsentry/team-ospo#156): For some reason we currently don't always serialize
+            # some `ManyToManyField`s with the `through` property set. I'll investigate, but will
+            # skip over these for now.
+            if isinstance(f, models.ManyToManyField):
+                continue
+
+            if not isinstance(f, models.Field):
+                continue
+            if field_name not in data:
+                mistakes.append(f"Must include field: `{field_name}`")
+                continue
+            if f.has_default():
+                default_value = f.get_default()
+                serialized = encoder.encode(default_value)
+                if serialized == data:
+                    mistakes.append(f"Must use non-default data: `{field_name}`")
+                    return
+
+        # If one model instance has N mistakes, and another has N - 1 mistakes, we want to keep the
+        # shortest list, to give the user the smallest number of fixes to make when reporting the
+        # mistake.
+        if name not in mistakes_by_model or (len(mistakes) < len(mistakes_by_model[name])):
+            mistakes_by_model[name] = mistakes
+
+    for name, mistakes in mistakes_by_model.items():
+        num = len(mistakes)
+        if num > 0:
+            raise AssertionError(f"Model {name} has {num} mistakes: {mistakes}")
+
+
+def expect_models(group: set[NormalizedModelName], *marking: Type | Literal["__all__"]) -> Callable:
     """
-    A function that runs at module load time and marks all models that appear in a given test function.
+    A function that runs at module load time and marks all models that appear in a given test
+    function. The first argument stores the tracked models in a global group, which we then check in
+    `test_coverage.py` for completeness.
 
     Use the sentinel string "__all__" to indicate that all models are expected.
     """
 
     all: Literal["__all__"] = "__all__"
+    target_models: Set[Type[models.Model]] = set()
     for model in marking:
         if model == all:
             all_models = get_exportable_sentry_models()
             group.update({get_model_name(c) for c in all_models})
-            return list(all_models)
+            target_models = all_models.copy()
+            break
 
         group.add(get_model_name(model))
-    return marking
+        target_models.add(model)
+
+    def decorator(func: Callable):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            return func(*args, target_models, **kwargs)
+
+        return wrapper
+
+    return decorator
 
 
 def get_matching_exportable_models(

+ 15 - 11
tests/sentry/backup/test_exhaustive.py

@@ -2,6 +2,9 @@ from __future__ import annotations
 
 import tempfile
 from pathlib import Path
+from typing import Type
+
+from django.db.models import Model
 
 from sentry.backup.dependencies import NormalizedModelName
 from sentry.backup.imports import (
@@ -17,7 +20,7 @@ from sentry.testutils.helpers.backups import (
     export_to_file,
 )
 from sentry.testutils.silo import region_silo_test
-from tests.sentry.backup import mark, targets
+from tests.sentry.backup import expect_models, verify_models_in_output
 
 EXHAUSTIVELY_TESTED: set[NormalizedModelName] = set()
 UNIQUENESS_TESTED: set[NormalizedModelName] = set()
@@ -35,18 +38,20 @@ class ExhaustiveTests(BackupTestCase):
         clear_database(reset_pks=reset_pks)
         return tmp_path
 
-    @targets(mark(EXHAUSTIVELY_TESTED, "__all__"))
-    def test_exhaustive_clean_pks(self):
+    @expect_models(EXHAUSTIVELY_TESTED, "__all__")
+    def test_exhaustive_clean_pks(self, expected_models: list[Type[Model]]):
         self.create_exhaustive_instance(is_superadmin=True)
-        return self.import_export_then_validate(self._testMethodName, reset_pks=True)
+        actual = self.import_export_then_validate(self._testMethodName, reset_pks=True)
+        verify_models_in_output(expected_models, actual)
 
-    @targets(mark(EXHAUSTIVELY_TESTED, "__all__"))
-    def test_exhaustive_dirty_pks(self):
+    @expect_models(EXHAUSTIVELY_TESTED, "__all__")
+    def test_exhaustive_dirty_pks(self, expected_models: list[Type[Model]]):
         self.create_exhaustive_instance(is_superadmin=True)
-        return self.import_export_then_validate(self._testMethodName, reset_pks=False)
+        actual = self.import_export_then_validate(self._testMethodName, reset_pks=False)
+        verify_models_in_output(expected_models, actual)
 
-    @targets(mark(UNIQUENESS_TESTED, "__all__"))
-    def test_uniqueness(self):
+    @expect_models(UNIQUENESS_TESTED, "__all__")
+    def test_uniqueness(self, expected_models: list[Type[Model]]):
         self.create_exhaustive_instance(is_superadmin=True)
         with tempfile.TemporaryDirectory() as tmp_dir:
             # Export the data once.
@@ -67,5 +72,4 @@ class ExhaustiveTests(BackupTestCase):
 
             tmp_actual = Path(tmp_dir).joinpath(f"{self._testMethodName}.actual.json")
             actual = export_to_file(tmp_actual, ExportScope.Global)
-
-            return actual
+            verify_models_in_output(expected_models, actual)

+ 64 - 43
tests/sentry/backup/test_imports.py

@@ -6,7 +6,7 @@ import tarfile
 import tempfile
 from datetime import date, datetime
 from pathlib import Path
-from typing import Tuple
+from typing import Tuple, Type
 from unittest.mock import patch
 
 import pytest
@@ -16,6 +16,7 @@ from cryptography.hazmat.backends import default_backend
 from cryptography.hazmat.primitives import hashes, serialization
 from cryptography.hazmat.primitives.asymmetric import padding
 from django.db import connections, router
+from django.db.models import Model
 from django.utils import timezone
 
 from sentry.backup.dependencies import NormalizedModelName, dependencies, get_model, get_model_name
@@ -75,7 +76,11 @@ from sentry.testutils.helpers.backups import (
 from sentry.testutils.hybrid_cloud import use_split_dbs
 from sentry.testutils.silo import assume_test_silo_mode, region_silo_test
 from sentry.utils import json
-from tests.sentry.backup import get_matching_exportable_models, mark, targets
+from tests.sentry.backup import (
+    expect_models,
+    get_matching_exportable_models,
+    verify_models_in_output,
+)
 
 
 class ImportTestCase(BackupTestCase):
@@ -1325,8 +1330,8 @@ class CollisionTests(ImportTestCase):
     Ensure that collisions are properly handled in different flag modes.
     """
 
-    @targets(mark(COLLISION_TESTED, ApiToken))
-    def test_colliding_api_token(self):
+    @expect_models(COLLISION_TESTED, ApiToken)
+    def test_colliding_api_token(self, expected_models: list[Type[Model]]):
         owner = self.create_exhaustive_user("owner")
         expires_at = timezone.now() + DEFAULT_EXPIRATION
 
@@ -1440,10 +1445,10 @@ class CollisionTests(ImportTestCase):
                 )
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, Monitor))
-    def test_colliding_monitor(self):
+    @expect_models(COLLISION_TESTED, Monitor)
+    def test_colliding_monitor(self, expected_models: list[Type[Model]]):
         owner = self.create_exhaustive_user("owner")
         invited = self.create_exhaustive_user("invited")
         self.create_exhaustive_organization("some-org", owner, invited)
@@ -1471,10 +1476,10 @@ class CollisionTests(ImportTestCase):
             assert Monitor.objects.filter(guid=colliding.guid).count() == 1
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, OrgAuthToken))
-    def test_colliding_org_auth_token(self):
+    @expect_models(COLLISION_TESTED, OrgAuthToken)
+    def test_colliding_org_auth_token(self, expected_models: list[Type[Model]]):
         owner = self.create_exhaustive_user("owner")
         invited = self.create_exhaustive_user("invited")
         member = self.create_exhaustive_user("member")
@@ -1520,10 +1525,10 @@ class CollisionTests(ImportTestCase):
                 )
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, ProjectKey))
-    def test_colliding_project_key(self):
+    @expect_models(COLLISION_TESTED, ProjectKey)
+    def test_colliding_project_key(self, expected_models: list[Type[Model]]):
         owner = self.create_exhaustive_user("owner")
         invited = self.create_exhaustive_user("invited")
         member = self.create_exhaustive_user("member")
@@ -1553,7 +1558,7 @@ class CollisionTests(ImportTestCase):
             assert ProjectKey.objects.filter(secret_key=colliding.secret_key).count() == 1
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
     @pytest.mark.xfail(
         not use_split_dbs(),
@@ -1561,8 +1566,8 @@ class CollisionTests(ImportTestCase):
         raises=urllib3.exceptions.MaxRetryError,
         strict=True,
     )
-    @targets(mark(COLLISION_TESTED, QuerySubscription))
-    def test_colliding_query_subscription(self):
+    @expect_models(COLLISION_TESTED, QuerySubscription)
+    def test_colliding_query_subscription(self, expected_models: list[Type[Model]]):
         # We need a celery task running to properly test the `subscription_id` assignment, otherwise
         # its value just defaults to `None`.
         with self.tasks():
@@ -1610,10 +1615,12 @@ class CollisionTests(ImportTestCase):
                 )
 
                 with open(tmp_path, "rb") as tmp_file:
-                    return json.load(tmp_file)
+                    verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, ControlOption, Option, Relay, RelayUsage, UserRole))
-    def test_colliding_configs_overwrite_configs_enabled_in_config_scope(self):
+    @expect_models(COLLISION_TESTED, ControlOption, Option, Relay, RelayUsage, UserRole)
+    def test_colliding_configs_overwrite_configs_enabled_in_config_scope(
+        self, expected_models: list[Type[Model]]
+    ):
         owner = self.create_exhaustive_user("owner", is_admin=True)
         self.create_exhaustive_global_configs(owner)
 
@@ -1704,10 +1711,12 @@ class CollisionTests(ImportTestCase):
                     assert actual_permission == old_user_role_permissions[i]
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, ControlOption, Option, Relay, RelayUsage, UserRole))
-    def test_colliding_configs_overwrite_configs_disabled_in_config_scope(self):
+    @expect_models(COLLISION_TESTED, ControlOption, Option, Relay, RelayUsage, UserRole)
+    def test_colliding_configs_overwrite_configs_disabled_in_config_scope(
+        self, expected_models: list[Type[Model]]
+    ):
         owner = self.create_exhaustive_user("owner", is_admin=True)
         self.create_exhaustive_global_configs(owner)
 
@@ -1794,10 +1803,12 @@ class CollisionTests(ImportTestCase):
                 assert actual_user_role.permissions[0] == "other.admin"
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, Email, User, UserEmail, UserIP))
-    def test_colliding_user_with_merging_enabled_in_user_scope(self):
+    @expect_models(COLLISION_TESTED, Email, User, UserEmail, UserIP)
+    def test_colliding_user_with_merging_enabled_in_user_scope(
+        self, expected_models: list[Type[Model]]
+    ):
         self.create_exhaustive_user(username="owner", email="importing@example.com")
 
         with tempfile.TemporaryDirectory() as tmp_dir:
@@ -1838,10 +1849,12 @@ class CollisionTests(ImportTestCase):
                 assert not ControlImportChunk.objects.filter(model="sentry.userpermission").exists()
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, Email, User, UserEmail, UserIP))
-    def test_colliding_user_with_merging_disabled_in_user_scope(self):
+    @expect_models(COLLISION_TESTED, Email, User, UserEmail, UserIP)
+    def test_colliding_user_with_merging_disabled_in_user_scope(
+        self, expected_models: list[Type[Model]]
+    ):
         self.create_exhaustive_user(username="owner", email="importing@example.com")
 
         with tempfile.TemporaryDirectory() as tmp_dir:
@@ -1882,12 +1895,14 @@ class CollisionTests(ImportTestCase):
                 assert UserEmail.objects.filter(email__icontains="importing@").exists()
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(
-        mark(COLLISION_TESTED, Email, Organization, OrganizationMember, User, UserEmail, UserIP)
+    @expect_models(
+        COLLISION_TESTED, Email, Organization, OrganizationMember, User, UserEmail, UserIP
     )
-    def test_colliding_user_with_merging_enabled_in_organization_scope(self):
+    def test_colliding_user_with_merging_enabled_in_organization_scope(
+        self, expected_models: list[Type[Model]]
+    ):
         owner = self.create_exhaustive_user(username="owner", email="importing@example.com")
         self.create_organization("some-org", owner=owner)
 
@@ -1957,12 +1972,14 @@ class CollisionTests(ImportTestCase):
                 )
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(
-        mark(COLLISION_TESTED, Email, Organization, OrganizationMember, User, UserEmail, UserIP)
+    @expect_models(
+        COLLISION_TESTED, Email, Organization, OrganizationMember, User, UserEmail, UserIP
     )
-    def test_colliding_user_with_merging_disabled_in_organization_scope(self):
+    def test_colliding_user_with_merging_disabled_in_organization_scope(
+        self, expected_models: list[Type[Model]]
+    ):
         owner = self.create_exhaustive_user(username="owner", email="importing@example.com")
         self.create_organization("some-org", owner=owner)
 
@@ -2036,10 +2053,12 @@ class CollisionTests(ImportTestCase):
                 )
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, Email, User, UserEmail, UserIP, UserPermission))
-    def test_colliding_user_with_merging_enabled_in_config_scope(self):
+    @expect_models(COLLISION_TESTED, Email, User, UserEmail, UserIP, UserPermission)
+    def test_colliding_user_with_merging_enabled_in_config_scope(
+        self, expected_models: list[Type[Model]]
+    ):
         self.create_exhaustive_user(username="owner", email="importing@example.com", is_admin=True)
 
         with tempfile.TemporaryDirectory() as tmp_dir:
@@ -2084,10 +2103,12 @@ class CollisionTests(ImportTestCase):
                 assert not ControlImportChunk.objects.filter(model="sentry.userpermission").exists()
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
-    @targets(mark(COLLISION_TESTED, Email, User, UserEmail, UserIP, UserPermission))
-    def test_colliding_user_with_merging_disabled_in_config_scope(self):
+    @expect_models(COLLISION_TESTED, Email, User, UserEmail, UserIP, UserPermission)
+    def test_colliding_user_with_merging_disabled_in_config_scope(
+        self, expected_models: list[Type[Model]]
+    ):
         self.create_exhaustive_user(username="owner", email="importing@example.com", is_admin=True)
 
         with tempfile.TemporaryDirectory() as tmp_dir:
@@ -2131,7 +2152,7 @@ class CollisionTests(ImportTestCase):
                 assert UserEmail.objects.filter(email__icontains="importing@").exists()
 
             with open(tmp_path, "rb") as tmp_file:
-                return json.load(tmp_file)
+                verify_models_in_output(expected_models, json.load(tmp_file))
 
 
 @pytest.mark.skipif(reason="not legacy")

+ 8 - 5
tests/sentry/backup/test_models.py

@@ -2,6 +2,9 @@ from __future__ import annotations
 
 import tempfile
 from pathlib import Path
+from typing import Type
+
+from django.db.models import Model
 
 from sentry.backup.dependencies import NormalizedModelName
 from sentry.backup.scopes import ExportScope, RelocationScope
@@ -14,7 +17,7 @@ from sentry.testutils.cases import TransactionTestCase
 from sentry.testutils.helpers.backups import export_to_file
 from sentry.testutils.silo import assume_test_silo_mode, region_silo_test
 from sentry.utils.json import JSONData
-from tests.sentry.backup import mark, targets
+from tests.sentry.backup import expect_models
 
 DYNAMIC_RELOCATION_SCOPE_TESTED: set[NormalizedModelName] = set()
 
@@ -32,8 +35,8 @@ class DynamicRelocationScopeTests(TransactionTestCase):
             tmp_path = Path(tmp_dir).joinpath(f"{self._testMethodName}.expect.json")
             return export_to_file(tmp_path, ExportScope.Global)
 
-    @targets(mark(DYNAMIC_RELOCATION_SCOPE_TESTED, ApiAuthorization, ApiToken))
-    def test_api_auth(self):
+    @expect_models(DYNAMIC_RELOCATION_SCOPE_TESTED, ApiAuthorization, ApiToken)
+    def test_api_auth(self, expected_models: list[Type[Model]]):
         user = self.create_user()
 
         # Bound to an app == global scope.
@@ -61,8 +64,8 @@ class DynamicRelocationScopeTests(TransactionTestCase):
         assert token.get_relocation_scope() == RelocationScope.Config
         return self.export()
 
-    @targets(mark(DYNAMIC_RELOCATION_SCOPE_TESTED, NotificationAction, NotificationActionProject))
-    def test_notification_action(self):
+    @expect_models(DYNAMIC_RELOCATION_SCOPE_TESTED, NotificationAction, NotificationActionProject)
+    def test_notification_action(self, expected_models: list[Type[Model]]):
         # Bound to an app == global scope.
         app = self.create_sentry_app(name="test_app", organization=self.organization)
         action = self.create_notification_action(

+ 7 - 5
tests/sentry/backup/test_releases.py

@@ -3,8 +3,10 @@ from __future__ import annotations
 import os
 import tempfile
 from pathlib import Path
+from typing import Type
 
 import yaml
+from django.db.models import Model
 
 from sentry.backup.comparators import get_default_comparators
 from sentry.backup.dependencies import NormalizedModelName
@@ -20,7 +22,7 @@ from sentry.testutils.helpers.backups import (
 from sentry.testutils.pytest.fixtures import read_snapshot_file
 from sentry.testutils.silo import region_silo_test, strip_silo_mode_test_suffix
 from sentry.utils import json
-from tests.sentry.backup import mark, targets
+from tests.sentry.backup import expect_models, verify_models_in_output
 
 RELEASE_TESTED: set[NormalizedModelName] = set()
 
@@ -61,8 +63,8 @@ class ReleaseTests(BackupTestCase):
 
         return False
 
-    @targets(mark(RELEASE_TESTED, "__all__"))
-    def test_at_head(self):
+    @expect_models(RELEASE_TESTED, "__all__")
+    def test_at_head(self, expected_models: list[Type[Model]]):
         with tempfile.TemporaryDirectory() as tmp_dir:
             # Convert the existing snapshot from YAML to an equivalent temporary JSON file.
             snapshot_path = self.get_snapshot_path("head")
@@ -93,8 +95,8 @@ class ReleaseTests(BackupTestCase):
                 reference_file=snapshot_path,
             )
 
-            # Return the export so that we can ensure that all models were seen.
-            return exported
+            # Check the export so that we can ensure that all models were seen.
+            verify_models_in_output(expected_models, exported)
 
     def test_at_23_12_1(self):
         with tempfile.TemporaryDirectory() as tmp_dir: