
[feat] FK subscriptions to Incident model (#72576)

Currently, alert notifications assume a 1:1 relation between AlertRules
and Incidents.
When an AlertRule is triggered, we create an Incident, and that
Incident is not resolved until the resolve threshold is crossed.

Notifications are simple because we do not need to reference which
snuba subscription was triggered: there is only ever a single
subscription per alert rule per incident.

...

With Activated Alert Rules, we now have the possibility of multiple
subscriptions per AlertRule.
Notifications become more complicated: they still reference only the
Incident, but the Incident itself no longer carries the relevant data,
because linking to the AlertRule alone does not tell the end user which
subscription was triggered.

FK'ing the subscription to the Incident gives us clarity into which
subscription triggered the AlertRule.
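
As an illustration of what the new FK enables on the notification side, here is a minimal sketch; the helper name and return shape are hypothetical, and only the nullable Incident.subscription field comes from this change:

    # Hypothetical helper: resolve which snuba subscription fired a given incident.
    # Only `incident.subscription` (the FK added here) is assumed; the rest is illustrative.
    def describe_triggering_subscription(incident):
        subscription = incident.subscription  # QuerySubscription or None (nullable FK)
        if subscription is None:
            # Incidents created before the backfill may not have a subscription yet,
            # so fall back to the AlertRule's query.
            return {"query": incident.alert_rule.snuba_query.query}
        return {
            "subscription_id": subscription.id,
            "query": subscription.snuba_query.query,
        }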

TODO:
- UI/UX updates to enable multiple Incidents per AlertRule 🤔
Nathan Hsieh · 9 months ago · commit eca152a4e8

+ 1 - 1
migrations_lockfile.txt

@@ -9,5 +9,5 @@ feedback: 0004_index_together
 hybridcloud: 0016_add_control_cacheversion
 nodestore: 0002_nodestore_no_dictfield
 replays: 0004_index_together
-sentry: 0729_backfill_groupsearchviews_with_pinned_searches
+sentry: 0730_add_subscription_fk_to_incident
 social_auth: 0002_default_auto_field

+ 2 - 0
src/sentry/incidents/logic.py

@@ -122,6 +122,7 @@ def create_incident(
     user=None,
     alert_rule=None,
     activation=None,
+    subscription=None,
 ):
     if date_detected is None:
         date_detected = date_started
@@ -137,6 +138,7 @@ def create_incident(
             date_detected=date_detected,
             alert_rule=alert_rule,
             activation=activation,
+            subscription=subscription,
         )
         if projects:
             incident_projects = [

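For callers, the new kwarg threads through as in the hedged sketch below; parameters other than those visible in the hunks above are assumptions about the full create_incident signature, and the surrounding variables (organization, alert_rule, subscription, detected_at, last_update) are assumed to exist in scope.

    # Sketch: create an incident that records which QuerySubscription fired it.
    incident = create_incident(
        organization,
        IncidentType.ALERT_TRIGGERED,
        title=alert_rule.name,
        date_started=detected_at,
        date_detected=last_update,
        projects=[subscription.project],
        alert_rule=alert_rule,
        subscription=subscription,  # new: links the incident to the triggering subscription
    )
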
+ 3 - 1
src/sentry/incidents/models/alert_rule.py

@@ -396,7 +396,7 @@ class AlertRuleThresholdType(Enum):
 @region_silo_model
 class AlertRuleTrigger(Model):
     """
-    This model represents the threshold trigger for an AlertRule
+    This model represents the *threshold* trigger for an AlertRule
 
     threshold_type is AlertRuleThresholdType (Above/Below)
     alert_threshold is the trigger value
@@ -459,6 +459,8 @@ class AlertRuleTriggerAction(AbstractNotificationAction):
     """
     This model represents an action that occurs when a trigger (over/under) is fired. This is
     typically some sort of notification.
+
+    NOTE: AlertRuleTrigger is the 'threshold' for the AlertRule
     """
 
     __relocation_scope__ = RelocationScope.Global

+ 48 - 32
src/sentry/incidents/models/incident.py

@@ -59,29 +59,36 @@ class IncidentSeen(Model):
 
 
 class IncidentManager(BaseManager["Incident"]):
-    CACHE_KEY = "incidents:active:%s:%s"
+    CACHE_KEY = "incidents:active:%s:%s:%s"
 
     def fetch_for_organization(self, organization, projects):
         return self.filter(organization=organization, projects__in=projects).distinct()
 
     @classmethod
-    def _build_active_incident_cache_key(cls, alert_rule_id, project_id):
-        return cls.CACHE_KEY % (alert_rule_id, project_id)
+    def _build_active_incident_cache_key(cls, alert_rule_id, project_id, subscription_id=None):
+        return cls.CACHE_KEY % (alert_rule_id, project_id, subscription_id)
 
-    def get_active_incident(self, alert_rule, project):
-        cache_key = self._build_active_incident_cache_key(alert_rule.id, project.id)
+    def get_active_incident(self, alert_rule, project, subscription=None):
+        """
+        fetches the latest incident for a given alert rule and project (and subscription) that is not closed
+        """
+        cache_key = self._build_active_incident_cache_key(
+            alert_rule_id=alert_rule.id,
+            project_id=project.id,
+            subscription_id=(subscription.id if subscription else None),
+        )
         incident = cache.get(cache_key)
         if incident is None:
             try:
-                incident = (
-                    Incident.objects.filter(
-                        type=IncidentType.ALERT_TRIGGERED.value,
-                        alert_rule=alert_rule,
-                        projects=project,
-                    )
-                    .exclude(status=IncidentStatus.CLOSED.value)
-                    .order_by("-date_added")[0]
+                incident_query = Incident.objects.filter(
+                    type=IncidentType.ALERT_TRIGGERED.value,
+                    alert_rule=alert_rule,
+                    projects=project,
+                    subscription=subscription,
                 )
+                incident = incident_query.exclude(status=IncidentStatus.CLOSED.value).order_by(
+                    "-date_added"
+                )[0]
             except IndexError:
                 # Set this to False so that we can have a negative cache as well.
                 incident = False
@@ -97,28 +104,26 @@ class IncidentManager(BaseManager["Incident"]):
 
     @classmethod
     def clear_active_incident_cache(cls, instance, **kwargs):
+        # instance is an Incident
         for project in instance.projects.all():
-            cache.delete(cls._build_active_incident_cache_key(instance.alert_rule_id, project.id))
-            assert (
-                cache.get(cls._build_active_incident_cache_key(instance.alert_rule_id, project.id))
-                is None
+            subscription = instance.subscription
+            key = cls._build_active_incident_cache_key(
+                instance.alert_rule_id, project.id, subscription.id if subscription else None
             )
+            cache.delete(key)
+            assert cache.get(key) is None
 
     @classmethod
     def clear_active_incident_project_cache(cls, instance, **kwargs):
-        cache.delete(
-            cls._build_active_incident_cache_key(
-                instance.incident.alert_rule_id, instance.project_id
-            )
-        )
-        assert (
-            cache.get(
-                cls._build_active_incident_cache_key(
-                    instance.incident.alert_rule_id, instance.project_id
-                )
-            )
-            is None
+        # instance is an IncidentProject
+        project_id = instance.project_id
+        incident = instance.incident
+        subscription_id = incident.subscription_id if incident.subscription else None
+        key = cls._build_active_incident_cache_key(
+            incident.alert_rule_id, project_id, subscription_id
         )
+        cache.delete(key)
+        assert cache.get(key) is None
 
     @TimedRetryPolicy.wrap(timeout=5, exceptions=(IntegrityError,))
     def create(self, organization, **kwargs):
@@ -173,6 +178,10 @@ class Incident(Model):
     An Incident represents the overarching period during an AlertRule's "unhealthy" state.
     An AlertRule can have multiple IncidentTriggers during an Incident (ie. Critical -> Warning -> Critical)
     but if it has been resolved, will end the Incident.
+
+    An AlertRule may have multiple Incidents that correlate with different subscriptions.
+    TODO:
+    - UI should be able to handle multiple active incidents
     """
 
     __relocation_scope__ = RelocationScope.Organization
@@ -203,9 +212,7 @@ class Incident(Model):
     activation = FlexibleForeignKey(
         "sentry.AlertRuleActivations", on_delete=models.SET_NULL, null=True
     )
-    subscription = FlexibleForeignKey(
-        "sentry.QuerySubscription", null=True, on_delete=models.PROTECT
-    )
+    subscription = FlexibleForeignKey("sentry.QuerySubscription", null=True)
 
     class Meta:
         app_label = "sentry"
@@ -299,6 +306,10 @@ class IncidentActivityType(Enum):
 
 @region_silo_model
 class IncidentActivity(Model):
+    """
+    An IncidentActivity is a record of a change that occurred in an Incident. This could be a status change,
+    """
+
     __relocation_scope__ = RelocationScope.Organization
 
     incident = FlexibleForeignKey("sentry.Incident")
@@ -329,6 +340,11 @@ class IncidentActivity(Model):
 
 @region_silo_model
 class IncidentSubscription(Model):
+    """
+    IncidentSubscription is a record of a user being subscribed to an incident.
+    Not to be confused with a snuba QuerySubscription
+    """
+
     __relocation_scope__ = RelocationScope.Organization
 
     incident = FlexibleForeignKey("sentry.Incident", db_index=False)

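The effect on the active-incident cache, as a small sketch (rule, project, sub_a, and sub_b are assumed to already exist):

    # The cache key now includes the subscription id, so two subscriptions of the
    # same AlertRule/project track separate active incidents.
    key_a = Incident.objects._build_active_incident_cache_key(
        alert_rule_id=rule.id, project_id=project.id, subscription_id=sub_a.id
    )
    key_b = Incident.objects._build_active_incident_cache_key(
        alert_rule_id=rule.id, project_id=project.id, subscription_id=sub_b.id
    )
    assert key_a != key_b  # "incidents:active:<rule>:<project>:<sub>"

    # Callers that omit the subscription fall into a shared None slot:
    legacy_key = Incident.objects._build_active_incident_cache_key(rule.id, project.id)
    # -> "incidents:active:<rule>:<project>:None"
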
+ 40 - 15
src/sentry/incidents/subscription_processor.py

@@ -109,10 +109,21 @@ class SubscriptionProcessor:
 
     @property
     def active_incident(self) -> Incident:
+        """
+        fetches an incident given the alert rule, project (and subscription if available)
+        """
         if not hasattr(self, "_active_incident"):
-            self._active_incident = Incident.objects.get_active_incident(
-                self.alert_rule, self.subscription.project
+            incident = Incident.objects.get_active_incident(
+                alert_rule=self.alert_rule,
+                project=self.subscription.project,
+                subscription=self.subscription,
             )
+            if not incident:
+                # TODO: make subscription required
+                incident = Incident.objects.get_active_incident(
+                    alert_rule=self.alert_rule, project=self.subscription.project
+                )
+            self._active_incident = incident
         return self._active_incident
 
     @active_incident.setter
@@ -120,7 +131,11 @@ class SubscriptionProcessor:
         self._active_incident = active_incident
 
     @property
-    def incident_triggers(self) -> dict[int, IncidentTrigger]:
+    def incident_trigger_map(self) -> dict[int, IncidentTrigger]:
+        """
+        mapping of alert rule trigger id to incident trigger
+        NOTE: IncidentTrigger is keyed via ("incident", "alert_rule_trigger")
+        """
         if not hasattr(self, "_incident_triggers"):
             incident = self.active_incident
             incident_triggers = {}
@@ -142,7 +157,7 @@ class SubscriptionProcessor:
         :param status: A `TriggerStatus`
         :return: True if at the specified status, otherwise False
         """
-        incident_trigger = self.incident_triggers.get(trigger.id)
+        incident_trigger = self.incident_trigger_map.get(trigger.id)
         return incident_trigger is not None and incident_trigger.status == status.value
 
     def reset_trigger_counts(self) -> None:
@@ -533,7 +548,11 @@ class SubscriptionProcessor:
                     self.trigger_resolve_counts[trigger.id] = 0
 
             if fired_incident_triggers:
-                self.handle_trigger_actions(fired_incident_triggers, aggregation_value)
+                # For all the newly created incidents
+                # handle the associated actions (eg. send an email/notification)
+                self.handle_trigger_actions(
+                    incident_triggers=fired_incident_triggers, metric_value=aggregation_value
+                )
 
         # We update the rule stats here after we commit the transaction. This guarantees
         # that we'll never miss an update, since we'll never roll back if the process
@@ -607,7 +626,7 @@ class SubscriptionProcessor:
         if self.trigger_alert_counts[trigger.id] >= self.alert_rule.threshold_period:
             metrics.incr("incidents.alert_rules.trigger", tags={"type": "fire"})
 
-            # Only create a new incident if we don't already have an active one
+            # Only create a new incident if we don't already have an active incident for the AlertRule
             if not self.active_incident:
                 detected_at = self.calculate_event_date_from_update_date(self.last_update)
                 activation: AlertRuleActivations | None = None
@@ -633,10 +652,12 @@ class SubscriptionProcessor:
                     date_detected=self.last_update,
                     projects=[self.subscription.project],
                     activation=activation,
+                    subscription=self.subscription,
                 )
             # Now create (or update if it already exists) the incident trigger so that
             # we have a record of this trigger firing for this incident
-            incident_trigger = self.incident_triggers.get(trigger.id)
+            # NOTE: `incident_triggers` is derived from `self.active_incident`
+            incident_trigger = self.incident_trigger_map.get(trigger.id)
             if incident_trigger:
                 incident_trigger.status = TriggerStatus.ACTIVE.value
                 incident_trigger.save()
@@ -647,7 +668,7 @@ class SubscriptionProcessor:
                     status=TriggerStatus.ACTIVE.value,
                 )
             self.handle_incident_severity_update()
-            self.incident_triggers[trigger.id] = incident_trigger
+            self.incident_trigger_map[trigger.id] = incident_trigger
 
             # TODO: We should create an audit log, and maybe something that keeps
             # all of the details available for showing on the incident. Might be a json
@@ -667,7 +688,7 @@ class SubscriptionProcessor:
         `TriggerStatus.Resolved` state.
         :return:
         """
-        for incident_trigger in self.incident_triggers.values():
+        for incident_trigger in self.incident_trigger_map.values():
             if incident_trigger.status != TriggerStatus.RESOLVED.value:
                 return False
         return True
@@ -683,7 +704,7 @@ class SubscriptionProcessor:
         self.trigger_resolve_counts[trigger.id] += 1
         if self.trigger_resolve_counts[trigger.id] >= self.alert_rule.threshold_period:
             metrics.incr("incidents.alert_rules.trigger", tags={"type": "resolve"})
-            incident_trigger = self.incident_triggers[trigger.id]
+            incident_trigger = self.incident_trigger_map[trigger.id]
             incident_trigger.status = TriggerStatus.RESOLVED.value
             incident_trigger.save()
             self.trigger_resolve_counts[trigger.id] = 0
@@ -696,7 +717,7 @@ class SubscriptionProcessor:
                     date_closed=self.calculate_event_date_from_update_date(self.last_update),
                 )
                 self.active_incident = None
-                self.incident_triggers.clear()
+                self.incident_trigger_map.clear()
             else:
                 self.handle_incident_severity_update()
 
@@ -707,6 +728,7 @@ class SubscriptionProcessor:
     def handle_trigger_actions(
         self, incident_triggers: list[IncidentTrigger], metric_value: float
     ) -> None:
+        # Actions represent the notification type that should be triggered when an alert is fired
         actions = deduplicate_trigger_actions(triggers=deepcopy(self.triggers))
         # Grab the first trigger to get incident id (they are all the same)
         # All triggers should either be firing or resolving, so doesn't matter which we grab.
@@ -716,11 +738,14 @@ class SubscriptionProcessor:
             if incident_trigger.status == TriggerStatus.ACTIVE.value
             else AlertRuleTriggerActionMethod.RESOLVE.value
         )
-        try:
+
+        # NOTE: all incident_triggers are derived from self.active_incident, so if active_incident is
+        # still set we can save ourselves a query
+        incident = self.active_incident
+        if not incident:
+            # NOTE: trigger_resolve_threshold clears the active incident cache
+            # So fetch the incident if active_incident has already been removed
             incident = Incident.objects.get(id=incident_trigger.incident_id)
-        except Incident.DoesNotExist:
-            metrics.incr("incidents.alert_rules.action.skipping_missing_incident")
-            return
 
         incident_activities = IncidentActivity.objects.filter(incident=incident).values_list(
             "value", flat=True

+ 83 - 0
src/sentry/migrations/0730_add_subscription_fk_to_incident.py

@@ -0,0 +1,83 @@
+# Generated by Django 5.0.6 on 2024-06-12 16:47
+
+import logging
+
+import django.db.models.deletion
+from django.db import migrations
+from django.db.backends.base.schema import BaseDatabaseSchemaEditor
+from django.db.migrations.state import StateApps
+
+import sentry.db.models.fields.foreignkey
+from sentry.new_migrations.migrations import CheckedMigration
+from sentry.utils.query import RangeQuerySetWrapper
+
+logger = logging.getLogger(__name__)
+
+
+def _backfill_incident_subscription(
+    apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
+) -> None:
+    Incident = apps.get_model("sentry", "Incident")
+
+    # use RangeQuerySetWrapper to avoid loading all Incidents into memory
+    for incident in RangeQuerySetWrapper(Incident.objects.all()):
+        if incident.subscription:
+            continue
+
+        alert_rule = incident.alert_rule
+        snuba_query = alert_rule.snuba_query
+        subscriptions = list(snuba_query.subscriptions.all())
+        if len(subscriptions) == 0 or len(subscriptions) > 1:
+            logger.info(
+                "Incident found with no or multiple subscriptions. skipping",
+                extra={
+                    "incident_id": incident.id,
+                    "snuba_query_id": snuba_query.id,
+                    "subscription_ids": [
+                        subscription.id for subscription in subscriptions if len(subscriptions) > 0
+                    ],
+                },
+            )
+            continue
+
+        subscription = subscriptions[0]
+        incident.subscription = subscription
+        incident.save(update_fields=["subscription"])
+
+
+class Migration(CheckedMigration):
+    # This flag is used to mark that a migration shouldn't be automatically run in production.
+    # This should only be used for operations where it's safe to run the migration after your
+    # code has deployed. So this should not be used for most operations that alter the schema
+    # of a table.
+    # Here are some things that make sense to mark as post deployment:
+    # - Large data migrations. Typically we want these to be run manually so that they can be
+    #   monitored and not block the deploy for a long period of time while they run.
+    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
+    #   run this outside deployments so that we don't block them. Note that while adding an index
+    #   is a schema change, it's completely safe to run the operation after the code has deployed.
+    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
+
+    is_post_deployment = True
+
+    dependencies = [
+        ("sentry", "0729_backfill_groupsearchviews_with_pinned_searches"),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name="incident",
+            name="subscription",
+            field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
+                null=True,
+                on_delete=django.db.models.deletion.CASCADE,
+                to="sentry.querysubscription",
+            ),
+        ),
+        # Run the data migration
+        migrations.RunPython(
+            _backfill_incident_subscription,
+            migrations.RunPython.noop,
+            hints={"tables": ["sentry_incident", "sentry_querysubscription"]},
+        ),
+    ]

+ 2 - 0
src/sentry/testutils/factories.py

@@ -1458,6 +1458,7 @@ class Factories:
         date_closed=None,
         seen_by=None,
         alert_rule=None,
+        subscription=None,
     ):
         if not title:
             title = petname.generate(2, " ", letters=10).title()
@@ -1476,6 +1477,7 @@ class Factories:
             date_detected=date_detected or timezone.now(),
             date_closed=timezone.now() if date_closed is not None else date_closed,
             type=IncidentType.ALERT_TRIGGERED.value,
+            subscription=subscription,
         )
         for project in projects:
             IncidentProject.objects.create(incident=incident, project=project)

+ 2 - 2
src/sentry/testutils/fixtures.py

@@ -363,14 +363,14 @@ class Fixtures:
     def create_integration_external_project(self, *args, **kwargs):
         return Factories.create_integration_external_project(*args, **kwargs)
 
-    def create_incident(self, organization=None, projects=None, *args, **kwargs):
+    def create_incident(self, organization=None, projects=None, subscription=None, *args, **kwargs):
         if not organization:
             organization = self.organization
         if projects is None:
             projects = [self.project]
 
         return Factories.create_incident(
-            organization=organization, projects=projects, *args, **kwargs
+            organization=organization, projects=projects, subscription=subscription, *args, **kwargs
         )
 
     def create_incident_activity(self, incident, *args, **kwargs):

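In tests, the new kwarg flows from the fixture into the factory; a minimal usage sketch, assuming a TestCase where self.alert_rule and self.project are already set up:

    # Sketch: create a test incident tied to a specific QuerySubscription.
    subscription = self.alert_rule.snuba_query.subscriptions.first()
    incident = self.create_incident(
        alert_rule=self.alert_rule,
        projects=[self.project],
        subscription=subscription,
    )
    assert incident.subscription == subscription
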
+ 32 - 12
tests/sentry/incidents/models/test_incidents.py

@@ -63,19 +63,24 @@ class ActiveIncidentClearCacheTest(TestCase):
         self.trigger = self.create_alert_rule_trigger(self.alert_rule)
 
     def test_negative_cache(self):
+        subscription = self.alert_rule.snuba_query.subscriptions.first()
         assert (
             cache.get(
                 Incident.objects._build_active_incident_cache_key(
-                    self.alert_rule.id, self.project.id
+                    alert_rule_id=self.alert_rule.id,
+                    project_id=self.project.id,
+                    subscription_id=subscription.id,
                 )
             )
             is None
         )
-        Incident.objects.get_active_incident(self.alert_rule, self.project)
+        Incident.objects.get_active_incident(self.alert_rule, self.project, subscription)
         assert (
             cache.get(
                 Incident.objects._build_active_incident_cache_key(
-                    self.alert_rule.id, self.project.id
+                    alert_rule_id=self.alert_rule.id,
+                    project_id=self.project.id,
+                    subscription_id=subscription.id,
                 )
             )
             is False
@@ -85,44 +90,59 @@ class ActiveIncidentClearCacheTest(TestCase):
         assert (
             cache.get(
                 Incident.objects._build_active_incident_cache_key(
-                    self.alert_rule.id, self.project.id
+                    alert_rule_id=self.alert_rule.id,
+                    project_id=self.project.id,
+                    subscription_id=subscription.id,
                 )
             )
         ) is False
 
     def test_cache(self):
+        subscription = self.alert_rule.snuba_query.subscriptions.first()
         assert (
             cache.get(
                 Incident.objects._build_active_incident_cache_key(
-                    self.alert_rule.id, self.project.id
+                    alert_rule_id=self.alert_rule.id,
+                    project_id=self.project.id,
+                    subscription_id=subscription.id,
                 )
             )
             is None
         )
-        active_incident = self.create_incident(alert_rule=self.alert_rule, projects=[self.project])
-        Incident.objects.get_active_incident(self.alert_rule, self.project)
+        active_incident = self.create_incident(
+            alert_rule=self.alert_rule, projects=[self.project], subscription=subscription
+        )
+        Incident.objects.get_active_incident(self.alert_rule, self.project, subscription)
         assert (
             cache.get(
                 Incident.objects._build_active_incident_cache_key(
-                    self.alert_rule.id, self.project.id
+                    alert_rule_id=self.alert_rule.id,
+                    project_id=self.project.id,
+                    subscription_id=subscription.id,
                 )
             )
             == active_incident
         )
-        active_incident = self.create_incident(alert_rule=self.alert_rule, projects=[self.project])
+        active_incident = self.create_incident(
+            alert_rule=self.alert_rule, projects=[self.project], subscription=subscription
+        )
         assert (
             cache.get(
                 Incident.objects._build_active_incident_cache_key(
-                    self.alert_rule.id, self.project.id
+                    alert_rule_id=self.alert_rule.id,
+                    project_id=self.project.id,
+                    subscription_id=subscription.id,
                 )
             )
             is None
         )
-        Incident.objects.get_active_incident(self.alert_rule, self.project)
+        Incident.objects.get_active_incident(self.alert_rule, self.project, subscription)
         assert (
             cache.get(
                 Incident.objects._build_active_incident_cache_key(
-                    self.alert_rule.id, self.project.id
+                    alert_rule_id=self.alert_rule.id,
+                    project_id=self.project.id,
+                    subscription_id=subscription.id,
                 )
             )
             == active_incident

+ 149 - 0
tests/sentry/migrations/test_0730_add_subscription_fk_to_incident.py

@@ -0,0 +1,149 @@
+from datetime import timedelta
+
+from sentry.incidents.models.alert_rule import AlertRule, AlertRuleThresholdType
+from sentry.snuba.dataset import Dataset
+from sentry.snuba.models import SnubaQuery
+from sentry.snuba.subscriptions import create_snuba_query
+from sentry.testutils.cases import TestMigrations
+
+
+class AlertRuleProjectBackfillTest(TestMigrations):
+    migrate_from = "0729_backfill_groupsearchviews_with_pinned_searches"
+    migrate_to = "0730_add_subscription_fk_to_incident"
+
+    def setup_before_migration(self, app):
+        self.snuba_query = create_snuba_query(
+            aggregate="",
+            dataset=Dataset.Events,
+            environment=None,
+            query="",
+            query_type=SnubaQuery.Type.ERROR,
+            resolution=timedelta(minutes=5),
+            time_window=timedelta(minutes=5),
+        )
+        self.alert_rule = AlertRule.objects.create(
+            organization=self.organization,
+            snuba_query=self.snuba_query,
+            name="foo",
+            threshold_type=AlertRuleThresholdType.ABOVE.value,
+            threshold_period=1,
+            include_all_projects=False,
+        )
+
+        # creates a QuerySubscription for the alertrule and project
+        self.alert_rule.subscribe_projects(projects=[self.project])
+        self.query_subscription = self.snuba_query.subscriptions.get()
+
+        self.incident = self.create_incident(projects=[self.project], alert_rule=self.alert_rule)
+
+        assert self.incident.alert_rule == self.alert_rule
+        assert not self.incident.subscription
+
+        snuba_query_existing_sub = create_snuba_query(
+            aggregate="",
+            dataset=Dataset.Events,
+            environment=None,
+            query="",
+            query_type=SnubaQuery.Type.ERROR,
+            resolution=timedelta(minutes=5),
+            time_window=timedelta(minutes=5),
+        )
+        alert_rule_existing_sub = AlertRule.objects.create(
+            organization=self.organization,
+            snuba_query=snuba_query_existing_sub,
+            name="foo",
+            threshold_type=AlertRuleThresholdType.ABOVE.value,
+            threshold_period=1,
+            include_all_projects=False,
+        )
+
+        # creates a QuerySubscription for the alertrule and project
+        alert_rule_existing_sub.subscribe_projects(projects=[self.project])
+        self.query_subscription_existing_sub = snuba_query_existing_sub.subscriptions.get()
+
+        self.incident_existing_sub = self.create_incident(
+            projects=[self.project],
+            alert_rule=alert_rule_existing_sub,
+            subscription=self.query_subscription_existing_sub,
+        )
+
+        assert self.incident_existing_sub.alert_rule == alert_rule_existing_sub
+        assert self.incident_existing_sub.subscription == self.query_subscription_existing_sub
+
+        # Incident with a no subscriptions
+        snuba_query_no_sub = create_snuba_query(
+            aggregate="",
+            dataset=Dataset.Events,
+            environment=None,
+            query="",
+            query_type=SnubaQuery.Type.ERROR,
+            resolution=timedelta(minutes=5),
+            time_window=timedelta(minutes=5),
+        )
+        self.alert_rule_no_sub = AlertRule.objects.create(
+            organization=self.organization,
+            snuba_query=snuba_query_no_sub,
+            name="foo",
+            threshold_type=AlertRuleThresholdType.ABOVE.value,
+            threshold_period=1,
+            include_all_projects=False,
+        )
+
+        # creates a QuerySubscription for the alertrule and project
+        self.alert_rule_no_sub.subscribe_projects(projects=[self.project])
+        temp_subscription = snuba_query_no_sub.subscriptions.get()
+        temp_subscription.delete()
+
+        self.incident_no_sub = self.create_incident(
+            projects=[self.project], alert_rule=self.alert_rule_no_sub
+        )
+
+        assert self.incident_no_sub.alert_rule == self.alert_rule_no_sub
+        assert not self.incident_no_sub.subscription
+
+        # Incident for alert rule with multiple subscriptions
+        snuba_query_mult = create_snuba_query(
+            aggregate="",
+            dataset=Dataset.Events,
+            environment=None,
+            query="",
+            query_type=SnubaQuery.Type.ERROR,
+            resolution=timedelta(minutes=5),
+            time_window=timedelta(minutes=5),
+        )
+        self.alert_rule_mult = AlertRule.objects.create(
+            organization=self.organization,
+            snuba_query=snuba_query_mult,
+            name="foo",
+            threshold_type=AlertRuleThresholdType.ABOVE.value,
+            threshold_period=1,
+            include_all_projects=False,
+        )
+
+        # creates a QuerySubscription for the alertrule and project
+        self.alert_rule_mult.subscribe_projects(projects=[self.project])
+        self.alert_rule_mult.subscribe_projects(projects=[self.project])
+        all_subscriptions = snuba_query_mult.subscriptions.all()
+
+        self.incident_mult = self.create_incident(
+            projects=[self.project], alert_rule=self.alert_rule_mult
+        )
+
+        assert len(all_subscriptions) > 1
+        assert self.incident_mult.alert_rule
+        assert not self.incident_mult.subscription
+
+    def test(self):
+        self.incident.refresh_from_db()
+        assert self.incident.subscription is not None
+        assert self.incident.subscription == self.query_subscription
+
+        self.incident_existing_sub.refresh_from_db()
+        assert self.incident_existing_sub.subscription is not None
+        assert self.incident_existing_sub.subscription == self.query_subscription_existing_sub
+
+        self.incident_no_sub.refresh_from_db()
+        assert not self.incident_no_sub.subscription
+
+        self.incident_mult.refresh_from_db()
+        assert not self.incident_mult.subscription