
ref(rules): Create apply_delayed processor (#69167)

Adds a processor for rules with "slow" conditions that are evaluated up
to a minute after the event comes in. This PR _only_ goes as far as
collecting the rules that we need to fire; the actual firing will happen
in a follow-up PR.

Addresses part of
https://github.com/getsentry/team-core-product-foundations/issues/242
Colleen O'Rourke committed 10 months ago
commit 2bdf2a7198
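
For orientation, a minimal sketch of the buffer contents this processor drains; the key names come from the code below, while the ids and event values are illustrative placeholders:

    # A Redis set of project ids that have pending slow conditions
    pending_projects = {4, 7}  # buffer.get_set(PROJECT_ID_BUFFER_LIST_KEY)

    # Per project, a hash keyed by "rule_id:group_id"
    rulegroup_to_events = {
        "12:300": "event_a",  # rule 12 must be checked against group 300
        "12:301": "event_b",
        "15:300": "event_c",  # rules 12 and 15 can share one scan of group 300
    }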

+ 30 - 0
src/sentry/rules/conditions/event_frequency.py

@@ -260,6 +260,36 @@ class BaseEventFrequencyCondition(EventCondition, abc.ABC):
 
         return result
 
+    def get_rate_bulk(
+        self,
+        duration: timedelta,
+        comparison_interval: timedelta,
+        group_ids: set[int],
+        environment_id: int,
+        comparison_type: str,
+    ) -> dict[int, int]:
+        start, end = self.get_comparison_start_end(timedelta(), duration)
+        with self.get_option_override(duration):
+            result = self.batch_query(
+                group_ids=group_ids,
+                start=start,
+                end=end,
+                environment_id=environment_id,
+            )
+        if comparison_type == ComparisonType.PERCENT:
+            start, comparison_end = self.get_comparison_start_end(comparison_interval, duration)
+            comparison_result = self.batch_query(
+                group_ids=group_ids,
+                start=start,
+                end=comparison_end,
+                environment_id=environment_id,
+            )
+            result = {
+                group_id: percent_increase(result[group_id], comparison_result[group_id])
+                for group_id in group_ids
+            }
+        return result
+
     def get_snuba_query_result(
         self,
         tsdb_function: Callable[..., Any],
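
For context, a hedged sketch of how a caller might drive the new bulk helper; condition and env_id are hypothetical placeholders, while timedelta and ComparisonType are already imported by this module:

    counts = condition.get_rate_bulk(
        duration=timedelta(hours=1),
        comparison_interval=timedelta(hours=1),
        group_ids={300, 301},
        environment_id=env_id,
        comparison_type=ComparisonType.COUNT,
    )
    # counts maps each group id to its event count over the last hour; with
    # ComparisonType.PERCENT the values are percent increases relative to the
    # comparison window instead.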

+ 196 - 15
src/sentry/rules/processing/delayed_processing.py

@@ -1,10 +1,17 @@
 import logging
-from collections.abc import Mapping
+from collections import defaultdict
+from collections.abc import MutableMapping
+from typing import Any, DefaultDict, NamedTuple
 
 from sentry.buffer.redis import BufferHookEvent, RedisBuffer, redis_buffer_registry
 from sentry.models.project import Project
+from sentry.models.rule import Rule
+from sentry.rules import rules
+from sentry.rules.conditions.event_frequency import BaseEventFrequencyCondition, ComparisonType
+from sentry.rules.processing.processor import is_condition_slow, split_conditions_and_filters
+from sentry.silo.base import SiloMode
+from sentry.tasks.base import instrumented_task
 from sentry.utils import metrics
-from sentry.utils.query import RangeQuerySetWrapper
 from sentry.utils.safe import safe_execute
 
 logger = logging.getLogger("sentry.rules.delayed_processing")
@@ -13,24 +20,198 @@ logger = logging.getLogger("sentry.rules.delayed_processing")
 PROJECT_ID_BUFFER_LIST_KEY = "project_id_buffer_list"
 
 
+class UniqueCondition(NamedTuple):
+    cls_id: str
+    interval: str
+    environment_id: int
+
+    def __repr__(self) -> str:
+        return f"id: {self.cls_id},\ninterval: {self.interval},\nenv id: {self.environment_id}"
+
+
+class DataAndGroups(NamedTuple):
+    data: MutableMapping[str, Any] | None
+    group_ids: set[int]
+
+    def __repr__(self) -> str:
+        return f"data: {self.data}\ngroup_ids: {self.group_ids}"
+
+
+def get_slow_conditions(rule: Rule) -> list[MutableMapping[str, str]]:
+    """
+    Returns the slow conditions of a rule model instance.
+    """
+    conditions_and_filters = rule.data.get("conditions", ())
+    conditions, _ = split_conditions_and_filters(conditions_and_filters)
+    slow_conditions: list[MutableMapping[str, str]] = [
+        cond for cond in conditions if is_condition_slow(cond)
+    ]
+
+    return slow_conditions
+
+
+def get_rules_to_groups(rulegroup_to_events: list[dict[str, str]]) -> DefaultDict[int, set[int]]:
+    rules_to_groups: DefaultDict[int, set[int]] = defaultdict(set)
+    for rulegroup_to_event in rulegroup_to_events:
+        for rule_group in rulegroup_to_event.keys():
+            rule_id, group_id = rule_group.split(":")
+            rules_to_groups[int(rule_id)].add(int(group_id))
+
+    return rules_to_groups
+
+
+def get_rule_to_slow_conditions(
+    alert_rules: list[Rule],
+) -> DefaultDict[Rule, list[MutableMapping[str, str]]]:
+    rule_to_slow_conditions: DefaultDict[Rule, list[MutableMapping[str, str]]] = defaultdict(
+        list
+    )
+    for rule in alert_rules:
+        slow_conditions = get_slow_conditions(rule)
+        for condition_data in slow_conditions:
+            rule_to_slow_conditions[rule].append(condition_data)
+
+    return rule_to_slow_conditions
+
+
+def get_condition_groups(
+    alert_rules: list[Rule], rules_to_groups: DefaultDict[int, set[int]]
+) -> dict[UniqueCondition, DataAndGroups]:
+    """Map unique conditions to the group IDs that need to checked for that
+    condition. We also store a pointer to that condition's JSON so we can
+    instantiate the class later
+    """
+    condition_groups: dict[UniqueCondition, DataAndGroups] = {}
+    for rule in alert_rules:
+        # We only want a rule's slow conditions because alert_rules are only added
+        # to the buffer if we've already checked their fast conditions.
+        slow_conditions = get_slow_conditions(rule)
+        for condition_data in slow_conditions:
+            if condition_data:
+                unique_condition = UniqueCondition(
+                    condition_data["id"], condition_data["interval"], rule.environment_id
+                )
+                # Add to set of group_ids if there are already group_ids
+                # that apply to the unique condition
+                if data_and_groups := condition_groups.get(unique_condition):
+                    data_and_groups.group_ids.update(rules_to_groups[rule.id])
+                # Otherwise, create the tuple containing the condition data and the
+                # set of group_ids that apply to the unique condition
+                else:
+                    condition_groups[unique_condition] = DataAndGroups(
+                        condition_data, rules_to_groups[rule.id]
+                    )
+    return condition_groups
+
+
+def get_condition_group_results(
+    condition_groups: dict[UniqueCondition, DataAndGroups],
+    project: Project,
+) -> dict[UniqueCondition, dict[int, int]] | None:
+    condition_group_results: dict[UniqueCondition, dict[int, int]] = {}
+    for unique_condition, (condition_data, group_ids) in condition_groups.items():
+        condition_cls = rules.get(unique_condition.cls_id)
+
+        if condition_cls is None:
+            logger.warning("Unregistered condition %r", unique_condition.cls_id)
+            return None
+
+        condition_inst = condition_cls(project=project, data=condition_data)
+        if not isinstance(condition_inst, BaseEventFrequencyCondition):
+            logger.warning("Unregistered condition %r", condition_cls.id)
+            return None
+
+        _, duration = condition_inst.intervals[unique_condition.interval]
+        # For now the comparison window is the same length as the query window
+        comparison_interval = duration
+        comparison_type = (
+            condition_data.get("comparisonType", ComparisonType.COUNT)
+            if condition_data
+            else ComparisonType.COUNT
+        )
+        result = safe_execute(
+            condition_inst.get_rate_bulk,
+            duration,
+            comparison_interval,
+            group_ids,
+            unique_condition.environment_id,
+            comparison_type,
+        )
+        condition_group_results[unique_condition] = result
+    return condition_group_results
+
+
+def get_rules_to_fire(
+    condition_group_results: dict[UniqueCondition, dict[int, int]],
+    rule_to_slow_conditions: DefaultDict[Rule, list[MutableMapping[str, str]]],
+    rules_to_groups: DefaultDict[int, set[int]],
+) -> DefaultDict[Rule, set[int]]:
+    rules_to_fire = defaultdict(set)
+    for alert_rule, slow_conditions in rule_to_slow_conditions.items():
+        for slow_condition in slow_conditions:
+            if slow_condition:
+                condition_id = slow_condition.get("id")
+                condition_interval = slow_condition.get("interval")
+                target_value = int(str(slow_condition.get("value")))
+                for condition_data, results in condition_group_results.items():
+                    if (
+                        alert_rule.environment_id == condition_data.environment_id
+                        and condition_id == condition_data.cls_id
+                        and condition_interval == condition_data.interval
+                    ):
+                        for group_id in rules_to_groups[alert_rule.id]:
+                            if results[group_id] > target_value:
+                                rules_to_fire[alert_rule].add(group_id)
+    return rules_to_fire
+
+
 @redis_buffer_registry.add_handler(BufferHookEvent.FLUSH)
 def process_delayed_alert_conditions(buffer: RedisBuffer) -> None:
     with metrics.timer("delayed_processing.process_all_conditions.duration"):
         project_ids = buffer.get_set(PROJECT_ID_BUFFER_LIST_KEY)
 
-        for project in RangeQuerySetWrapper(Project.objects.filter(id__in=project_ids)):
-            rulegroup_event_mapping = buffer.get_hash(
-                model=Project, field={"project_id": project.id}
-            )
-
+        for project_id in project_ids:
             with metrics.timer("delayed_processing.process_project.duration"):
-                safe_execute(
-                    apply_delayed,
-                    project,
-                    rulegroup_event_mapping,
-                    _with_transaction=False,
-                )
+                apply_delayed.delay(project_id=project_id, buffer=buffer)
+
+
+@instrumented_task(
+    name="sentry.delayed_processing.tasks.apply_delayed",
+    default_retry_delay=5,
+    max_retries=5,
+    soft_time_limit=50,
+    time_limit=60,  # 1 minute
+    silo_mode=SiloMode.REGION,
+)
+def apply_delayed(project_id: int, buffer: RedisBuffer) -> DefaultDict[Rule, set[int]] | None:
+    """
+    Grab rules, groups, and events from the Redis buffer, evaluate the "slow"
+    conditions in a bulk snuba query, and fire them if they pass.
+    """
+    # XXX(CEO) this is a temporary return value!
+    # STEP 1: Fetch the rulegroup_to_events mapping for the project from redis
+    project = Project.objects.get(id=project_id)
+    rulegroup_to_events = buffer.get_hash(model=Project, field={"project_id": project.id})
+
+    # STEP 2: Map each rule to the groups that must be checked for that rule.
+    rules_to_groups = get_rules_to_groups(rulegroup_to_events)
+
+    # STEP 3: Fetch the Rule models we need to check
+    alert_rules = Rule.objects.filter(id__in=list(rules_to_groups.keys()))
 
+    # STEP 4: Create a map of unique conditions to a tuple containing the JSON
+    # information needed to instantiate that condition class and the group_ids that
+    # must be checked for that condition. We don't query per rule condition because
+    # conditions of the same class, interval, and environment can share a single scan.
+    condition_groups = get_condition_groups(alert_rules, rules_to_groups)
+
+    # STEP 5: Instantiate each unique condition, and evaluate the relevant
+    # group_ids that apply for that condition
+    condition_group_results = get_condition_group_results(condition_groups, project)
+
+    # STEP 6: For each rule and group applying to that rule, check if the group
+    # meets the conditions of the rule (basically doing BaseEventFrequencyCondition.passes)
+    rule_to_slow_conditions = get_rule_to_slow_conditions(alert_rules)
 
-def apply_delayed(project: Project, rule_group_pairs: Mapping[str, str]) -> None:
-    pass
+    if condition_group_results:
+        rules_to_fire = get_rules_to_fire(
+            condition_group_results, rule_to_slow_conditions, rules_to_groups
+        )
+        return rules_to_fire
+    return None
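
To make the deduplication concrete, a small sketch with assumed ids: two rules that share a condition class, interval, and environment collapse into a single UniqueCondition, and therefore a single snuba query:

    rows = [{"12:300": "event_a", "15:301": "event_b"}]
    rules_to_groups = get_rules_to_groups(rows)
    # rules_to_groups == {12: {300}, 15: {301}}

    # If rules 12 and 15 both use EventFrequencyCondition with interval "1h"
    # in the same environment, get_condition_groups produces one entry:
    #   UniqueCondition(cls_id="...EventFrequencyCondition", interval="1h",
    #                   environment_id=1)
    #       -> DataAndGroups(data={...}, group_ids={300, 301})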

+ 23 - 18
src/sentry/rules/processing/processor.py

@@ -53,6 +53,28 @@ def is_condition_slow(
     return False
 
 
+def get_rule_type(condition: Mapping[str, Any]) -> str | None:
+    rule_cls = rules.get(condition["id"])
+    if rule_cls is None:
+        logger.warning("Unregistered condition or filter %r", condition["id"])
+        return None
+
+    rule_type: str = rule_cls.rule_type
+    return rule_type
+
+
+def split_conditions_and_filters(rule_condition_list):
+    condition_list = []
+    filter_list = []
+    for rule_cond in rule_condition_list:
+        if get_rule_type(rule_cond) == "condition/event":
+            condition_list.append(rule_cond)
+        else:
+            filter_list.append(rule_cond)
+
+    return condition_list, filter_list
+
+
 class RuleProcessor:
     def __init__(
         self,
@@ -166,15 +188,6 @@ class RuleProcessor:
         )
         return passes
 
-    def get_rule_type(self, condition: Mapping[str, Any]) -> str | None:
-        rule_cls = rules.get(condition["id"])
-        if rule_cls is None:
-            logger.warning("Unregistered condition or filter %r", condition["id"])
-            return None
-
-        rule_type: str = rule_cls.rule_type
-        return rule_type
-
     def get_state(self) -> EventState:
         return EventState(
             is_new=self.is_new,
@@ -205,7 +218,6 @@ class RuleProcessor:
 
         condition_match = rule.data.get("action_match") or Rule.DEFAULT_CONDITION_MATCH
         filter_match = rule.data.get("filter_match") or Rule.DEFAULT_FILTER_MATCH
-        rule_condition_list = rule.data.get("conditions", ())
         frequency = rule.data.get("frequency") or Rule.DEFAULT_FREQUENCY
         try:
             environment = self.event.get_environment()
@@ -221,14 +233,7 @@ class RuleProcessor:
             return
 
         state = self.get_state()
-
-        condition_list = []
-        filter_list = []
-        for rule_cond in rule_condition_list:
-            if self.get_rule_type(rule_cond) == "condition/event":
-                condition_list.append(rule_cond)
-            else:
-                filter_list.append(rule_cond)
+        condition_list, filter_list = split_conditions_and_filters(rule.data.get("conditions", ()))
 
         # Sort `condition_list` so that most expensive conditions run last.
         condition_list.sort(key=lambda condition: is_condition_slow(condition))
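
As a usage sketch of the extracted helper (the TaggedEventFilter id is assumed here for illustration):

    rule_conditions = [
        {
            "id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
            "interval": "1h",
            "value": 10,
        },
        {"id": "sentry.rules.filters.tagged_event.TaggedEventFilter", "key": "browser"},
    ]
    conditions, filters = split_conditions_and_filters(rule_conditions)
    # conditions -> [the EventFrequencyCondition entry]  (rule_type "condition/event")
    # filters    -> [the TaggedEventFilter entry]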

+ 258 - 30
tests/sentry/rules/processing/test_delayed_processing.py

@@ -1,53 +1,281 @@
+import copy
+from datetime import UTC, datetime
 from unittest.mock import Mock, patch
+from uuid import uuid4
+
+import pytest
 
 from sentry.db import models
+from sentry.eventstore.models import Event
 from sentry.rules.processing.delayed_processing import (
     apply_delayed,
     process_delayed_alert_conditions,
 )
-from sentry.testutils.cases import TestCase
+from sentry.testutils.cases import APITestCase, TestCase
+from sentry.testutils.factories import DEFAULT_EVENT_DATA
+from sentry.testutils.helpers.datetime import iso_format
+from tests.snuba.rules.conditions.test_event_frequency import BaseEventFrequencyPercentTest
 
+pytestmark = pytest.mark.sentry_metrics
 
-class ProcessDelayedAlertConditionsTest(TestCase):
-    def get_rulegroup_event_mapping_from_input(
-        self, model: type[models.Model], field: dict[str, models.Model | str | int]
+
+class ProcessDelayedAlertConditionsTest(TestCase, APITestCase, BaseEventFrequencyPercentTest):
+    def create_event(self, project_id, timestamp, fingerprint, environment=None) -> Event:
+        data = {
+            "timestamp": iso_format(timestamp),
+            "stacktrace": copy.deepcopy(DEFAULT_EVENT_DATA["stacktrace"]),
+            "environment": environment,
+            "fingerprint": [fingerprint],
+            "level": "error",
+            "user": {"id": uuid4().hex},
+            "exception": {
+                "values": [
+                    {
+                        "type": "IntegrationError",
+                        "value": "Identity not found.",
+                    }
+                ]
+            },
+        }
+        return self.store_event(
+            data=data,
+            project_id=project_id,
+            assert_no_errors=False,
+        )
+
+    def create_event_frequency_condition(
+        self,
+        interval="1d",
+        id="EventFrequencyCondition",
+        value=1,
     ):
-        # There will only be one event per rulegroup
-        proj_id = field.popitem()[1]
-        return self.buffer_mapping[proj_id]
+        condition_id = f"sentry.rules.conditions.event_frequency.{id}"
+        return {"interval": interval, "id": condition_id, "value": value}
+
+    def setUp(self):
+        super().setUp()
+        self.event_frequency_condition = self.create_event_frequency_condition()
+        self.event_frequency_condition2 = self.create_event_frequency_condition(value=2)
+        self.event_frequency_condition3 = self.create_event_frequency_condition(
+            interval="1h", value=1
+        )
+        user_frequency_condition = self.create_event_frequency_condition(
+            interval="1m",
+            id="EventUniqueUserFrequencyCondition",
+        )
+        event_frequency_percent_condition = self.create_event_frequency_condition(
+            interval="5m", id="EventFrequencyPercentCondition"
+        )
+        self.now = datetime.now(UTC)
+
+        self.rule1 = self.create_project_rule(
+            project=self.project,
+            condition_match=[self.event_frequency_condition],
+            environment_id=self.environment.id,
+        )
+        self.event1 = self.create_event(self.project.id, self.now, "group-1", self.environment.name)
+        self.create_event(self.project.id, self.now, "group-1", self.environment.name)
 
-    @patch("sentry.rules.processing.delayed_processing.safe_execute")
-    def test_fetches_from_buffer_and_executes(self, mock_safe_execute):
-        project_two = self.create_project()
+        self.group1 = self.event1.group
+        assert self.group1
 
-        rulegroup_event_mapping_one = {
-            f"{self.project.id}:1": "event_1",
-            f"{project_two.id}:2": "event_2",
+        self.rule2 = self.create_project_rule(
+            project=self.project, condition_match=[user_frequency_condition]
+        )
+        self.event2 = self.create_event(self.project, self.now, "group-2", self.environment.name)
+        self.create_event(self.project, self.now, "group-2", self.environment.name)
+        self.group2 = self.event2.group
+        assert self.group2
+
+        self.rulegroup_event_mapping_one = {
+            f"{self.rule1.id}:{self.group1.id}": {self.event1.event_id},
+            f"{self.rule2.id}:{self.group2.id}": {self.event2.event_id},
         }
-        rulegroup_event_mapping_two = {
-            f"{self.project.id}:3": "event_3",
-            f"{project_two.id}:4": "event_4",
+
+        self.project_two = self.create_project(organization=self.organization)
+        self.environment2 = self.create_environment(project=self.project_two)
+
+        self.rule3 = self.create_project_rule(
+            project=self.project_two,
+            condition_match=[self.event_frequency_condition2],
+            environment_id=self.environment2.id,
+        )
+        self.event3 = self.create_event(
+            self.project_two, self.now, "group-3", self.environment2.name
+        )
+        self.create_event(self.project_two, self.now, "group-3", self.environment2.name)
+        self.create_event(self.project_two, self.now, "group-3", self.environment2.name)
+        self.create_event(self.project_two, self.now, "group-3", self.environment2.name)
+        self.group3 = self.event3.group
+        assert self.group3
+
+        self.rule4 = self.create_project_rule(
+            project=self.project_two, condition_match=[event_frequency_percent_condition]
+        )
+        self.event4 = self.create_event(self.project_two, self.now, "group-4")
+        self.create_event(self.project_two, self.now, "group-4")
+        self._make_sessions(60, project=self.project_two)
+        self.group4 = self.event4.group
+        assert self.group4
+
+        self.rulegroup_event_mapping_two = {
+            f"{self.rule3.id}:{self.group3.id}": {self.event3.event_id},
+            f"{self.rule4.id}:{self.group4.id}": {self.event4.event_id},
         }
         self.buffer_mapping = {
-            self.project.id: rulegroup_event_mapping_one,
-            project_two.id: rulegroup_event_mapping_two,
+            self.project.id: self.rulegroup_event_mapping_one,
+            self.project_two.id: self.rulegroup_event_mapping_two,
         }
 
-        mock_buffer = Mock()
-        mock_buffer.get_set.return_value = self.buffer_mapping.keys()
+        self.mock_buffer = Mock()
+
+    def get_rulegroup_event_mapping_from_input(
+        self, model: type[models.Model], field: dict[str, models.Model | str | int]
+    ):
+        # There will only be one event per rulegroup
+        proj_id = field.popitem()[1]
+        return self.buffer_mapping[proj_id]
+
+    @patch("sentry.rules.processing.delayed_processing.apply_delayed")
+    def test_fetches_from_buffer_and_executes(self, mock_apply_delayed):
+        self.mock_buffer.get_set.return_value = self.buffer_mapping.keys()
         # To get the correct mapping, we need to return the correct
         # rulegroup_event mapping based on the project_id input
-        mock_buffer.get_hash.side_effect = self.get_rulegroup_event_mapping_from_input
+        self.mock_buffer.get_hash.side_effect = self.get_rulegroup_event_mapping_from_input
 
-        process_delayed_alert_conditions(mock_buffer)
+        process_delayed_alert_conditions(self.mock_buffer)
 
         for project, rule_group_event_mapping in (
-            (self.project, rulegroup_event_mapping_one),
-            (project_two, rulegroup_event_mapping_two),
+            (self.project, self.rulegroup_event_mapping_one),
+            (self.project_two, self.rulegroup_event_mapping_two),
         ):
-            mock_safe_execute.assert_any_call(
-                apply_delayed,
-                project,
-                rule_group_event_mapping,
-                _with_transaction=False,
-            )
+            mock_apply_delayed.delay.assert_any_call(
+                project_id=project.id, buffer=self.mock_buffer
+            )
+        assert mock_apply_delayed.delay.call_count == 2
+
+    @patch("sentry.rules.conditions.event_frequency.MIN_SESSIONS_TO_FIRE", 1)
+    def test_apply_delayed_rules_to_fire(self):
+        """
+        Test that rules with various event frequency conditions, projects, and
+        environments are properly scheduled to fire
+        """
+        self.mock_buffer.get_hash.return_value = [self.rulegroup_event_mapping_one]
+
+        rules = apply_delayed(self.project.id, self.mock_buffer)
+        assert self.rule1 in rules
+        assert self.rule2 in rules
+
+        self.mock_buffer.get_hash.return_value = [self.rulegroup_event_mapping_two]
+
+        rules = apply_delayed(self.project_two.id, self.mock_buffer)
+        assert self.rule3 in rules
+        assert self.rule4 in rules
+
+    def test_apply_delayed_same_condition_diff_value(self):
+        """
+        Test that two rules with the same condition and interval but a different value are both scheduled to fire
+        """
+        rule5 = self.create_project_rule(
+            project=self.project_two,
+            condition_match=[self.event_frequency_condition2],
+            environment_id=self.environment.id,
+        )
+        event5 = self.create_event(self.project_two, self.now, "group-5", self.environment.name)
+        self.create_event(self.project_two, self.now, "group-5", self.environment.name)
+        self.create_event(self.project_two, self.now, "group-5", self.environment.name)
+        group5 = event5.group
+        assert group5
+        assert self.group1
+
+        self.mock_buffer.get_hash.return_value = [
+            {
+                f"{self.rule1.id}:{self.group1.id}": {self.event1.event_id},
+                f"{rule5.id}:{group5.id}": {event5.event_id},
+            },
+        ]
+
+        rules = apply_delayed(self.project.id, self.mock_buffer)
+        assert self.rule1 in rules
+        assert rule5 in rules
+
+    def test_apply_delayed_same_condition_diff_interval(self):
+        """
+        Test that two rules with the same condition and value but a different interval are both scheduled to fire
+        """
+        diff_interval_rule = self.create_project_rule(
+            project=self.project,
+            condition_match=[self.event_frequency_condition3],
+            environment_id=self.environment.id,
+        )
+        event5 = self.create_event(self.project.id, self.now, "group-5", self.environment.name)
+        self.create_event(self.project.id, self.now, "group-5", self.environment.name)
+        group5 = event5.group
+        assert group5
+        assert self.group1
+
+        self.mock_buffer.get_hash.return_value = [
+            {
+                f"{self.rule1.id}:{self.group1.id}": {self.event1.event_id},
+                f"{diff_interval_rule.id}:{group5.id}": {event5.event_id},
+            },
+        ]
+
+        rules = apply_delayed(self.project.id, self.mock_buffer)
+        assert self.rule1 in rules
+        assert diff_interval_rule in rules
+
+    def test_apply_delayed_same_condition_diff_env(self):
+        """
+        Test that two rules with the same condition, value, and interval but different environment are both scheduled to fire
+        """
+        environment3 = self.create_environment(project=self.project)
+        diff_env_rule = self.create_project_rule(
+            project=self.project,
+            condition_match=[self.event_frequency_condition],
+            environment_id=environment3.id,
+        )
+        event5 = self.create_event(self.project.id, self.now, "group-5", environment3.name)
+        self.create_event(self.project.id, self.now, "group-5", environment3.name)
+        group5 = event5.group
+        assert group5
+        assert self.group1
+
+        self.mock_buffer.get_hash.return_value = [
+            {
+                f"{self.rule1.id}:{self.group1.id}": {self.event1.event_id},
+                f"{diff_env_rule.id}:{group5.id}": {event5.event_id},
+            },
+        ]
+
+        rules = apply_delayed(self.project.id, self.mock_buffer)
+        assert self.rule1 in rules
+        assert diff_env_rule in rules
+
+    def test_apply_delayed_two_rules_one_fires(self):
+        """
+        Test that with two rules in one project where one rule hasn't met the trigger threshold, only one is scheduled to fire
+        """
+        high_event_frequency_condition = {
+            "interval": "1d",
+            "id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
+            "value": 100,
+            "name": "The issue is seen more than 100 times in 1d",
+        }
+        no_fire_rule = self.create_project_rule(
+            project=self.project,
+            condition_match=[high_event_frequency_condition],
+            environment_id=self.environment.id,
+        )
+        event5 = self.create_event(self.project.id, self.now, "group-5", self.environment.name)
+        self.create_event(self.project.id, self.now, "group-5", self.environment.name)
+        group5 = event5.group
+        assert group5
+        assert self.group1
+
+        self.mock_buffer.get_hash.return_value = [
+            {
+                f"{self.rule1.id}:{self.group1.id}": {self.event1.event_id},
+                f"{no_fire_rule.id}:{group5.id}": {event5.event_id},
+            },
+        ]
+
+        rules = apply_delayed(self.project.id, self.mock_buffer)
+        assert self.rule1 in rules
+        assert no_fire_rule not in rules

+ 6 - 3
tests/snuba/rules/conditions/test_event_frequency.py

@@ -8,6 +8,7 @@ import pytest
 from django.utils import timezone
 
 from sentry.issues.grouptype import PerformanceNPlusOneGroupType
+from sentry.models.project import Project
 from sentry.models.rule import Rule
 from sentry.rules.conditions.event_frequency import (
     EventFrequencyCondition,
@@ -29,15 +30,17 @@ pytestmark = [pytest.mark.sentry_metrics, requires_snuba]
 
 
 class BaseEventFrequencyPercentTest(BaseMetricsTestCase):
-    def _make_sessions(self, num: int, environment_name: str | None = None):
+    def _make_sessions(
+        self, num: int, environment_name: str | None = None, project: Project | None = None
+    ):
         received = time.time()
 
         def make_session(i):
             return dict(
                 distinct_id=uuid4().hex,
                 session_id=uuid4().hex,
-                org_id=self.project.organization_id,
-                project_id=self.project.id,
+                org_id=project.organization_id if project else self.project.organization_id,
+                project_id=project.id if project else self.project.id,
                 status="ok",
                 seq=0,
                 release="foo@1.0.0",