from __future__ import annotations

import abc
import time
import uuid
from datetime import datetime, timedelta
from hashlib import md5
from typing import Any
from unittest import mock
from unittest.mock import Mock, patch

import pytest
from django.db import router
from django.test import override_settings
from django.utils import timezone

from sentry import buffer
from sentry.buffer.redis import RedisBuffer
from sentry.eventstore.models import Event
from sentry.eventstore.processing import event_processing_store
from sentry.feedback.usecases.create_feedback import FeedbackCreationSource
from sentry.ingest.transaction_clusterer import ClustererNamespace
from sentry.integrations.mixins.commit_context import CommitInfo, FileBlameInfo
from sentry.issues.grouptype import (
    FeedbackGroup,
    GroupCategory,
    PerformanceNPlusOneGroupType,
    PerformanceP95EndpointRegressionGroupType,
    ProfileFileIOGroupType,
)
from sentry.issues.ingest import save_issue_occurrence
from sentry.models.activity import Activity, ActivityIntegration
from sentry.models.group import GROUP_SUBSTATUS_TO_STATUS_MAP, Group, GroupStatus
from sentry.models.groupassignee import GroupAssignee
from sentry.models.groupinbox import GroupInbox, GroupInboxReason
from sentry.models.groupowner import (
    ASSIGNEE_EXISTS_DURATION,
    ASSIGNEE_EXISTS_KEY,
    ISSUE_OWNERS_DEBOUNCE_DURATION,
    ISSUE_OWNERS_DEBOUNCE_KEY,
    GroupOwner,
    GroupOwnerType,
)
from sentry.models.groupsnooze import GroupSnooze
from sentry.models.integrations.integration import Integration
from sentry.models.projectownership import ProjectOwnership
from sentry.models.projectteam import ProjectTeam
from sentry.ownership.grammar import Matcher, Owner, Rule, dump_schema
from sentry.replays.lib import kafka as replays_kafka
from sentry.replays.lib.kafka import clear_replay_publisher
from sentry.rules import init_registry
from sentry.rules.actions.base import EventAction
from sentry.services.hybrid_cloud.user.service import user_service
from sentry.silo.base import SiloMode
from sentry.silo.safety import unguarded_write
from sentry.tasks.derive_code_mappings import SUPPORTED_LANGUAGES
from sentry.tasks.merge import merge_groups
from sentry.tasks.post_process import (
    HIGHER_ISSUE_OWNERS_PER_PROJECT_PER_MIN_RATELIMIT,
    ISSUE_OWNERS_PER_PROJECT_PER_MIN_RATELIMIT,
    feedback_filter_decorator,
    locks,
    post_process_group,
    process_event,
    run_post_process_job,
)
from sentry.testutils.cases import BaseTestCase, PerformanceIssueTestCase, SnubaTestCase, TestCase
from sentry.testutils.helpers import with_feature
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.helpers.eventprocessing import write_event_to_cache
from sentry.testutils.helpers.options import override_options
from sentry.testutils.performance_issues.store_transaction import store_transaction
from sentry.testutils.silo import assume_test_silo_mode
from sentry.testutils.skips import requires_snuba
from sentry.types.activity import ActivityType
from sentry.types.group import GroupSubStatus, PriorityLevel
from sentry.utils import json
from sentry.utils.cache import cache
from sentry.utils.sdk_crashes.sdk_crash_detection_config import SdkName
from tests.sentry.issues.test_utils import OccurrenceTestMixin

pytestmark = [requires_snuba]
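
# EventMatcher lets mock-call assertions match an event by event_id (and
# optionally by group) rather than by object identity, e.g.
# mock_fn.assert_called_once_with(EventMatcher(event)).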
class EventMatcher:
    def __init__(self, expected, group=None):
        self.expected = expected
        self.expected_group = group

    def __eq__(self, other):
        matching_id = other.event_id == self.expected.event_id
        if self.expected_group:
            return (
                matching_id
                and self.expected_group == other.group
                and self.expected_group.id == other.group_id
            )
        return matching_id
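
# Abstract base for the mixins below: concrete test classes supply how an
# event is created and how post_process_group is invoked for their event type.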
class BasePostProgressGroupMixin(BaseTestCase, metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def create_event(self, data, project_id, assert_no_errors=True):
        pass

    @abc.abstractmethod
    def call_post_process_group(
        self, is_new, is_regression, is_new_group_environment, event, cache_key=None
    ):
        pass
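
# Core pipeline behavior: transaction events skip the issue-oriented steps, an
# unknown cache key aborts processing, and the event processing cache is
# cleared once post-processing finishes.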
class CorePostProcessGroupTestMixin(BasePostProgressGroupMixin):
    @patch("sentry.rules.processing.processor.RuleProcessor")
    @patch("sentry.tasks.servicehooks.process_service_hook")
    @patch("sentry.tasks.sentry_apps.process_resource_change_bound.delay")
    @patch("sentry.signals.event_processed.send_robust")
    def test_issueless(
        self,
        mock_signal,
        mock_process_resource_change_bound,
        mock_process_service_hook,
        mock_processor,
    ):
        min_ago = iso_format(before_now(minutes=1))
        event = self.store_event(
            data={
                "type": "transaction",
                "timestamp": min_ago,
                "start_timestamp": min_ago,
                "contexts": {"trace": {"trace_id": "b" * 32, "span_id": "c" * 16, "op": ""}},
            },
            project_id=self.project.id,
        )
        cache_key = write_event_to_cache(event)
        self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
            cache_key=cache_key,
        )
        assert mock_processor.call_count == 0
        assert mock_process_service_hook.call_count == 0
        assert mock_process_resource_change_bound.call_count == 0

        # transaction events do not send the event_processed signal
        assert mock_signal.call_count == 0

    @patch("sentry.rules.processing.processor.RuleProcessor")
    def test_no_cache_abort(self, mock_processor):
        event = self.create_event(data={}, project_id=self.project.id)
        self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
            cache_key="total-rubbish",
        )
        assert mock_processor.call_count == 0

    def test_processing_cache_cleared(self):
        event = self.create_event(data={}, project_id=self.project.id)
        cache_key = self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
        )
        assert event_processing_store.get(cache_key) is None

    def test_processing_cache_cleared_with_commits(self):
        # Regression test to guard against suspect commit calculations breaking
        # the cache
        event = self.create_event(data={}, project_id=self.project.id)
        self.create_commit(repo=self.create_repo())
        cache_key = self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
        )
        assert event_processing_store.get(cache_key) is None

    @patch("sentry.utils.metrics.timing")
    @patch("sentry.tasks.post_process.logger")
    def test_time_to_process_metric(self, logger_mock, metric_timing_mock):
        event = self.create_event(data={}, project_id=self.project.id)
        self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
        )
        metric_timing_mock.assert_any_call(
            "events.time-to-post-process",
            mock.ANY,
            instance=mock.ANY,
            tags={"occurrence_type": mock.ANY},
        )
        logger_mock.warning.assert_not_called()
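
# Code mapping derivation is debounced per project (hourly) and per issue
# (daily); the tests below patch time.time to step over those windows.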
class DeriveCodeMappingsProcessGroupTestMixin(BasePostProgressGroupMixin):
    def _create_event(
        self,
        data: dict[str, Any],
        project_id: int | None = None,
    ) -> Event:
        data.setdefault("platform", "javascript")
        return self.store_event(data=data, project_id=project_id or self.project.id)

    def _call_post_process_group(self, event: Event) -> None:
        self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
        )

    @patch("sentry.tasks.derive_code_mappings.derive_code_mappings")
    def test_derive_invalid_platform(self, mock_derive_code_mappings):
        event = self._create_event({"platform": "elixir"})
        self._call_post_process_group(event)
        assert mock_derive_code_mappings.delay.call_count == 0

    @patch("sentry.tasks.derive_code_mappings.derive_code_mappings")
    def test_derive_supported_languages(self, mock_derive_code_mappings):
        for platform in SUPPORTED_LANGUAGES:
            event = self._create_event({"platform": platform})
            self._call_post_process_group(event)
            assert mock_derive_code_mappings.delay.call_count == 1

    @patch("sentry.tasks.derive_code_mappings.derive_code_mappings")
    def test_only_maps_a_given_project_once_per_hour(self, mock_derive_code_mappings):
        dogs_project = self.create_project()
        maisey_event = self._create_event(
            {
                "fingerprint": ["themaiseymasieydog"],
            },
            dogs_project.id,
        )
        charlie_event = self._create_event(
            {
                "fingerprint": ["charliebear"],
            },
            dogs_project.id,
        )
        cory_event = self._create_event(
            {
                "fingerprint": ["thenudge"],
            },
            dogs_project.id,
        )
        bodhi_event = self._create_event(
            {
                "fingerprint": ["theescapeartist"],
            },
            dogs_project.id,
        )

        self._call_post_process_group(maisey_event)
        assert mock_derive_code_mappings.delay.call_count == 1

        # second event from project should bail (no increase in call count)
        self._call_post_process_group(charlie_event)
        assert mock_derive_code_mappings.delay.call_count == 1

        # advance the clock 59 minutes, and it should still bail
        with patch("time.time", return_value=time.time() + 60 * 59):
            self._call_post_process_group(cory_event)
            assert mock_derive_code_mappings.delay.call_count == 1

        # now advance the clock 61 minutes, and this time it should go through
        with patch("time.time", return_value=time.time() + 60 * 61):
            self._call_post_process_group(bodhi_event)
            assert mock_derive_code_mappings.delay.call_count == 2

    @patch("sentry.tasks.derive_code_mappings.derive_code_mappings")
    def test_only_maps_a_given_issue_once_per_day(self, mock_derive_code_mappings):
        dogs_project = self.create_project()
        maisey_event1 = self._create_event(
            {
                "fingerprint": ["themaiseymaiseydog"],
            },
            dogs_project.id,
        )
        maisey_event2 = self._create_event(
            {
                "fingerprint": ["themaiseymaiseydog"],
            },
            dogs_project.id,
        )
        maisey_event3 = self._create_event(
            {
                "fingerprint": ["themaiseymaiseydog"],
            },
            dogs_project.id,
        )
        maisey_event4 = self._create_event(
            {
                "fingerprint": ["themaiseymaiseydog"],
            },
            dogs_project.id,
        )

        # because of the fingerprint, the events should always end up in the same group,
        # but the rest of the test is bogus if they aren't, so let's be sure
        assert maisey_event1.group_id == maisey_event2.group_id
        assert maisey_event2.group_id == maisey_event3.group_id
        assert maisey_event3.group_id == maisey_event4.group_id

        self._call_post_process_group(maisey_event1)
        assert mock_derive_code_mappings.delay.call_count == 1

        # second event from group should bail (no increase in call count)
        self._call_post_process_group(maisey_event2)
        assert mock_derive_code_mappings.delay.call_count == 1

        # advance the clock 23 hours and 59 minutes, and it should still bail
        with patch("time.time", return_value=time.time() + (60 * 60 * 23) + (60 * 59)):
            self._call_post_process_group(maisey_event3)
            assert mock_derive_code_mappings.delay.call_count == 1

        # now advance the clock 24 hours and 1 minute, and this time it should go through
        with patch("time.time", return_value=time.time() + (60 * 60 * 24) + (60 * 1)):
            self._call_post_process_group(maisey_event4)
            assert mock_derive_code_mappings.delay.call_count == 2

    @patch("sentry.tasks.derive_code_mappings.derive_code_mappings")
    def test_skipping_an_issue_doesnt_mark_it_processed(self, mock_derive_code_mappings):
        dogs_project = self.create_project()
        maisey_event = self._create_event(
            {
                "fingerprint": ["themaiseymasieydog"],
            },
            dogs_project.id,
        )
        charlie_event1 = self._create_event(
            {
                "fingerprint": ["charliebear"],
            },
            dogs_project.id,
        )
        charlie_event2 = self._create_event(
            {
                "fingerprint": ["charliebear"],
            },
            dogs_project.id,
        )

        # because of the fingerprint, the two Charlie events should always end up in the same group,
        # but the rest of the test is bogus if they aren't, so let's be sure
        assert charlie_event1.group_id == charlie_event2.group_id

        self._call_post_process_group(maisey_event)
        assert mock_derive_code_mappings.delay.call_count == 1

        # second event from project should bail (no increase in call count)
        self._call_post_process_group(charlie_event1)
        assert mock_derive_code_mappings.delay.call_count == 1

        # now advance the clock 61 minutes (so the project should clear the cache), and another
        # event from the Charlie group should go through
        with patch("time.time", return_value=time.time() + 60 * 61):
            self._call_post_process_group(charlie_event2)
            assert mock_derive_code_mappings.delay.call_count == 2
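
# Judging by the assertions below, RuleProcessor is constructed positionally as
# RuleProcessor(event, is_new, is_regression, is_new_group_environment,
# has_reappeared, <sixth flag>); the first four names mirror the keyword
# arguments passed to call_post_process_group, the rest is inferred.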
class RuleProcessorTestMixin(BasePostProgressGroupMixin):
    @patch("sentry.rules.processing.processor.RuleProcessor")
    def test_rule_processor_backwards_compat(self, mock_processor):
        event = self.create_event(data={}, project_id=self.project.id)
        mock_callback = Mock()
        mock_futures = [Mock()]
        mock_processor.return_value.apply.return_value = [(mock_callback, mock_futures)]
        self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
        )
        mock_processor.assert_called_once_with(EventMatcher(event), True, False, True, False, False)
        mock_processor.return_value.apply.assert_called_once_with()
        mock_callback.assert_called_once_with(EventMatcher(event), mock_futures)

    @patch("sentry.rules.processing.processor.RuleProcessor")
    def test_rule_processor(self, mock_processor):
        event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
        mock_callback = Mock()
        mock_futures = [Mock()]
        mock_processor.return_value.apply.return_value = [(mock_callback, mock_futures)]
        self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
        )
        mock_processor.return_value.apply.assert_called_once_with()
        mock_callback.assert_called_once_with(EventMatcher(event), mock_futures)

    def test_rule_processor_buffer_values(self):
        # Test that pending buffer values for `times_seen` are applied to the
        # group and that alerts fire as expected.
        from sentry.models.rule import Rule

        MOCK_RULES = ("sentry.rules.filters.issue_occurrences.IssueOccurrencesFilter",)
        redis_buffer = RedisBuffer()
        with (
            mock.patch("sentry.buffer.backend.get", redis_buffer.get),
            mock.patch("sentry.buffer.backend.incr", redis_buffer.incr),
            patch("sentry.constants._SENTRY_RULES", MOCK_RULES),
            patch("sentry.rules.rules", init_registry()) as rules,
        ):
            MockAction = mock.Mock()
            MockAction.id = "tests.sentry.tasks.post_process.tests.MockAction"
            MockAction.return_value = mock.Mock(spec=EventAction)
            MockAction.return_value.after.return_value = []
            rules.add(MockAction)

            conditions = [
                {
                    "id": "sentry.rules.filters.issue_occurrences.IssueOccurrencesFilter",
                    "value": 10,
                },
            ]
            actions = [{"id": "tests.sentry.tasks.post_process.tests.MockAction"}]
            Rule.objects.filter(project=self.project).delete()
            Rule.objects.create(
                project=self.project, data={"conditions": conditions, "actions": actions}
            )

            event = self.create_event(
                data={"message": "testing", "fingerprint": ["group-1"]}, project_id=self.project.id
            )
            event_2 = self.create_event(
                data={"message": "testing", "fingerprint": ["group-1"]}, project_id=self.project.id
            )
            self.call_post_process_group(
                is_new=True,
                is_regression=False,
                is_new_group_environment=True,
                event=event,
            )
            event.group.update(times_seen=2)
            assert MockAction.return_value.after.call_count == 0

            buffer.backend.incr(Group, {"times_seen": 15}, filters={"id": event.group.id})
            self.call_post_process_group(
                is_new=True,
                is_regression=False,
                is_new_group_environment=True,
                event=event_2,
            )
            assert MockAction.return_value.after.call_count == 1

    @patch("sentry.rules.processing.processor.RuleProcessor")
    def test_group_refresh(self, mock_processor):
        event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
        group1 = event.group
        group2 = self.create_group(project=self.project)

        assert event.group_id == group1.id
        assert event.group == group1

        with self.tasks():
            merge_groups([group1.id], group2.id)

        mock_callback = Mock()
        mock_futures = [Mock()]
        mock_processor.return_value.apply.return_value = [(mock_callback, mock_futures)]
        self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=True,
            event=event,
        )
        # Ensure that rule processing sees the merged group.
        mock_processor.assert_called_with(
            EventMatcher(event, group=group2), True, False, True, False, False
        )

    @patch("sentry.rules.processing.processor.RuleProcessor")
    def test_group_last_seen_buffer(self, mock_processor):
        first_event_date = timezone.now() - timedelta(days=90)
        event1 = self.create_event(
            data={"message": "testing"},
            project_id=self.project.id,
        )
        group1 = event1.group
        group1.update(last_seen=first_event_date)

        event2 = self.create_event(data={"message": "testing"}, project_id=self.project.id)
        # Manually set last_seen back to the first event's date to simulate the
        # last_seen update still sitting in the buffer.
        event2.group.last_seen = first_event_date
        event2.group.update(last_seen=first_event_date)
        assert event2.group_id == group1.id

        mock_callback = Mock()
        mock_futures = [Mock()]
        mock_processor.return_value.apply.return_value = [(mock_callback, mock_futures)]
        self.call_post_process_group(
            is_new=False,
            is_regression=True,
            is_new_group_environment=False,
            event=event2,
        )
        mock_processor.assert_called_with(
            EventMatcher(event2, group=group1), False, True, False, False, False
        )
        sent_group_date = mock_processor.call_args[0][0].group.last_seen
        # Check that last_seen was updated to be at least the new event's date.
        self.assertAlmostEqual(sent_group_date, event2.datetime, delta=timedelta(seconds=10))
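
# Service hooks are gated on the "projects:servicehooks" feature and only fire
# for the event types a hook subscribes to ("event.created", "event.alert").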
class ServiceHooksTestMixin(BasePostProgressGroupMixin):
    @patch("sentry.tasks.servicehooks.process_service_hook")
    def test_service_hook_fires_on_new_event(self, mock_process_service_hook):
        event = self.create_event(data={}, project_id=self.project.id)
        hook = self.create_service_hook(
            project=self.project,
            organization=self.project.organization,
            actor=self.user,
            events=["event.created"],
        )
        with self.feature("projects:servicehooks"):
            self.call_post_process_group(
                is_new=False,
                is_regression=False,
                is_new_group_environment=False,
                event=event,
            )
        mock_process_service_hook.delay.assert_called_once_with(
            servicehook_id=hook.id, event=EventMatcher(event)
        )

    @patch("sentry.tasks.servicehooks.process_service_hook")
    @patch("sentry.rules.processing.processor.RuleProcessor")
    def test_service_hook_fires_on_alert(self, mock_processor, mock_process_service_hook):
        event = self.create_event(data={}, project_id=self.project.id)

        mock_callback = Mock()
        mock_futures = [Mock()]
        mock_processor.return_value.apply.return_value = [(mock_callback, mock_futures)]

        hook = self.create_service_hook(
            project=self.project,
            organization=self.project.organization,
            actor=self.user,
            events=["event.alert"],
        )
        with self.feature("projects:servicehooks"):
            self.call_post_process_group(
                is_new=False,
                is_regression=False,
                is_new_group_environment=False,
                event=event,
            )
        mock_process_service_hook.delay.assert_called_once_with(
            servicehook_id=hook.id, event=EventMatcher(event)
        )

    @patch("sentry.tasks.servicehooks.process_service_hook")
    @patch("sentry.rules.processing.processor.RuleProcessor")
    def test_service_hook_does_not_fire_without_alert(
        self, mock_processor, mock_process_service_hook
    ):
        event = self.create_event(data={}, project_id=self.project.id)

        mock_processor.return_value.apply.return_value = []

        self.create_service_hook(
            project=self.project,
            organization=self.project.organization,
            actor=self.user,
            events=["event.alert"],
        )
        with self.feature("projects:servicehooks"):
            self.call_post_process_group(
                is_new=False,
                is_regression=False,
                is_new_group_environment=False,
                event=event,
            )
        assert not mock_process_service_hook.delay.mock_calls

    @patch("sentry.tasks.servicehooks.process_service_hook")
    def test_service_hook_does_not_fire_without_event(self, mock_process_service_hook):
        event = self.create_event(data={}, project_id=self.project.id)

        self.create_service_hook(
            project=self.project, organization=self.project.organization, actor=self.user, events=[]
        )
        with self.feature("projects:servicehooks"):
            self.call_post_process_group(
                is_new=True,
                is_regression=False,
                is_new_group_environment=False,
                event=event,
            )
        assert not mock_process_service_hook.delay.mock_calls
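
# Sentry-app webhooks: a "created" resource change fires for new groups, and
# for error events only when the organizations:integrations-event-hooks flag
# and an "error.created" service hook are both present.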
class ResourceChangeBoundsTestMixin(BasePostProgressGroupMixin):
    @patch("sentry.tasks.sentry_apps.process_resource_change_bound.delay")
    def test_processes_resource_change_task_on_new_group(self, delay):
        event = self.create_event(data={}, project_id=self.project.id)
        group = event.group
        self.call_post_process_group(
            is_new=True,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        delay.assert_called_once_with(action="created", sender="Group", instance_id=group.id)

    @with_feature("organizations:integrations-event-hooks")
    @patch("sentry.tasks.sentry_apps.process_resource_change_bound.delay")
    def test_processes_resource_change_task_on_error_events(self, delay):
        event = self.create_event(
            data={
                "message": "Foo bar",
                "exception": {"type": "Foo", "value": "oh no"},
                "level": "error",
                "timestamp": iso_format(timezone.now()),
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        self.create_service_hook(
            project=self.project,
            organization=self.project.organization,
            actor=self.user,
            events=["error.created"],
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        delay.assert_called_once_with(
            action="created",
            sender="Error",
            instance_id=event.event_id,
            instance=EventMatcher(event),
        )

    @with_feature("organizations:integrations-event-hooks")
    @patch("sentry.tasks.sentry_apps.process_resource_change_bound.delay")
    def test_processes_resource_change_task_not_called_for_non_errors(self, delay):
        event = self.create_event(
            data={
                "message": "Foo bar",
                "level": "info",
                "timestamp": iso_format(timezone.now()),
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assert not delay.called

    @patch("sentry.tasks.sentry_apps.process_resource_change_bound.delay")
    def test_processes_resource_change_task_not_called_without_feature_flag(self, delay):
        event = self.create_event(
            data={
                "message": "Foo bar",
                "level": "info",
                "timestamp": iso_format(timezone.now()),
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assert not delay.called

    @with_feature("organizations:integrations-event-hooks")
    @patch("sentry.tasks.sentry_apps.process_resource_change_bound.delay")
    def test_processes_resource_change_task_not_called_without_error_created(self, delay):
        event = self.create_event(
            data={
                "message": "Foo bar",
                "level": "error",
                "exception": {"type": "Foo", "value": "oh no"},
                "timestamp": iso_format(timezone.now()),
            },
            project_id=self.project.id,
            assert_no_errors=False,
        )
        self.create_service_hook(
            project=self.project, organization=self.project.organization, actor=self.user, events=[]
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assert not delay.called
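
# Inbox transitions: new groups land in the inbox with reason NEW; a resolved
# group that receives another event is reopened with substatus REGRESSED and a
# REGRESSION inbox entry.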
class InboxTestMixin(BasePostProgressGroupMixin):
    @patch("sentry.rules.processing.processor.RuleProcessor")
    def test_group_inbox_regression(self, mock_processor):
        new_event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
        group = new_event.group
        assert group.status == GroupStatus.UNRESOLVED
        assert group.substatus == GroupSubStatus.ONGOING

        self.call_post_process_group(
            is_new=True,
            is_regression=True,
            is_new_group_environment=False,
            event=new_event,
        )
        assert GroupInbox.objects.filter(group=group, reason=GroupInboxReason.NEW.value).exists()
        GroupInbox.objects.filter(group=group).delete()  # Delete so it creates the .REGRESSION entry.
        group.refresh_from_db()
        assert group.status == GroupStatus.UNRESOLVED
        assert group.substatus == GroupSubStatus.NEW
        mock_processor.assert_called_with(EventMatcher(new_event), True, True, False, False, False)

        # resolve the new issue so regression actually happens
        group.status = GroupStatus.RESOLVED
        group.substatus = None
        group.active_at = group.active_at - timedelta(minutes=1)
        group.save(update_fields=["status", "substatus", "active_at"])

        # trigger a transition from resolved to regressed by firing an event that groups to that issue
        regressed_event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
        assert regressed_event.group == new_event.group

        group = regressed_event.group
        group.refresh_from_db()
        assert group.status == GroupStatus.UNRESOLVED
        assert group.substatus == GroupSubStatus.REGRESSED
        self.call_post_process_group(
            is_new=False,
            is_regression=True,
            is_new_group_environment=False,
            event=regressed_event,
        )
        mock_processor.assert_called_with(
            EventMatcher(regressed_event), False, True, False, False, False
        )

        group.refresh_from_db()
        assert group.status == GroupStatus.UNRESOLVED
        assert group.substatus == GroupSubStatus.REGRESSED
        assert GroupInbox.objects.filter(
            group=group, reason=GroupInboxReason.REGRESSION.value
        ).exists()
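
# Ownership-based auto-assignment. make_ownership() installs path-matching
# rules; the tests assert which owner wins the assignment, how GroupOwner rows
# accumulate, and how caching and rate limits debounce the calculation.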
class AssignmentTestMixin(BasePostProgressGroupMixin):
    def make_ownership(self, extra_rules=None):
        self.user_2 = self.create_user()
        self.create_team_membership(team=self.team, user=self.user_2)
        rules = [
            Rule(Matcher("path", "src/app/*"), [Owner("team", self.team.name)]),
            Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)]),
            Rule(Matcher("path", "tests/*"), [Owner("user", self.user_2.email)]),
        ]
        if extra_rules:
            rules.extend(extra_rules)
        self.prj_ownership = ProjectOwnership.objects.create(
            project_id=self.project.id,
            schema=dump_schema(rules),
            fallthrough=True,
            auto_assignment=True,
        )

    def test_owner_assignment_order_precedence(self):
        self.make_ownership()
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee.user_id == self.user.id
        assert assignee.team is None

        owners = list(GroupOwner.objects.filter(group=event.group))
        assert len(owners) == 2
        assert {(self.user.id, None), (None, self.team.id)} == {
            (o.user_id, o.team_id) for o in owners
        }
        activity = Activity.objects.filter(group=event.group).first()
        assert activity.data == {
            "assignee": str(self.user.id),
            "assigneeEmail": self.user.email,
            "assigneeType": "user",
            "integration": ActivityIntegration.PROJECT_OWNERSHIP.value,
            "rule": str(Rule(Matcher("path", "src/*"), [Owner("user", self.user.email)])),
        }

    def test_owner_assignment_extra_groups(self):
        extra_user = self.create_user()
        self.create_team_membership(self.team, user=extra_user)
        self.make_ownership(
            [Rule(Matcher("path", "src/app/things/in/*"), [Owner("user", extra_user.email)])],
        )
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/things/in/a/path/example2.py"}]},
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee.user_id == extra_user.id
        assert assignee.team is None

        owners = list(GroupOwner.objects.filter(group=event.group))
        assert len(owners) == 2
        assert {(extra_user.id, None), (self.user.id, None)} == {
            (o.user_id, o.team_id) for o in owners
        }

    def test_owner_assignment_existing_owners(self):
        extra_team = self.create_team()
        ProjectTeam.objects.create(team=extra_team, project=self.project)
        self.make_ownership(
            [Rule(Matcher("path", "src/app/things/in/*"), [Owner("team", extra_team.slug)])],
        )
        GroupOwner.objects.create(
            group=self.group,
            project=self.project,
            organization=self.organization,
            user_id=self.user.id,
            type=GroupOwnerType.OWNERSHIP_RULE.value,
        )
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/things/in/a/path/example2.py"}]},
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee.user_id is None
        assert assignee.team == extra_team

        owners = list(GroupOwner.objects.filter(group=event.group))
        assert {(None, extra_team.id), (self.user.id, None)} == {
            (o.user_id, o.team_id) for o in owners
        }

    def test_owner_assignment_assign_user(self):
        self.make_ownership()
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app.py"}]},
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee.user_id == self.user.id
        assert assignee.team is None

    def test_owner_assignment_ownership_no_matching_owners(self):
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assert not event.group.assignee_set.exists()

    def test_owner_assignment_existing_assignment(self):
        self.make_ownership()
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
            },
            project_id=self.project.id,
        )
        event.group.assignee_set.create(team=self.team, project=self.project)
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee.user_id is None
        assert assignee.team == self.team

    def test_only_first_assignment_works(self):
        self.make_ownership()
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
                "fingerprint": ["group1"],
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee.user_id == self.user.id
        assert assignee.team is None

        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "tests/src/app/test_example.py"}]},
                "fingerprint": ["group1"],
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        # Assignment shouldn't change.
        assert assignee.user_id == self.user.id
        assert assignee.team is None

    def test_owner_assignment_owner_is_gone(self):
        self.make_ownership()
        # Remove the team so the rule match will fail to resolve
        self.team.delete()

        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee is None

    def test_suspect_committer_affect_cache_debouncing_issue_owners_calculations(self):
        self.make_ownership()
        committer = GroupOwner(
            group=self.created_event.group,
            project=self.created_event.project,
            organization=self.created_event.project.organization,
            type=GroupOwnerType.SUSPECT_COMMIT.value,
        )
        committer.save()
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
            },
            project_id=self.project.id,
        )
        event.group.assignee_set.create(team=self.team, project=self.project)
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee.user_id is None
        assert assignee.team == self.team

    def test_owner_assignment_when_owners_have_been_unassigned(self):
        """
        Ensure that when assignees have been unassigned and the project's
        ownership rules change, group owners are recalculated and the group is
        reassigned.
        """
        # Create rules and check assignees
        self.make_ownership()
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
            },
            project_id=self.project.id,
        )
        event_2 = self.create_event(
            data={
                "message": "Exception",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/integration.py"}]},
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event_2,
        )
        assignee = event.group.assignee_set.first()
        assert assignee.user_id == self.user.id

        user_3 = self.create_user()
        self.create_team_membership(self.team, user=user_3)

        # De-assign group assignees
        GroupAssignee.objects.deassign(event.group, self.user)
        assert event.group.assignee_set.first() is None

        # Change ProjectOwnership rules
        rules = [
            Rule(Matcher("path", "src/*"), [Owner("user", user_3.email)]),
        ]
        self.prj_ownership.schema = dump_schema(rules)
        self.prj_ownership.save()

        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event_2,
        )
        # Group should be re-assigned to the new group owner
        assignee = event.group.assignee_set.first()
        assert assignee.user_id == user_3.id

        # De-assign group assignees
        GroupAssignee.objects.deassign(event.group, user_service.get_user(user_id=assignee.user_id))
        assert event.group.assignee_set.first() is None

        user_4 = self.create_user()
        self.create_team_membership(self.team, user=user_4)
        self.prj_ownership.schema = dump_schema([])
        self.prj_ownership.save()

        code_owners_rule = Rule(
            Matcher("codeowners", "*.py"),
            [Owner("user", user_4.email)],
        )
        self.code_mapping = self.create_code_mapping(project=self.project)
        self.code_owners = self.create_codeowners(
            self.project,
            self.code_mapping,
            schema=dump_schema([code_owners_rule]),
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event_2,
        )
        # Group should be re-assigned to the new group owner
        assignee = event.group.assignee_set.first()
        assert assignee.user_id == user_4.id

    def test_auto_assignment_when_owners_have_been_unassigned(self):
        """
        Ensure that when the assignee has been unassigned and the project's
        ownership rules change, the group owner is recalculated and the group
        is reassigned.
        """
        # Create rules and check assignees
        self.make_ownership()
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
            },
            project_id=self.project.id,
        )
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = (
            GroupOwner.objects.filter()
            .exclude(user_id__isnull=True, team_id__isnull=True)
            .order_by("type")
            .first()
        )
        assert assignee.user_id == self.user.id

        user_3 = self.create_user()
        self.create_team_membership(self.team, user=user_3)

        # Set assignee_exists cache to self.user
        cache.set(ASSIGNEE_EXISTS_KEY(event.group_id), self.user, ASSIGNEE_EXISTS_DURATION)
        # De-assign group assignees
        GroupAssignee.objects.deassign(event.group, self.user)
        assert event.group.assignee_set.first() is None

        # Change ProjectOwnership rules
        rules = [
            Rule(Matcher("path", "src/*"), [Owner("user", user_3.email)]),
        ]
        self.prj_ownership.schema = dump_schema(rules)
        self.prj_ownership.save()

        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        # Mimic the filter used in get_autoassigned_owner_cached to get the
        # issue owner to be auto-assigned.
        assignee = (
            GroupOwner.objects.filter()
            .exclude(user_id__isnull=True, team_id__isnull=True)
            .order_by("type")
            .first()
        )
        # Group should be re-assigned to the new group owner
        assert assignee.user_id == user_3.id

    def test_ensure_when_assignees_and_owners_are_cached_does_not_cause_unbound_errors(self):
        self.make_ownership()
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app.py"}]},
            },
            project_id=self.project.id,
        )
        assignee_cache_key = "assignee_exists:1:%s" % event.group.id
        owner_cache_key = "owner_exists:1:%s" % event.group.id
        for key in [assignee_cache_key, owner_cache_key]:
            cache.set(key, True)
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )

    def test_auto_assignment_when_owners_are_invalid(self):
        """
        Ensure that invalid group owners (left over from bugs) are deleted and
        not auto-assigned when no valid issue owner exists.
        """
        event = self.create_event(
            data={
                "message": "oh no",
                "platform": "python",
                "stacktrace": {"frames": [{"filename": "src/app/example.py"}]},
            },
            project_id=self.project.id,
        )
        # Hard code an invalid group owner
        invalid_codeowner = GroupOwner(
            group=event.group,
            project=event.project,
            organization=event.project.organization,
            type=GroupOwnerType.CODEOWNERS.value,
            context={"rule": "codeowners:/**/*.css " + self.user.email},
            user_id=self.user.id,
        )
        invalid_codeowner.save()
        self.call_post_process_group(
            is_new=False,
            is_regression=False,
            is_new_group_environment=False,
            event=event,
        )
        assignee = event.group.assignee_set.first()
        assert assignee is None
        assert len(GroupOwner.objects.filter(group_id=event.group)) == 0
- @patch("sentry.tasks.post_process.logger")
- def test_debounces_handle_owner_assignments(self, logger):
- self.make_ownership()
- event = self.create_event(
- data={
- "message": "oh no",
- "platform": "python",
- "stacktrace": {"frames": [{"filename": "src/app.py"}]},
- },
- project_id=self.project.id,
- )
- cache.set(ISSUE_OWNERS_DEBOUNCE_KEY(event.group_id), True, ISSUE_OWNERS_DEBOUNCE_DURATION)
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=False,
- event=event,
- )
- logger.info.assert_any_call(
- "handle_owner_assignment.issue_owners_exist",
- extra={
- "event": event.event_id,
- "group": event.group_id,
- "project": event.project_id,
- "organization": event.project.organization_id,
- "reason": "issue_owners_exist",
- },
- )
- @patch("sentry.tasks.post_process.logger")
- def test_issue_owners_should_ratelimit(self, mock_logger):
- cache.set(
- f"issue_owner_assignment_ratelimiter:{self.project.id}",
- (set(range(0, ISSUE_OWNERS_PER_PROJECT_PER_MIN_RATELIMIT * 10, 10)), datetime.now()),
- )
- event = self.create_event(
- data={
- "message": "oh no",
- "platform": "python",
- "stacktrace": {"frames": [{"filename": "src/app.py"}]},
- },
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=False,
- event=event,
- )
- expected_extra = {
- "event": event.event_id,
- "group": event.group_id,
- "project": event.project_id,
- "organization": event.project.organization_id,
- "reason": "ratelimited",
- }
- mock_logger.info.assert_any_call(
- "handle_owner_assignment.ratelimited", extra=expected_extra
- )
- mock_logger.reset_mock()
- # Raise this organization's ratelimit
- with self.feature("organizations:increased-issue-owners-rate-limit"):
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=False,
- event=event,
- )
- with pytest.raises(AssertionError):
- mock_logger.info.assert_any_call(
- "handle_owner_assignment.ratelimited", extra=expected_extra
- )
- # Still enforce the raised limit
- mock_logger.reset_mock()
- cache.set(
- f"issue_owner_assignment_ratelimiter:{self.project.id}",
- (
- set(range(0, HIGHER_ISSUE_OWNERS_PER_PROJECT_PER_MIN_RATELIMIT * 10, 10)),
- datetime.now(),
- ),
- )
- with self.feature("organizations:increased-issue-owners-rate-limit"):
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=False,
- event=event,
- )
- mock_logger.info.assert_any_call(
- "handle_owner_assignment.ratelimited", extra=expected_extra
- )
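
# Suspect commits: the in_app frame plus the repo and code mapping created in
# setUp let the GitHub blame integrations attribute the event to a commit and
# record a SUSPECT_COMMIT group owner.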
- class ProcessCommitsTestMixin(BasePostProgressGroupMixin):
- github_blame_return_value = {
- "commitId": "asdfwreqr",
- "committedDate": (timezone.now() - timedelta(days=2)),
- "commitMessage": "placeholder commit message",
- "commitAuthorName": "",
- "commitAuthorEmail": "admin@localhost",
- }
- def setUp(self):
- self.created_event = self.create_event(
- data={
- "message": "Kaboom!",
- "platform": "python",
- "timestamp": iso_format(before_now(seconds=10)),
- "stacktrace": {
- "frames": [
- {
- "function": "handle_set_commits",
- "abs_path": "/usr/src/sentry/src/sentry/tasks.py",
- "module": "sentry.tasks",
- "in_app": False,
- "lineno": 30,
- "filename": "sentry/tasks.py",
- },
- {
- "function": "set_commits",
- "abs_path": "/usr/src/sentry/src/sentry/models/release.py",
- "module": "sentry.models.release",
- "in_app": True,
- "lineno": 39,
- "filename": "sentry/models/release.py",
- },
- ]
- },
- "fingerprint": ["put-me-in-the-control-group"],
- },
- project_id=self.project.id,
- )
- self.cache_key = write_event_to_cache(self.created_event)
- self.repo = self.create_repo(
- name="org/example", integration_id=self.integration.id, provider="integrations:github"
- )
- self.code_mapping = self.create_code_mapping(
- repo=self.repo, project=self.project, stack_root="sentry/", source_root="sentry/"
- )
- self.commit_author = self.create_commit_author(project=self.project, user=self.user)
- self.commit = self.create_commit(
- project=self.project,
- repo=self.repo,
- author=self.commit_author,
- key="asdfwreqr",
- message="placeholder commit message",
- )
- self.github_blame_all_files_return_value = [
- FileBlameInfo(
- code_mapping=self.code_mapping,
- lineno=39,
- path="sentry/models/release.py",
- ref="master",
- repo=self.repo,
- commit=CommitInfo(
- commitId="asdfwreqr",
- committedDate=(timezone.now() - timedelta(days=2)),
- commitMessage="placeholder commit message",
- commitAuthorName="",
- commitAuthorEmail="admin@localhost",
- ),
- )
- ]
- @patch(
- "sentry.integrations.github.GitHubIntegration.get_commit_context_all_frames",
- return_value=github_blame_return_value,
- )
- def test_logic_fallback_no_scm(self, mock_get_commit_context):
- with assume_test_silo_mode(SiloMode.CONTROL):
- with unguarded_write(using=router.db_for_write(Integration)):
- Integration.objects.all().delete()
- integration = self.create_provider_integration(provider="bitbucket")
- integration.add_organization(self.organization)
- with self.tasks():
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=self.created_event,
- )
- assert not mock_get_commit_context.called
- @patch(
- "sentry.integrations.github_enterprise.GitHubEnterpriseIntegration.get_commit_context_all_frames",
- )
- def test_github_enterprise(self, mock_get_commit_context):
- mock_get_commit_context.return_value = self.github_blame_all_files_return_value
- with assume_test_silo_mode(SiloMode.CONTROL):
- with unguarded_write(using=router.db_for_write(Integration)):
- Integration.objects.all().delete()
- integration = self.create_provider_integration(
- external_id="35.232.149.196:12345",
- provider="github_enterprise",
- metadata={
- "domain_name": "35.232.149.196/baxterthehacker",
- "installation_id": "12345",
- "installation": {"id": "2", "private_key": "private_key", "verify_ssl": True},
- },
- )
- organization_integration = integration.add_organization(self.organization)
- self.repo.update(integration_id=integration.id, provider="integrations:github_enterprise")
- self.code_mapping.update(organization_integration_id=organization_integration.id)
- with self.tasks():
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=self.created_event,
- )
- assert GroupOwner.objects.get(
- group=self.created_event.group,
- project=self.created_event.project,
- organization=self.created_event.project.organization,
- type=GroupOwnerType.SUSPECT_COMMIT.value,
- )
- @patch("sentry.integrations.github.GitHubIntegration.get_commit_context_all_frames")
- def test_skip_when_not_is_new(self, mock_get_commit_context):
- """
- Tests that we do not process commit context if the group isn't new.
- """
- with self.tasks():
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=True,
- event=self.created_event,
- )
- assert not mock_get_commit_context.called
- assert not GroupOwner.objects.filter(
- group=self.created_event.group,
- project=self.created_event.project,
- organization=self.created_event.project.organization,
- type=GroupOwnerType.SUSPECT_COMMIT.value,
- ).exists()
- @patch(
- "sentry.integrations.github.GitHubIntegration.get_commit_context_all_frames",
- )
- def test_does_not_skip_when_is_new(self, mock_get_commit_context):
- """
- Tests that commit context is processed when the group is new.
- """
- mock_get_commit_context.return_value = self.github_blame_all_files_return_value
- with self.tasks():
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=self.created_event,
- )
- assert mock_get_commit_context.called
- assert GroupOwner.objects.get(
- group=self.created_event.group,
- project=self.created_event.project,
- organization=self.created_event.project.organization,
- type=GroupOwnerType.SUSPECT_COMMIT.value,
- )
- class SnoozeTestSkipSnoozeMixin(BasePostProgressGroupMixin):
- @patch("sentry.signals.issue_unignored.send_robust")
- @patch("sentry.rules.processing.processor.RuleProcessor")
- def test_invalidates_snooze_issue_platform(self, mock_processor, mock_send_unignored_robust):
- event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
- group = event.group
- should_detect_escalation = group.issue_type.should_detect_escalation(
- self.project.organization
- )
- # Check for has_reappeared=False if is_new=True
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert GroupInbox.objects.filter(group=group, reason=GroupInboxReason.NEW.value).exists()
- GroupInbox.objects.filter(group=group).delete() # Delete so it creates the UNIGNORED entry.
- Activity.objects.filter(group=group).delete()
- mock_processor.assert_called_with(EventMatcher(event), True, False, True, False, False)
- event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
- group.status = GroupStatus.IGNORED
- group.substatus = GroupSubStatus.UNTIL_CONDITION_MET
- group.save(update_fields=["status", "substatus"])
- snooze = GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(hours=1))
- # Check for has_reappeared=True if is_new=False
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_processor.assert_called_with(EventMatcher(event), False, False, True, True, False)
- if should_detect_escalation:
- assert not GroupSnooze.objects.filter(id=snooze.id).exists()
- else:
- assert GroupSnooze.objects.filter(id=snooze.id).exists()
- group.refresh_from_db()
- if should_detect_escalation:
- assert group.status == GroupStatus.UNRESOLVED
- assert group.substatus == GroupSubStatus.ONGOING
- assert GroupInbox.objects.filter(
- group=group, reason=GroupInboxReason.ONGOING.value
- ).exists()
- assert Activity.objects.filter(
- group=group, project=group.project, type=ActivityType.SET_UNRESOLVED.value
- ).exists()
- assert mock_send_unignored_robust.called
- else:
- assert group.status == GroupStatus.IGNORED
- assert group.substatus == GroupSubStatus.UNTIL_CONDITION_MET
- assert not GroupInbox.objects.filter(
- group=group, reason=GroupInboxReason.ESCALATING.value
- ).exists()
- assert not Activity.objects.filter(
- group=group, project=group.project, type=ActivityType.SET_ESCALATING.value
- ).exists()
- assert not mock_send_unignored_robust.called
- class SnoozeTestMixin(BasePostProgressGroupMixin):
- @patch("sentry.signals.issue_unignored.send_robust")
- @patch("sentry.rules.processing.processor.RuleProcessor")
- def test_invalidates_snooze(self, mock_processor, mock_send_unignored_robust):
- event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
- group = event.group
- # Check for has_reappeared=False if is_new=True
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert GroupInbox.objects.filter(group=group, reason=GroupInboxReason.NEW.value).exists()
- GroupInbox.objects.filter(group=group).delete() # Delete so it creates the UNIGNORED entry.
- Activity.objects.filter(group=group).delete()
- mock_processor.assert_called_with(EventMatcher(event), True, False, True, False, False)
- event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
- group.status = GroupStatus.IGNORED
- group.substatus = GroupSubStatus.UNTIL_CONDITION_MET
- group.save(update_fields=["status", "substatus"])
- snooze = GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(hours=1))
- # Check for has_reappeared=True if is_new=False
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_processor.assert_called_with(EventMatcher(event), False, False, True, True, False)
- assert not GroupSnooze.objects.filter(id=snooze.id).exists()
- group.refresh_from_db()
- assert group.status == GroupStatus.UNRESOLVED
- assert group.substatus == GroupSubStatus.ONGOING
- assert GroupInbox.objects.filter(
- group=group, reason=GroupInboxReason.ONGOING.value
- ).exists()
- assert Activity.objects.filter(
- group=group, project=group.project, type=ActivityType.SET_UNRESOLVED.value
- ).exists()
- assert mock_send_unignored_robust.called
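- # times_seen increments can sit in the Redis buffer; the snooze count check must include them, not just the persisted counter.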
- @override_settings(SENTRY_BUFFER="sentry.buffer.redis.RedisBuffer")
- @patch("sentry.signals.issue_unignored.send_robust")
- @patch("sentry.rules.processing.processor.RuleProcessor")
- def test_invalidates_snooze_with_buffers(self, mock_processor, send_robust):
- redis_buffer = RedisBuffer()
- with (
- mock.patch("sentry.buffer.backend.get", redis_buffer.get),
- mock.patch("sentry.buffer.backend.incr", redis_buffer.incr),
- ):
- event = self.create_event(
- data={"message": "testing", "fingerprint": ["group-1"]}, project_id=self.project.id
- )
- event_2 = self.create_event(
- data={"message": "testing", "fingerprint": ["group-1"]}, project_id=self.project.id
- )
- group = event.group
- group.times_seen = 50
- group.status = GroupStatus.IGNORED
- group.substatus = GroupSubStatus.UNTIL_CONDITION_MET
- group.save(update_fields=["times_seen", "status", "substatus"])
- snooze = GroupSnooze.objects.create(group=group, count=100, state={"times_seen": 0})
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert GroupSnooze.objects.filter(id=snooze.id).exists()
- buffer.backend.incr(Group, {"times_seen": 60}, filters={"id": event.group.id})
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=True,
- event=event_2,
- )
- assert not GroupSnooze.objects.filter(id=snooze.id).exists()
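- # A snooze that has not yet expired must be left in place.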
- @patch("sentry.rules.processing.processor.RuleProcessor")
- def test_maintains_valid_snooze(self, mock_processor):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- assert group.status == GroupStatus.UNRESOLVED
- assert group.substatus == GroupSubStatus.ONGOING
- snooze = GroupSnooze.objects.create(group=group, until=timezone.now() + timedelta(hours=1))
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_processor.assert_called_with(EventMatcher(event), True, False, True, False, False)
- assert GroupSnooze.objects.filter(id=snooze.id).exists()
- group.refresh_from_db()
- assert group.status == GroupStatus.UNRESOLVED
- assert group.substatus == GroupSubStatus.NEW
- @patch("sentry.issues.escalating.is_escalating", return_value=(True, 0))
- def test_forecast_in_activity(self, mock_is_escalating):
- """
- Test that the forecast is added to the activity for escalating issues that were
- previously ignored until_escalating.
- """
- event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
- group = event.group
- group.status = GroupStatus.IGNORED
- group.substatus = GroupSubStatus.UNTIL_ESCALATING
- group.save()
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert Activity.objects.filter(
- group=group,
- project=group.project,
- type=ActivityType.SET_ESCALATING.value,
- data={"event_id": event.event_id, "forecast": 0},
- ).exists()
- @with_feature("projects:first-event-severity-new-escalation")
- @patch("sentry.issues.escalating.is_escalating")
- def test_skip_escalation_logic_for_new_groups(self, mock_is_escalating):
- """
- Test that we skip checking escalation in the process_snoozes job if the group is less than
- a day old.
- """
- event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
- group = event.group
- group.status = GroupStatus.IGNORED
- group.substatus = GroupSubStatus.UNTIL_ESCALATING
- group.update(first_seen=timezone.now() - timedelta(hours=1))
- group.save()
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_is_escalating.assert_not_called()
- @patch("sentry.utils.sdk_crashes.sdk_crash_detection.sdk_crash_detection")
- class SDKCrashMonitoringTestMixin(BasePostProgressGroupMixin):
- @with_feature("organizations:sdk-crash-detection")
- @override_options(
- {
- "issues.sdk_crash_detection.cocoa.project_id": 1234,
- "issues.sdk_crash_detection.cocoa.sample_rate": 1.0,
- "issues.sdk_crash_detection.react-native.project_id": 12345,
- "issues.sdk_crash_detection.react-native.sample_rate": 1.0,
- "issues.sdk_crash_detection.react-native.organization_allowlist": [1],
- }
- )
- def test_sdk_crash_monitoring_is_called(self, mock_sdk_crash_detection):
- event = self.create_event(
- data={"message": "testing"},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_sdk_crash_detection.detect_sdk_crash.assert_called_once()
- args = mock_sdk_crash_detection.detect_sdk_crash.call_args[-1]
- assert args["event"].project.id == event.project.id
- assert len(args["configs"]) == 2
- cocoa_config = args["configs"][0]
- assert cocoa_config.sdk_name == SdkName.Cocoa
- assert cocoa_config.project_id == 1234
- assert cocoa_config.sample_rate == 1.0
- assert cocoa_config.organization_allowlist == []
- react_native_config = args["configs"][1]
- assert react_native_config.sdk_name == SdkName.ReactNative
- assert react_native_config.project_id == 12345
- assert react_native_config.sample_rate == 1.0
- assert react_native_config.organization_allowlist == [1]
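- # A 0.0 sample rate should disable crash detection for that SDK entirely.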
- @with_feature("organizations:sdk-crash-detection")
- @override_options(
- {
- "issues.sdk_crash_detection.cocoa.project_id": 1234,
- "issues.sdk_crash_detection.cocoa.sample_rate": 0.0,
- }
- )
- def test_sdk_crash_monitoring_not_called_without_sample_rate(self, mock_sdk_crash_detection):
- event = self.create_event(
- data={"message": "testing"},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_sdk_crash_detection.detect_sdk_crash.assert_not_called()
- def test_sdk_crash_monitoring_is_not_called_with_disabled_feature(
- self, mock_sdk_crash_detection
- ):
- event = self.create_event(
- data={"message": "testing"},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_sdk_crash_detection.detect_sdk_crash.assert_not_called()
- @override_options(
- {
- "issues.sdk_crash_detection.cocoa.project_id": None,
- }
- )
- @with_feature("organizations:sdk-crash-detection")
- def test_sdk_crash_monitoring_is_not_called_without_project_id(self, mock_sdk_crash_detection):
- event = self.create_event(
- data={"message": "testing"},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_sdk_crash_detection.detect_sdk_crash.assert_not_called()
- @mock.patch.object(replays_kafka, "get_kafka_producer_cluster_options")
- @mock.patch.object(replays_kafka, "KafkaPublisher")
- @mock.patch("sentry.utils.metrics.incr")
- class ReplayLinkageTestMixin(BasePostProgressGroupMixin):
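- # Linking an error event to a replay publishes an "event_link" payload to the ingest-replay-events topic.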
- def test_replay_linkage(self, incr, kafka_producer, kafka_publisher):
- replay_id = uuid.uuid4().hex
- event = self.create_event(
- data={"message": "testing", "contexts": {"replay": {"replay_id": replay_id}}},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert kafka_producer.return_value.publish.call_count == 1
- assert kafka_producer.return_value.publish.call_args[0][0] == "ingest-replay-events"
- ret_value = json.loads(kafka_producer.return_value.publish.call_args[0][1])
- assert ret_value["type"] == "replay_event"
- assert ret_value["start_time"]
- assert ret_value["replay_id"] == replay_id
- assert ret_value["project_id"] == self.project.id
- assert ret_value["segment_id"] is None
- assert ret_value["retention_days"] == 90
- assert ret_value["payload"] == {
- "type": "event_link",
- "replay_id": replay_id,
- "error_id": event.event_id,
- "timestamp": int(event.datetime.timestamp()),
- "event_hash": str(uuid.UUID(md5((event.event_id).encode("utf-8")).hexdigest())),
- }
- incr.assert_any_call("post_process.process_replay_link.id_sampled")
- incr.assert_any_call("post_process.process_replay_link.id_exists")
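- # The replay id may also arrive as a "replayId" tag rather than the replay context.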
- def test_replay_linkage_with_tag(self, incr, kafka_producer, kafka_publisher):
- replay_id = uuid.uuid4().hex
- event = self.create_event(
- data={"message": "testing", "tags": {"replayId": replay_id}},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert kafka_producer.return_value.publish.call_count == 1
- assert kafka_producer.return_value.publish.call_args[0][0] == "ingest-replay-events"
- ret_value = json.loads(kafka_producer.return_value.publish.call_args[0][1])
- assert ret_value["type"] == "replay_event"
- assert ret_value["start_time"]
- assert ret_value["replay_id"] == replay_id
- assert ret_value["project_id"] == self.project.id
- assert ret_value["segment_id"] is None
- assert ret_value["retention_days"] == 90
- assert ret_value["payload"] == {
- "type": "event_link",
- "replay_id": replay_id,
- "error_id": event.event_id,
- "timestamp": int(event.datetime.timestamp()),
- "event_hash": str(uuid.UUID(md5((event.event_id).encode("utf-8")).hexdigest())),
- }
- incr.assert_any_call("post_process.process_replay_link.id_sampled")
- incr.assert_any_call("post_process.process_replay_link.id_exists")
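- # A PII-scrubbed ("***") replay id is not a valid UUID, so nothing should be published.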
- def test_replay_linkage_with_tag_pii_scrubbed(self, incr, kafka_producer, kafka_publisher):
- event = self.create_event(
- data={"message": "testing", "tags": {"replayId": "***"}},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert kafka_producer.return_value.publish.call_count == 0
- def test_no_replay(self, incr, kafka_producer, kafka_publisher):
- event = self.create_event(
- data={"message": "testing"},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert kafka_producer.return_value.publish.call_count == 0
- incr.assert_any_call("post_process.process_replay_link.id_sampled")
- def test_0_sample_rate_replays(self, incr, kafka_producer, kafka_publisher):
- event = self.create_event(
- data={"message": "testing"},
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- assert kafka_producer.return_value.publish.call_count == 0
- for args, _ in incr.call_args_list:
- # The trailing comma matters: without it the parentheses are a no-op and the
- # comparison is against a bare string, which can never equal an args tuple.
- self.assertNotEqual(args, ("post_process.process_replay_link.id_sampled",))
- class DetectNewEscalationTestMixin(BasePostProgressGroupMixin):
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- @with_feature("projects:issue-priority")
- def test_has_escalated(self, mock_run_post_process_job):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- group.update(
- first_seen=timezone.now() - timedelta(hours=1),
- times_seen=10,
- priority=PriorityLevel.LOW,
- )
- event.group = Group.objects.get(id=group.id)
- with self.feature("projects:first-event-severity-new-escalation"):
- with patch("sentry.issues.issue_velocity.calculate_threshold", return_value=9):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- job = mock_run_post_process_job.call_args[0][0]
- assert job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.ESCALATING
- assert group.priority == PriorityLevel.MEDIUM
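- # Without the first-event-severity-new-escalation feature, the group is not marked escalated even at high volume.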
- @patch("sentry.issues.issue_velocity.get_latest_threshold", return_value=1)
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- @with_feature("projects:issue-priority")
- def test_has_escalated_no_flag(self, mock_run_post_process_job, mock_threshold):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- group.update(first_seen=timezone.now() - timedelta(hours=1), times_seen=10000)
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.NEW
- assert group.priority == PriorityLevel.HIGH
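- # Groups older than the escalation window (a day here) should skip the velocity threshold lookup entirely.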
- @patch("sentry.issues.issue_velocity.get_latest_threshold")
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- @with_feature("projects:issue-priority")
- def test_has_escalated_old(self, mock_run_post_process_job, mock_threshold):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- group.update(first_seen=timezone.now() - timedelta(days=2), times_seen=10000)
- with self.feature("projects:first-event-severity-new-escalation"):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_threshold.assert_not_called()
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.NEW
- assert group.priority == PriorityLevel.HIGH
- @patch("sentry.issues.issue_velocity.get_latest_threshold", return_value=11)
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- @with_feature("projects:issue-priority")
- def test_has_not_escalated(self, mock_run_post_process_job, mock_threshold):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- group.update(
- first_seen=timezone.now() - timedelta(hours=1),
- times_seen=10,
- priority=PriorityLevel.LOW,
- )
- with self.feature("projects:first-event-severity-new-escalation"):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_threshold.assert_called()
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.NEW
- assert group.priority == PriorityLevel.LOW
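- # If another worker already holds the per-group detect_escalation lock, the check is skipped.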
- @patch("sentry.issues.issue_velocity.get_latest_threshold")
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- def test_has_escalated_locked(self, mock_run_post_process_job, mock_threshold):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- group.update(first_seen=timezone.now() - timedelta(hours=1), times_seen=10000)
- lock = locks.get(f"detect_escalation:{group.id}", duration=10, name="detect_escalation")
- with self.feature("projects:first-event-severity-new-escalation"), lock.acquire():
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_threshold.assert_not_called()
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.NEW
- @patch("sentry.issues.issue_velocity.get_latest_threshold")
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- def test_has_escalated_already_escalated(self, mock_run_post_process_job, mock_threshold):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- group.update(
- first_seen=timezone.now() - timedelta(hours=1),
- times_seen=10000,
- substatus=GroupSubStatus.ESCALATING,
- priority=PriorityLevel.MEDIUM,
- )
- with self.feature("projects:first-event-severity-new-escalation"):
- self.call_post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=False,
- event=event,
- )
- mock_threshold.assert_not_called()
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.ESCALATING
- assert group.priority == PriorityLevel.MEDIUM
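- # Escalation detection should only apply to groups whose substatus is still NEW.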
- @patch("sentry.issues.issue_velocity.get_latest_threshold")
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- def test_does_not_escalate_non_new_substatus(self, mock_run_post_process_job, mock_threshold):
- for substatus, status in GROUP_SUBSTATUS_TO_STATUS_MAP.items():
- if substatus == GroupSubStatus.NEW:
- continue
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- group.update(
- first_seen=timezone.now() - timedelta(hours=1),
- times_seen=10000,
- status=status,
- substatus=substatus,
- )
- group.save()
- with self.feature("projects:first-event-severity-new-escalation"):
- self.call_post_process_group(
- is_new=False, # when true, post_process sets the substatus to NEW
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_threshold.assert_not_called()
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == substatus
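- # Below the minimum event floor (times_seen below 10 in this setup) the velocity threshold is never fetched.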
- @patch("sentry.issues.issue_velocity.get_latest_threshold", return_value=8)
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- def test_no_escalation_less_than_floor(self, mock_run_post_process_job, mock_threshold):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- group.update(first_seen=timezone.now() - timedelta(hours=1), times_seen=9)
- event.group = Group.objects.get(id=group.id)
- with self.feature("projects:first-event-severity-new-escalation"):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_threshold.assert_not_called()
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.NEW
- @patch("sentry.issues.issue_velocity.get_latest_threshold", return_value=11)
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- def test_has_not_escalated_less_than_an_hour(self, mock_run_post_process_job, mock_threshold):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- # The group is less than an hour old, but the event rate is still computed over a full 1 hr window.
- group.update(first_seen=timezone.now() - timedelta(minutes=1), times_seen=10)
- event.group = Group.objects.get(id=group.id)
- with self.feature("projects:first-event-severity-new-escalation"):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.NEW
- @patch("sentry.issues.issue_velocity.get_latest_threshold", return_value=0)
- @patch("sentry.tasks.post_process.run_post_process_job", side_effect=run_post_process_job)
- def test_zero_escalation_rate(self, mock_run_post_process_job, mock_threshold):
- event = self.create_event(data={}, project_id=self.project.id)
- group = event.group
- group.update(first_seen=timezone.now() - timedelta(hours=1), times_seen=10000)
- with self.feature("projects:first-event-severity-new-escalation"):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- )
- mock_threshold.assert_called()
- job = mock_run_post_process_job.call_args[0][0]
- assert not job["has_escalated"]
- group.refresh_from_db()
- assert group.substatus == GroupSubStatus.NEW
- class PostProcessGroupErrorTest(
- TestCase,
- AssignmentTestMixin,
- ProcessCommitsTestMixin,
- CorePostProcessGroupTestMixin,
- DeriveCodeMappingsProcessGroupTestMixin,
- InboxTestMixin,
- ResourceChangeBoundsTestMixin,
- RuleProcessorTestMixin,
- ServiceHooksTestMixin,
- SnoozeTestMixin,
- SnoozeTestSkipSnoozeMixin,
- SDKCrashMonitoringTestMixin,
- ReplayLinkageTestMixin,
- DetectNewEscalationTestMixin,
- ):
- def setUp(self):
- super().setUp()
- clear_replay_publisher()
- def create_event(self, data, project_id, assert_no_errors=True):
- return self.store_event(data=data, project_id=project_id, assert_no_errors=assert_no_errors)
- def call_post_process_group(
- self, is_new, is_regression, is_new_group_environment, event, cache_key=None
- ):
- if cache_key is None:
- cache_key = write_event_to_cache(event)
- post_process_group(
- is_new=is_new,
- is_regression=is_regression,
- is_new_group_environment=is_new_group_environment,
- cache_key=cache_key,
- group_id=event.group_id,
- project_id=event.project_id,
- )
- return cache_key
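- # An error event should hit the generic metrics backend counter exactly once and emit the pipeline incr/timer metrics.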
- @with_feature("organizations:escalating-metrics-backend")
- @patch("sentry.sentry_metrics.client.generic_metrics_backend.counter")
- @patch("sentry.utils.metrics.incr")
- @patch("sentry.utils.metrics.timer")
- def test_generic_metrics_backend_counter(
- self, metric_timer_mock, metric_incr_mock, generic_metrics_backend_mock
- ):
- min_ago = iso_format(before_now(minutes=1))
- event = self.create_event(
- data={
- "exception": {
- "values": [
- {
- "type": "ZeroDivisionError",
- "stacktrace": {"frames": [{"function": f} for f in ["a", "b"]]},
- }
- ]
- },
- "timestamp": min_ago,
- "start_timestamp": min_ago,
- "contexts": {"trace": {"trace_id": "b" * 32, "span_id": "c" * 16, "op": ""}},
- },
- project_id=self.project.id,
- )
- self.call_post_process_group(
- is_new=True, is_regression=False, is_new_group_environment=True, event=event
- )
- assert generic_metrics_backend_mock.call_count == 1
- metric_incr_mock.assert_any_call(
- "sentry.tasks.post_process.post_process_group.completed",
- tags={"issue_category": "error", "pipeline": "process_rules"},
- )
- metric_timer_mock.assert_any_call(
- "tasks.post_process.run_post_process_job.pipeline.duration",
- tags={
- "pipeline": "process_rules",
- "issue_category": "error",
- "is_reprocessed": False,
- },
- )
- class PostProcessGroupPerformanceTest(
- TestCase,
- SnubaTestCase,
- CorePostProcessGroupTestMixin,
- InboxTestMixin,
- RuleProcessorTestMixin,
- SnoozeTestMixin,
- SnoozeTestSkipSnoozeMixin,
- PerformanceIssueTestCase,
- ):
- def create_event(self, data, project_id, assert_no_errors=True):
- fingerprint = data["fingerprint"][0] if data.get("fingerprint") else "some_group"
- fingerprint = f"{PerformanceNPlusOneGroupType.type_id}-{fingerprint}"
- return self.create_performance_issue(fingerprint=fingerprint)
- def call_post_process_group(
- self, is_new, is_regression, is_new_group_environment, event, cache_key=None
- ):
- if cache_key is None:
- cache_key = write_event_to_cache(event)
- with self.feature(PerformanceNPlusOneGroupType.build_post_process_group_feature_name()):
- post_process_group(
- is_new=is_new,
- is_regression=is_regression,
- is_new_group_environment=is_new_group_environment,
- cache_key=cache_key,
- group_id=event.group_id,
- project_id=event.project_id,
- )
- return cache_key
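- # A transaction event with no group should fire the transaction signal but skip the group pipeline entirely.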
- @patch("sentry.sentry_metrics.client.generic_metrics_backend.counter")
- @patch("sentry.tasks.post_process.run_post_process_job")
- @patch("sentry.rules.processing.processor.RuleProcessor")
- @patch("sentry.signals.transaction_processed.send_robust")
- @patch("sentry.signals.event_processed.send_robust")
- def test_process_transaction_event_with_no_group(
- self,
- event_processed_signal_mock,
- transaction_processed_signal_mock,
- mock_processor,
- run_post_process_job_mock,
- generic_metrics_backend_mock,
- ):
- min_ago = before_now(minutes=1)
- event = store_transaction(
- test_case=self,
- project_id=self.project.id,
- user_id=self.create_user(name="user1").name,
- fingerprint=[],
- environment=None,
- timestamp=min_ago,
- )
- assert len(event.groups) == 0
- cache_key = write_event_to_cache(event)
- post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=False,
- cache_key=cache_key,
- group_id=None,
- group_states=None,
- )
- assert transaction_processed_signal_mock.call_count == 1
- assert event_processed_signal_mock.call_count == 0
- assert mock_processor.call_count == 0
- assert run_post_process_job_mock.call_count == 0
- assert generic_metrics_backend_mock.call_count == 0
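- # Verifies the pipeline ordering: owner assignment, then auto assignment, then rule processing.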
- @patch("sentry.tasks.post_process.handle_owner_assignment")
- @patch("sentry.tasks.post_process.handle_auto_assignment")
- @patch("sentry.tasks.post_process.process_rules")
- @patch("sentry.tasks.post_process.run_post_process_job")
- @patch("sentry.rules.processing.processor.RuleProcessor")
- @patch("sentry.signals.transaction_processed.send_robust")
- @patch("sentry.signals.event_processed.send_robust")
- def test_full_pipeline_with_group_states(
- self,
- event_processed_signal_mock,
- transaction_processed_signal_mock,
- mock_processor,
- run_post_process_job_mock,
- mock_process_rules,
- mock_handle_auto_assignment,
- mock_handle_owner_assignment,
- ):
- event = self.create_performance_issue()
- assert event.group
- # TODO(jangjodi): Fix this ordering test; the side_effects should be functions (lambdas),
- # but because post-processing is async, the assert fails since it doesn't wait for the
- # side effects to run.
- call_order = [mock_handle_owner_assignment, mock_handle_auto_assignment, mock_process_rules]
- mock_handle_owner_assignment.side_effect = None
- mock_handle_auto_assignment.side_effect = None
- mock_process_rules.side_effect = None
- post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- cache_key="dummykey",
- group_id=event.group_id,
- occurrence_id=event.occurrence_id,
- project_id=self.project.id,
- )
- assert transaction_processed_signal_mock.call_count == 1
- assert event_processed_signal_mock.call_count == 0
- assert mock_processor.call_count == 0
- assert run_post_process_job_mock.call_count == 1
- assert call_order == [
- mock_handle_owner_assignment,
- mock_handle_auto_assignment,
- mock_process_rules,
- ]
- class PostProcessGroupAggregateEventTest(
- TestCase,
- SnubaTestCase,
- CorePostProcessGroupTestMixin,
- SnoozeTestSkipSnoozeMixin,
- PerformanceIssueTestCase,
- ):
- def create_event(self, data, project_id):
- group = self.create_group(
- type=PerformanceP95EndpointRegressionGroupType.type_id,
- )
- event = self.store_event(data=data, project_id=project_id)
- event.group = group
- event = event.for_group(group)
- return event
- def call_post_process_group(
- self, is_new, is_regression, is_new_group_environment, event, cache_key=None
- ):
- if cache_key is None:
- cache_key = write_event_to_cache(event)
- with self.feature(
- PerformanceP95EndpointRegressionGroupType.build_post_process_group_feature_name()
- ):
- post_process_group(
- is_new=is_new,
- is_regression=is_regression,
- is_new_group_environment=is_new_group_environment,
- cache_key=cache_key,
- group_id=event.group_id,
- project_id=event.project_id,
- )
- return cache_key
- class TransactionClustererTestCase(TestCase, SnubaTestCase):
- @patch("sentry.ingest.transaction_clusterer.datasource.redis._record_sample")
- def test_process_transaction_event_clusterer(
- self,
- mock_store_transaction_name,
- ):
- min_ago = before_now(minutes=1)
- event = process_event(
- data={
- "project": self.project.id,
- "event_id": "b" * 32,
- "transaction": "foo",
- "start_timestamp": str(min_ago),
- "timestamp": str(min_ago),
- "type": "transaction",
- "transaction_info": {
- "source": "url",
- },
- "contexts": {"trace": {"trace_id": "b" * 32, "span_id": "c" * 16, "op": ""}},
- },
- group_id=0,
- )
- cache_key = write_event_to_cache(event)
- post_process_group(
- is_new=False,
- is_regression=False,
- is_new_group_environment=False,
- cache_key=cache_key,
- group_id=None,
- )
- assert mock_store_transaction_name.mock_calls == [
- mock.call(ClustererNamespace.TRANSACTIONS, self.project, "foo")
- ]
- class PostProcessGroupGenericTest(
- TestCase,
- SnubaTestCase,
- OccurrenceTestMixin,
- CorePostProcessGroupTestMixin,
- InboxTestMixin,
- RuleProcessorTestMixin,
- SnoozeTestMixin,
- ):
- def create_event(self, data, project_id, assert_no_errors=True):
- data["type"] = "generic"
- event = self.store_event(
- data=data, project_id=project_id, assert_no_errors=assert_no_errors
- )
- occurrence_data = self.build_occurrence_data(event_id=event.event_id, project_id=project_id)
- occurrence, group_info = save_issue_occurrence(occurrence_data, event)
- assert group_info is not None
- group_event = event.for_group(group_info.group)
- group_event.occurrence = occurrence
- return group_event
- def call_post_process_group(
- self, is_new, is_regression, is_new_group_environment, event, cache_key=None
- ):
- with self.feature(ProfileFileIOGroupType.build_post_process_group_feature_name()):
- post_process_group(
- is_new=is_new,
- is_regression=is_regression,
- is_new_group_environment=is_new_group_environment,
- cache_key=None,
- group_id=event.group_id,
- occurrence_id=event.occurrence.id,
- project_id=event.group.project_id,
- )
- return cache_key
- def test_issueless(self):
- # Skip this test since there's no way to have issueless events in the issue platform
- pass
- def test_no_cache_abort(self):
- # We don't use the cache for generic issues, so skip this test
- pass
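- # Re-running post processing for an already-processed occurrence id must be a no-op.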
- @patch("sentry.rules.processing.processor.RuleProcessor")
- def test_occurrence_deduping(self, mock_processor):
- event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
- self.call_post_process_group(
- is_new=True,
- is_regression=True,
- is_new_group_environment=False,
- event=event,
- )
- assert mock_processor.call_count == 1
- mock_processor.assert_called_with(EventMatcher(event), True, True, False, False, False)
- # Calling this again should do nothing, since we've already processed this occurrence.
- self.call_post_process_group(
- is_new=False,
- is_regression=True,
- is_new_group_environment=False,
- event=event,
- )
- # Make sure we haven't called this again, since we should exit early.
- assert mock_processor.call_count == 1
- @patch("sentry.tasks.post_process.handle_owner_assignment")
- @patch("sentry.tasks.post_process.handle_auto_assignment")
- @patch("sentry.tasks.post_process.process_rules")
- @patch("sentry.tasks.post_process.run_post_process_job")
- @patch("sentry.rules.processing.processor.RuleProcessor")
- @patch("sentry.signals.event_processed.send_robust")
- @patch("sentry.utils.snuba.raw_query")
- def test_full_pipeline_with_group_states(
- self,
- snuba_raw_query_mock,
- event_processed_signal_mock,
- mock_processor,
- run_post_process_job_mock,
- mock_process_rules,
- mock_handle_auto_assignment,
- mock_handle_owner_assignment,
- ):
- event = self.create_event(data={"message": "testing"}, project_id=self.project.id)
- call_order = [mock_handle_owner_assignment, mock_handle_auto_assignment, mock_process_rules]
- mock_handle_owner_assignment.side_effect = None
- mock_handle_auto_assignment.side_effect = None
- mock_process_rules.side_effect = None
- self.call_post_process_group(
- is_new=False,
- is_regression=True,
- is_new_group_environment=False,
- event=event,
- )
- assert event_processed_signal_mock.call_count == 0
- assert mock_processor.call_count == 0
- assert run_post_process_job_mock.call_count == 1
- assert call_order == [
- mock_handle_owner_assignment,
- mock_handle_auto_assignment,
- mock_process_rules,
- ]
- assert snuba_raw_query_mock.call_count == 0
- @pytest.mark.skip(reason="these tests do not work with the given call_post_process_group implementation")
- def test_processing_cache_cleared(self):
- pass
- @pytest.mark.skip(reason="these tests do not work with the given call_post_process_group implementation")
- def test_processing_cache_cleared_with_commits(self):
- pass
- class PostProcessGroupFeedbackTest(
- TestCase,
- SnubaTestCase,
- OccurrenceTestMixin,
- CorePostProcessGroupTestMixin,
- InboxTestMixin,
- RuleProcessorTestMixin,
- SnoozeTestMixin,
- ):
- def create_event(
- self,
- data,
- project_id,
- assert_no_errors=True,
- feedback_type=FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE,
- is_spam=False,
- ):
- data["type"] = "generic"
- event = self.store_event(
- data=data, project_id=project_id, assert_no_errors=assert_no_errors
- )
- evidence_data = {
- "Test": 123,
- "source": feedback_type.value,
- }
- evidence_display = [
- {"name": "hi", "value": "bye", "important": True},
- {"name": "what", "value": "where", "important": False},
- ]
- if is_spam:
- evidence_data["is_spam"] = True
- occurrence_data = self.build_occurrence_data(
- event_id=event.event_id,
- project_id=project_id,
- **{
- "id": uuid.uuid4().hex,
- "fingerprint": ["c" * 32],
- "issue_title": "User Feedback",
- "subtitle": "it was bad",
- "culprit": "api/123",
- "resource_id": "1234",
- "evidence_data": evidence_data,
- "evidence_display": evidence_display,
- "type": FeedbackGroup.type_id,
- "detection_time": datetime.now().timestamp(),
- "level": "info",
- },
- )
- occurrence, group_info = save_issue_occurrence(occurrence_data, event)
- assert group_info is not None
- group_event = event.for_group(group_info.group)
- group_event.occurrence = occurrence
- return group_event
- @override_options({"feedback.spam-detection-actions": True})
- def call_post_process_group(
- self, is_new, is_regression, is_new_group_environment, event, cache_key=None
- ):
- with self.feature(FeedbackGroup.build_post_process_group_feature_name()):
- post_process_group(
- is_new=is_new,
- is_regression=is_regression,
- is_new_group_environment=is_new_group_environment,
- cache_key=None,
- group_id=event.group_id,
- occurrence_id=event.occurrence.id,
- project_id=event.group.project_id,
- )
- return cache_key
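- # Crash-report feedback respects the per-project user report notification option.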
- @override_options({"feedback.spam-detection-actions": True})
- def test_not_ran_if_crash_report_option_disabled(self):
- self.project.update_option("sentry:feedback_user_report_notifications", False)
- event = self.create_event(
- data={},
- project_id=self.project.id,
- feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
- )
- mock_process_func = Mock()
- with patch(
- "sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
- {
- GroupCategory.FEEDBACK: [
- feedback_filter_decorator(mock_process_func),
- ]
- },
- ):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- cache_key="total_rubbish",
- )
- assert mock_process_func.call_count == 0
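- # Feedback flagged as spam must skip the feedback post-process pipeline.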
- @override_options({"feedback.spam-detection-actions": True})
- def test_not_ran_if_spam(self):
- event = self.create_event(
- data={},
- project_id=self.project.id,
- feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
- is_spam=True,
- )
- mock_process_func = Mock()
- with patch(
- "sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
- {
- GroupCategory.FEEDBACK: [
- feedback_filter_decorator(mock_process_func),
- ]
- },
- ):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- cache_key="total_rubbish",
- )
- assert mock_process_func.call_count == 0
- @override_options({"feedback.spam-detection-actions": True})
- def test_ran_if_crash_report_project_option_enabled(self):
- self.project.update_option("sentry:feedback_user_report_notifications", True)
- event = self.create_event(
- data={},
- project_id=self.project.id,
- feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
- )
- mock_process_func = Mock()
- with patch(
- "sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
- {
- GroupCategory.FEEDBACK: [
- feedback_filter_decorator(mock_process_func),
- ]
- },
- ):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- cache_key="total_rubbish",
- )
- assert mock_process_func.call_count == 1
- @override_options({"feedback.spam-detection-actions": True})
- def test_not_ran_if_crash_report_setting_option_epoch_0(self):
- self.project.update_option("sentry:option-epoch", 1)
- event = self.create_event(
- data={},
- project_id=self.project.id,
- feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
- )
- mock_process_func = Mock()
- with patch(
- "sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
- {
- GroupCategory.FEEDBACK: [
- feedback_filter_decorator(mock_process_func),
- ]
- },
- ):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- cache_key="total_rubbish",
- )
- assert mock_process_func.call_count == 0
- @override_options({"feedback.spam-detection-actions": True})
- def test_ran_if_default_on_new_projects(self):
- event = self.create_event(
- data={},
- project_id=self.project.id,
- feedback_type=FeedbackCreationSource.CRASH_REPORT_EMBED_FORM,
- )
- mock_process_func = Mock()
- with patch(
- "sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
- {
- GroupCategory.FEEDBACK: [
- feedback_filter_decorator(mock_process_func),
- ]
- },
- ):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- cache_key="total_rubbish",
- )
- assert mock_process_func.call_count == 1
- @override_options({"feedback.spam-detection-actions": True})
- def test_ran_if_crash_feedback_envelope(self):
- event = self.create_event(
- data={},
- project_id=self.project.id,
- feedback_type=FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE,
- )
- mock_process_func = Mock()
- with patch(
- "sentry.tasks.post_process.GROUP_CATEGORY_POST_PROCESS_PIPELINE",
- {
- GroupCategory.FEEDBACK: [
- feedback_filter_decorator(mock_process_func),
- ]
- },
- ):
- self.call_post_process_group(
- is_new=True,
- is_regression=False,
- is_new_group_environment=True,
- event=event,
- cache_key="total_rubbish",
- )
- assert mock_process_func.call_count == 1
- @pytest.mark.skip(
- reason="Skip this test since there's no way to have issueless events in the issue platform"
- )
- def test_issueless(self):
- ...
- def test_no_cache_abort(self):
- # We don't use the cache for generic issues, so skip this test
- pass
- @pytest.mark.skip(reason="these tests do not work with the given call_post_process_group implementation")
- def test_processing_cache_cleared(self):
- pass
- @pytest.mark.skip(reason="these tests do not work with the given call_post_process_group implementation")
- def test_processing_cache_cleared_with_commits(self):
- pass
|