
ref(sort): Replace betterPriority with priority (#52915)

Now that the betterPriority sort has shipped and replaced the old
priority sort, rename everything to priority for simplicity.

Step 1: FE PR: https://github.com/getsentry/sentry/pull/52910
Step 2: Migration: https://github.com/getsentry/sentry/pull/52909
Step 3: This PR!
Colleen O'Rourke · 1 year ago · commit 91ef3f00c4

+ 0 - 1
src/sentry/constants.py

@@ -46,7 +46,6 @@ SORT_OPTIONS = {
     "date": _("Last Seen"),
     "new": _("First Seen"),
     "freq": _("Frequency"),
-    "better_priority": _("Better Priority"),
 }
 
 SEARCH_SORT_OPTIONS = {

+ 0 - 2
src/sentry/models/savedsearch.py

@@ -18,7 +18,6 @@ class SortOptions:
     FREQ = "freq"
     USER = "user"
     INBOX = "inbox"
-    BETTER_PRIORITY = "betterPriority"
 
     @classmethod
     def as_choices(cls):
@@ -29,7 +28,6 @@ class SortOptions:
             (cls.FREQ, _("Events")),
             (cls.USER, _("Users")),
             (cls.INBOX, _("Date Added")),
-            (cls.BETTER_PRIORITY, _("Better Priority")),
         )
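
With BETTER_PRIORITY gone from SortOptions, "betterPriority" presumably stops being an accepted saved-search sort value. A minimal illustration of what the trimmed choices imply (not part of the commit; the earlier entries in the tuple are unchanged and elided in the hunk above):

    # Illustrative only: the choices returned by SortOptions.as_choices()
    # no longer include a ("betterPriority", "Better Priority") entry.
    assert all(value != "betterPriority" for value, _label in SortOptions.as_choices())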
 
 

+ 16 - 21
src/sentry/search/snuba/executors.py

@@ -76,7 +76,7 @@ DEFAULT_PRIORITY_WEIGHTS: PrioritySortWeights = {
 
 
 @dataclass
-class BetterPriorityParams:
+class PriorityParams:
     # (event or issue age_hours) / (event or issue halflife hours)
     # any event or issue age that is greater than max_pow times the half-life hours will get clipped
     max_pow: int
@@ -238,7 +238,7 @@ class AbstractQueryExecutor(metaclass=ABCMeta):
         end: datetime,
         having: Sequence[Sequence[Any]],
         aggregate_kwargs: Optional[PrioritySortWeights] = None,
-        replace_better_priority_aggregation: Optional[bool] = False,
+        replace_priority_aggregation: Optional[bool] = False,
     ) -> list[Any]:
         extra_aggregations = self.dependency_aggregations.get(sort_field, [])
         required_aggregations = set([sort_field, "total"] + extra_aggregations)
@@ -249,8 +249,8 @@ class AbstractQueryExecutor(metaclass=ABCMeta):
         aggregations = []
         for alias in required_aggregations:
             aggregation = self.aggregation_defs[alias]
-            if replace_better_priority_aggregation and alias in ["priority", "better_priority"]:
-                aggregation = self.aggregation_defs["better_priority_issue_platform"]
+            if replace_priority_aggregation and alias == "priority":
+                aggregation = self.aggregation_defs["priority_issue_platform"]
             if callable(aggregation):
                 if aggregate_kwargs:
                     aggregation = aggregation(start, end, aggregate_kwargs.get(alias, {}))
@@ -302,10 +302,7 @@ class AbstractQueryExecutor(metaclass=ABCMeta):
                 else:
                     conditions.append(converted_filter)
 
-        if (
-            sort_field in ["priority", "better_priority"]
-            and group_category is not GroupCategory.ERROR.value
-        ):
+        if sort_field == "priority" and group_category is not GroupCategory.ERROR.value:
             aggregations = self._prepare_aggregations(
                 sort_field, start, end, having, aggregate_kwargs, True
             )
@@ -503,13 +500,13 @@ class AbstractQueryExecutor(metaclass=ABCMeta):
         return sort_by in self.sort_strategies.keys()
 
 
-def better_priority_aggregation(
+def priority_aggregation(
     start: datetime,
     end: datetime,
     aggregate_kwargs: PrioritySortWeights,
 ) -> Sequence[str]:
-    return better_priority_aggregation_impl(
-        BetterPriorityParams(
+    return priority_aggregation_impl(
+        PriorityParams(
             max_pow=16,
             min_score=0.01,
             event_age_weight=1,
@@ -529,13 +526,13 @@ def better_priority_aggregation(
     )
 
 
-def better_priority_issue_platform_aggregation(
+def priority_issue_platform_aggregation(
     start: datetime,
     end: datetime,
     aggregate_kwargs: PrioritySortWeights,
 ) -> Sequence[str]:
-    return better_priority_aggregation_impl(
-        BetterPriorityParams(
+    return priority_aggregation_impl(
+        PriorityParams(
             max_pow=16,
             min_score=0.01,
             event_age_weight=1,
@@ -555,8 +552,8 @@ def better_priority_issue_platform_aggregation(
     )
 
 
-def better_priority_aggregation_impl(
-    params: BetterPriorityParams,
+def priority_aggregation_impl(
+    params: PriorityParams,
     timestamp_column: str,
     use_stacktrace: bool,
     start: datetime,
@@ -695,24 +692,22 @@ class PostgresSnubaQueryExecutor(AbstractQueryExecutor):
         "date": "last_seen",
         "freq": "times_seen",
         "new": "first_seen",
-        "priority": "better_priority",
+        "priority": "priority",
         "user": "user_count",
         # We don't need a corresponding snuba field here, since this sort only happens
         # in Postgres
         "inbox": "",
-        "betterPriority": "better_priority",
     }
 
     aggregation_defs = {
         "times_seen": ["count()", ""],
         "first_seen": ["multiply(toUInt64(min(timestamp)), 1000)", ""],
         "last_seen": ["multiply(toUInt64(max(timestamp)), 1000)", ""],
-        "priority": better_priority_aggregation,
+        "priority": priority_aggregation,
         # Only makes sense with WITH TOTALS, returns 1 for an individual group.
         "total": ["uniq", ISSUE_FIELD_NAME],
         "user_count": ["uniq", "tags[sentry:user]"],
-        "better_priority": better_priority_aggregation,
-        "better_priority_issue_platform": better_priority_issue_platform_aggregation,
+        "priority_issue_platform": priority_issue_platform_aggregation,
     }
 
     @property
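
For orientation, a minimal sketch (simplified names, not the executor's actual code) of how the renamed keys fit together after these hunks: the "priority" sort resolves to the priority_aggregation callable, and the issue-platform variant is swapped in when the query is not restricted to error groups:

    # Sketch of the alias -> aggregation resolution in _prepare_aggregations;
    # resolve_aggregation and use_issue_platform are hypothetical names used
    # here for brevity.
    def resolve_aggregation(alias, aggregation_defs, use_issue_platform, start, end, aggregate_kwargs):
        aggregation = aggregation_defs[alias]
        if use_issue_platform and alias == "priority":
            # non-error group categories use the issue-platform formula
            aggregation = aggregation_defs["priority_issue_platform"]
        if callable(aggregation):
            # priority_aggregation(start, end, weights) builds the Snuba expression
            aggregation = aggregation(start, end, (aggregate_kwargs or {}).get(alias, {}))
        return aggregation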

+ 2 - 2
tests/snuba/api/endpoints/test_organization_group_index.py

@@ -114,7 +114,7 @@ class GroupListTest(APITestCase, SnubaTestCase):
         assert len(response.data) == 1
         assert response.data[0]["id"] == str(group.id)
 
-    def test_sort_by_better_priority(self):
+    def test_sort_by_priority(self):
         group = self.store_event(
             data={
                 "timestamp": iso_format(before_now(seconds=10)),
@@ -164,7 +164,7 @@ class GroupListTest(APITestCase, SnubaTestCase):
         }
 
         response = self.get_success_response(
-            sort="betterPriority",
+            sort="priority",
             query="is:unresolved",
             limit=25,
             start=iso_format(before_now(days=1)),

+ 26 - 26
tests/snuba/search/test_backend.py

@@ -79,7 +79,7 @@ class SharedSnubaTest(TestCase, SnubaTestCase):
         if limit is not None:
             kwargs["limit"] = limit
         if aggregate_kwargs:
-            kwargs["aggregate_kwargs"] = {"better_priority": {**aggregate_kwargs}}
+            kwargs["aggregate_kwargs"] = {"priority": {**aggregate_kwargs}}
 
         return self.backend.query(
             projects,
@@ -364,7 +364,7 @@ class EventsSnubaSearchTest(SharedSnubaTest):
         results = self.make_query(sort_by="user")
         assert list(results) == [self.group1, self.group2]
 
-    def test_better_priority_sort(self):
+    def test_priority_sort(self):
         weights: PrioritySortWeights = {
             "log_level": 5,
             "has_stacktrace": 5,
@@ -375,7 +375,7 @@ class EventsSnubaSearchTest(SharedSnubaTest):
             "norm": False,
         }
         results = self.make_query(
-            sort_by="betterPriority",
+            sort_by="priority",
             aggregate_kwargs=weights,
         )
         assert list(results) == [self.group2, self.group1]
@@ -2597,12 +2597,12 @@ class EventsSnubaSearchTest(SharedSnubaTest):
         assert len(results) == 0
 
 
-class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
+class EventsPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
     @property
     def backend(self):
         return EventsDatasetSnubaSearchBackend()
 
-    def test_better_priority_sort_old_and_new_events(self):
+    def test_priority_sort_old_and_new_events(self):
         """Test that an issue with only one old event is ranked lower than an issue with only one new event"""
         new_project = self.create_project(organization=self.project.organization)
         base_datetime = (datetime.utcnow() - timedelta(days=3)).replace(tzinfo=pytz.utc)
@@ -2644,7 +2644,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
             "norm": False,
         }
         results = self.make_query(
-            sort_by="betterPriority",
+            sort_by="priority",
             projects=[new_project],
             aggregate_kwargs=weights,
         )
@@ -2652,7 +2652,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         old_group = Group.objects.get(id=old_event.group.id)
         assert list(results) == [recent_group, old_group]
 
-    def test_better_priority_sort_v2(self):
+    def test_priority_sort_v2(self):
         """Test that the v2 formula works."""
         new_project = self.create_project(organization=self.project.organization)
         base_datetime = (datetime.utcnow() - timedelta(days=3)).replace(tzinfo=pytz.utc)
@@ -2694,7 +2694,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
             "norm": False,
         }
         results = self.make_query(
-            sort_by="betterPriority",
+            sort_by="priority",
             projects=[new_project],
             aggregate_kwargs=weights,
         )
@@ -2702,7 +2702,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         old_group = Group.objects.get(id=old_event.group.id)
         assert list(results) == [recent_group, old_group]
 
-    def test_better_priority_log_level_results(self):
+    def test_priority_log_level_results(self):
         """Test that the scoring results change when we pass in different log level weights"""
         base_datetime = (datetime.utcnow() - timedelta(hours=1)).replace(tzinfo=pytz.utc)
         event1 = self.store_event(
@@ -2733,7 +2733,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         group2 = Group.objects.get(id=event2.group.id)
 
         agg_kwargs = {
-            "better_priority": {
+            "priority": {
                 "log_level": 0,
                 "has_stacktrace": 0,
                 "relative_volume": 1,
@@ -2749,7 +2749,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2760,14 +2760,14 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         # initially group 2's score is higher since it has a more recent event
         assert group2_score_before > group1_score_before
 
-        agg_kwargs["better_priority"].update({"log_level": 5})
+        agg_kwargs["priority"].update({"log_level": 5})
 
         results2 = query_executor.snuba_search(
             start=None,
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2778,11 +2778,11 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         # ensure fatal has a higher score than error
         assert group1_score_after > group2_score_after
 
-    def test_better_priority_has_stacktrace_results(self):
+    def test_priority_has_stacktrace_results(self):
         """Test that the scoring results change when we pass in different has_stacktrace weights"""
         base_datetime = (datetime.utcnow() - timedelta(hours=1)).replace(tzinfo=pytz.utc)
         agg_kwargs = {
-            "better_priority": {
+            "priority": {
                 "log_level": 0,
                 "has_stacktrace": 0,
                 "relative_volume": 1,
@@ -2833,7 +2833,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2843,13 +2843,13 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         group2_score = results[1][1]
         assert group1_score == group2_score
 
-        agg_kwargs["better_priority"].update({"has_stacktrace": 3})
+        agg_kwargs["priority"].update({"has_stacktrace": 3})
         results = query_executor.snuba_search(
             start=None,
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2860,7 +2860,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         # check that a group with an event with a stacktrace has a higher weight than one without
         assert group1_score < group2_score
 
-    def test_better_priority_event_halflife_results(self):
+    def test_priority_event_halflife_results(self):
         """Test that the scoring results change when we pass in different event halflife weights"""
         base_datetime = (datetime.utcnow() - timedelta(hours=1)).replace(tzinfo=pytz.utc)
         event1 = self.store_event(
@@ -2891,7 +2891,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         group2 = Group.objects.get(id=event2.group.id)
 
         agg_kwargs = {
-            "better_priority": {
+            "priority": {
                 "log_level": 0,
                 "has_stacktrace": 0,
                 "relative_volume": 1,
@@ -2907,7 +2907,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2918,13 +2918,13 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         # initially group 2's score is higher since it has a more recent event
         assert group2_score_before > group1_score_before
 
-        agg_kwargs["better_priority"].update({"event_halflife_hours": 2})
+        agg_kwargs["priority"].update({"event_halflife_hours": 2})
         results = query_executor.snuba_search(
             start=None,
             end=None,
             project_ids=[self.project.id],
             environment_ids=[],
-            sort_field="better_priority",
+            sort_field="priority",
             organization=self.organization,
             group_ids=[group1.id, group2.id],
             limit=150,
@@ -2934,7 +2934,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         group2_score_after = results[1][1]
         assert group1_score_after < group2_score_after
 
-    def test_better_priority_mixed_group_types(self):
+    def test_priority_mixed_group_types(self):
         base_datetime = (datetime.utcnow() - timedelta(hours=1)).replace(tzinfo=pytz.utc)
 
         error_event = self.store_event(
@@ -2967,7 +2967,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
         profile_group_1 = group_info.group
 
         agg_kwargs = {
-            "better_priority": {
+            "priority": {
                 "log_level": 0,
                 "has_stacktrace": 0,
                 "relative_volume": 1,
@@ -2989,7 +2989,7 @@ class EventsBetterPriorityTest(SharedSnubaTest, OccurrenceTestMixin):
                 end=None,
                 project_ids=[self.project.id],
                 environment_ids=[],
-                sort_field="better_priority",
+                sort_field="priority",
                 organization=self.organization,
                 group_ids=[profile_group_1.id, error_group.id],
                 limit=150,
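
The same rename applies to the weight payload these tests pass through: sort weights are now keyed by "priority" in aggregate_kwargs. A rough usage sketch based on the tests above (only the keys visible in this diff are shown; the remaining PrioritySortWeights fields are omitted):

    # Illustrative only: weights keyed by "priority" instead of "better_priority".
    weights = {
        "log_level": 0,
        "has_stacktrace": 0,
        "relative_volume": 1,
        "v2": False,
        "norm": False,
        # ...other PrioritySortWeights fields elided here
    }
    agg_kwargs = {"priority": weights}
    # query_executor.snuba_search(..., sort_field="priority", aggregate_kwargs=agg_kwargs)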