
ref(perf-issues): Remove unused N+1 spans detector (#42226)

This detector has been split into the N+1 Database Queries detector
(released) and the N+1 API Calls detector (pending), so this code is no
longer used. Many of the Python tests reference this detector's output
(e.g., `"n_plus_one_spans"`), but they really _should_ reference
`"n_plus_one_db_queries"`, the detector we shipped, so I updated them.
George Gritsouk · 2 years ago · commit f0b9457037
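
For reviewers, a minimal sketch of the fingerprint change applied across the tests below, using the group-type values visible in this diff (1000 was the removed PERFORMANCE_N_PLUS_ONE; 1006 is the shipped PERFORMANCE_N_PLUS_ONE_DB_QUERIES). The enum here is a stand-in for sentry.types.issues.GroupType, not the full definition:

    from enum import Enum

    class GroupType(Enum):
        PERFORMANCE_N_PLUS_ONE = 1000                 # removed by this commit
        PERFORMANCE_N_PLUS_ONE_DB_QUERIES = 1006      # the detector we shipped

    # Before: test fingerprints pointed at the unused detector's group type
    old_fingerprint = [f"{GroupType.PERFORMANCE_N_PLUS_ONE.value}-group1"]             # ["1000-group1"]

    # After: they reference the N+1 DB Queries detector instead
    new_fingerprint = [f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"]  # ["1006-group1"]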

+ 1 - 1
src/sentry/api/issue_search.py

@@ -160,7 +160,7 @@ def convert_type_value(
     user: User,
     environments: Optional[Sequence[Environment]],
 ) -> List[int]:
-    """Convert a value like 'error' or 'performance_n_plus_one' to the GroupType value for issue lookup"""
+    """Convert a value like 'error' or 'performance_n_plus_one_db_queries' to the GroupType value for issue lookup"""
     if features.has("organizations:performance-issues", projects[0].organization):
         results = []
         for type in value:
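
For context, a self-contained sketch of the lookup the updated docstring describes, with behavior matching the updated tests in tests/sentry/api/test_issue_search.py further down. The real convert_type_value also checks the organizations:performance-issues feature flag and raises InvalidSearchQuery for unknown values; the structure here is illustrative only:

    from enum import Enum

    class GroupType(Enum):
        ERROR = 1
        PERFORMANCE_SLOW_SPAN = 1001
        PERFORMANCE_N_PLUS_ONE_DB_QUERIES = 1006

    def convert_type_value(values):
        # Map search strings like "performance_n_plus_one_db_queries" to GroupType values.
        lookup = {t.name.lower(): t.value for t in GroupType}
        return [lookup[v] for v in values]

    assert convert_type_value(["error", "performance_n_plus_one_db_queries"]) == [1, 1006]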

+ 0 - 1
src/sentry/models/group.py

@@ -424,7 +424,6 @@ class Group(Model):
         default=GroupType.ERROR.value,
         choices=(
             (GroupType.ERROR.value, _("Error")),
-            (GroupType.PERFORMANCE_N_PLUS_ONE.value, _("N Plus One")),
             (GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value, _("N Plus One DB Queries")),
             (GroupType.PERFORMANCE_SLOW_SPAN.value, _("Slow Span")),
             (

+ 1 - 1
src/sentry/testutils/cases.py

@@ -480,7 +480,7 @@ class PerformanceIssueTestCase(BaseTestCase):
         event_data = load_data(
             "transaction-n-plus-one",
             timestamp=before_now(minutes=10),
-            fingerprint=[f"{GroupType.PERFORMANCE_N_PLUS_ONE.value}-group1"],
+            fingerprint=[f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"],
         )
         perf_event_manager = EventManager(event_data)
         perf_event_manager.normalize()

+ 0 - 3
src/sentry/types/issues.py

@@ -4,7 +4,6 @@ from enum import Enum
 
 class GroupType(Enum):
     ERROR = 1
-    PERFORMANCE_N_PLUS_ONE = 1000
     PERFORMANCE_SLOW_SPAN = 1001
     PERFORMANCE_RENDER_BLOCKING_ASSET_SPAN = 1004
     PERFORMANCE_DUPLICATE_SPANS = 1005
@@ -27,7 +26,6 @@ GROUP_CATEGORIES_CUSTOM_EMAIL = (GroupCategory.ERROR, GroupCategory.PERFORMANCE)
 
 GROUP_TYPE_TO_CATEGORY = {
     GroupType.ERROR: GroupCategory.ERROR,
-    GroupType.PERFORMANCE_N_PLUS_ONE: GroupCategory.PERFORMANCE,
     GroupType.PERFORMANCE_SLOW_SPAN: GroupCategory.PERFORMANCE,
     GroupType.PERFORMANCE_RENDER_BLOCKING_ASSET_SPAN: GroupCategory.PERFORMANCE,
     GroupType.PERFORMANCE_DUPLICATE_SPANS: GroupCategory.PERFORMANCE,
@@ -40,7 +38,6 @@ GROUP_TYPE_TO_CATEGORY = {
 
 GROUP_TYPE_TO_TEXT = {
     GroupType.ERROR: "Error",
-    GroupType.PERFORMANCE_N_PLUS_ONE: "N+1",  # may be N+1 Spans, N+1 Web Requests
     GroupType.PERFORMANCE_SLOW_SPAN: "Slow Span",
     GroupType.PERFORMANCE_RENDER_BLOCKING_ASSET_SPAN: "Render Blocking Asset Span",
     GroupType.PERFORMANCE_DUPLICATE_SPANS: "Duplicate Spans",

+ 0 - 75
src/sentry/utils/performance_issues/performance_detection.py

@@ -56,7 +56,6 @@ class DetectorType(Enum):
     SLOW_SPAN = "slow_span"
     DUPLICATE_SPANS = "duplicates"
     RENDER_BLOCKING_ASSET_SPAN = "render_blocking_assets"
-    N_PLUS_ONE_SPANS = "n_plus_one"
     N_PLUS_ONE_DB_QUERIES = "n_plus_one_db"
     N_PLUS_ONE_DB_QUERIES_EXTENDED = "n_plus_one_db_ext"
     N_PLUS_ONE_API_CALLS = "n_plus_one_api_calls"
@@ -69,7 +68,6 @@ DETECTOR_TYPE_TO_GROUP_TYPE = {
     DetectorType.SLOW_SPAN: GroupType.PERFORMANCE_SLOW_SPAN,
     DetectorType.DUPLICATE_SPANS: GroupType.PERFORMANCE_DUPLICATE_SPANS,
     DetectorType.RENDER_BLOCKING_ASSET_SPAN: GroupType.PERFORMANCE_RENDER_BLOCKING_ASSET_SPAN,
-    DetectorType.N_PLUS_ONE_SPANS: GroupType.PERFORMANCE_N_PLUS_ONE,
     DetectorType.N_PLUS_ONE_DB_QUERIES: GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES,
     DetectorType.N_PLUS_ONE_DB_QUERIES_EXTENDED: GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES,
     DetectorType.N_PLUS_ONE_API_CALLS: GroupType.PERFORMANCE_N_PLUS_ONE_API_CALLS,
@@ -288,13 +286,6 @@ def get_detection_settings(project_id: Optional[str] = None) -> Dict[DetectorTyp
             "fcp_maximum_threshold": settings["render_blocking_fcp_max"],  # ms
             "fcp_ratio_threshold": settings["render_blocking_fcp_ratio"],  # in the range [0, 1]
         },
-        DetectorType.N_PLUS_ONE_SPANS: [
-            {
-                "count": 3,
-                "start_time_threshold": 5.0,  # ms
-                "allowed_span_ops": ["http.client"],
-            }
-        ],
         DetectorType.N_PLUS_ONE_DB_QUERIES: {
             "count": settings["n_plus_one_db_count"],
             "duration_threshold": settings["n_plus_one_db_duration_threshold"],  # ms
@@ -344,7 +335,6 @@ def _detect_performance_problems(data: Event, sdk_span: Any) -> List[Performance
         DetectorType.RENDER_BLOCKING_ASSET_SPAN: RenderBlockingAssetSpanDetector(
             detection_settings, data
         ),
-        DetectorType.N_PLUS_ONE_SPANS: NPlusOneSpanDetector(detection_settings, data),
         DetectorType.N_PLUS_ONE_DB_QUERIES: NPlusOneDBSpanDetector(detection_settings, data),
         DetectorType.N_PLUS_ONE_DB_QUERIES_EXTENDED: NPlusOneDBSpanDetectorExtended(
             detection_settings, data
@@ -736,71 +726,6 @@ class RenderBlockingAssetSpanDetector(PerformanceDetector):
         return span_duration / self.fcp > fcp_ratio_threshold
 
 
-class NPlusOneSpanDetector(PerformanceDetector):
-    """
-    Checks for multiple concurrent API calls.
-    N.B.1. Non-greedy! Returns the first N concurrent spans of a series of
-      concurrent spans, rather than all spans in a concurrent series.
-    N.B.2. Assumes that spans are passed in ascending order of `start_timestamp`
-    N.B.3. Only returns _the first_ set of concurrent calls of all possible.
-    """
-
-    __slots__ = ("spans_involved", "stored_problems")
-
-    settings_key = DetectorType.N_PLUS_ONE_SPANS
-
-    def init(self):
-        self.spans_involved = {}
-        self.most_recent_start_time = {}
-        self.most_recent_hash = {}
-        self.stored_problems = {}
-
-    def visit_span(self, span: Span):
-        settings_for_span = self.settings_for_span(span)
-        if not settings_for_span:
-            return
-
-        op, span_id, op_prefix, span_duration, settings = settings_for_span
-
-        start_time_threshold = timedelta(milliseconds=settings.get("start_time_threshold", 0))
-        count = settings.get("count", 10)
-
-        fingerprint = fingerprint_span_op(span)
-        if not fingerprint:
-            return
-
-        hash = span.get("hash", None)
-        if not hash:
-            return
-
-        if fingerprint not in self.spans_involved:
-            self.spans_involved[fingerprint] = []
-            self.most_recent_start_time[fingerprint] = 0
-            self.most_recent_hash[fingerprint] = ""
-
-        delta_to_previous_span_start_time = timedelta(
-            seconds=(span["start_timestamp"] - self.most_recent_start_time[fingerprint])
-        )
-
-        is_concurrent_with_previous_span = delta_to_previous_span_start_time < start_time_threshold
-        has_same_hash_as_previous_span = span["hash"] == self.most_recent_hash[fingerprint]
-
-        self.most_recent_start_time[fingerprint] = span["start_timestamp"]
-        self.most_recent_hash[fingerprint] = hash
-
-        if is_concurrent_with_previous_span and has_same_hash_as_previous_span:
-            self.spans_involved[fingerprint].append(span)
-        else:
-            self.spans_involved[fingerprint] = [span_id]
-            return
-
-        if not self.stored_problems.get(fingerprint, False):
-            if len(self.spans_involved[fingerprint]) >= count:
-                self.stored_problems[fingerprint] = PerformanceSpanProblem(
-                    span_id, op_prefix, self.spans_involved[fingerprint]
-                )
-
-
 class NPlusOneAPICallsDetector(PerformanceDetector):
     """
     Detect parallel network calls to the same endpoint.
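
Removing the class above is self-contained because detectors are independent span visitors. A rough sketch of that protocol, inferred from the removed code and the registry hunks above (the real PerformanceDetector base class lives in this module and differs in detail):

    class PerformanceDetectorSketch:
        # Simplified stand-in for PerformanceDetector; not the real base class.
        def __init__(self, settings, event):
            self.settings = settings
            self.event = event
            self.stored_problems = {}
            self.init()

        def init(self):
            pass

        def visit_span(self, span):
            raise NotImplementedError

    def run_detectors(detectors, spans):
        # Each detector sees every span once; spans are assumed to arrive in
        # ascending start_timestamp order (see N.B.2 in the removed docstring).
        for span in spans:
            for detector in detectors.values():
                detector.visit_span(span)
        return {detector_type: d.stored_problems for detector_type, d in detectors.items()}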

+ 1 - 1
tests/sentry/api/endpoints/test_event_grouping_info.py

@@ -63,7 +63,7 @@ class EventGroupingInfoEndpointTestCase(APITestCase):
     def test_transaction_event_with_problem(self):
         event_data = load_data(
             "transaction-n-plus-one",
-            fingerprint=[f"{GroupType.PERFORMANCE_N_PLUS_ONE.value}-group1"],
+            fingerprint=[f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"],
         )
         perf_event_manager = EventManager(event_data)
         perf_event_manager.normalize()

+ 2 - 2
tests/sentry/api/serializers/test_group.py

@@ -365,7 +365,7 @@ class GroupSerializerTest(TestCase):
             "timestamp": cur_time.timestamp(),
             "start_timestamp": cur_time.timestamp(),
             "received": cur_time.timestamp(),
-            "fingerprint": [f"{GroupType.PERFORMANCE_N_PLUS_ONE.value}-group1"],
+            "fingerprint": [f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"],
         }
         event = self.store_event(
             data=event_data,
@@ -375,4 +375,4 @@ class GroupSerializerTest(TestCase):
         serialized = serialize(perf_group)
         assert serialized["count"] == "1"
         assert serialized["issueCategory"] == "performance"
-        assert serialized["issueType"] == "performance_n_plus_one"
+        assert serialized["issueType"] == "performance_n_plus_one_db_queries"

+ 2 - 2
tests/sentry/api/serializers/test_group_stream.py

@@ -62,7 +62,7 @@ class StreamGroupSerializerTestCase(TestCase):
             "timestamp": cur_time.timestamp(),
             "start_timestamp": cur_time.timestamp(),
             "received": cur_time.timestamp(),
-            "fingerprint": [f"{GroupType.PERFORMANCE_N_PLUS_ONE.value}-group1"],
+            "fingerprint": [f"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1"],
         }
         event = self.store_event(
             data=event_data,
@@ -72,6 +72,6 @@ class StreamGroupSerializerTestCase(TestCase):
         serialized = serialize(group, serializer=StreamGroupSerializerSnuba(stats_period="24h"))
         assert serialized["count"] == "1"
         assert serialized["issueCategory"] == "performance"
-        assert serialized["issueType"] == "performance_n_plus_one"
+        assert serialized["issueType"] == "performance_n_plus_one_db_queries"
         assert [stat[1] for stat in serialized["stats"]["24h"][:-1]] == [0] * 23
         assert serialized["stats"]["24h"][-1][1] == 1

+ 4 - 4
tests/sentry/api/test_issue_search.py

@@ -303,13 +303,13 @@ class ConvertTypeValueTest(TestCase):
         with self.feature("organizations:performance-issues"):
             assert convert_type_value(["error"], [self.project], self.user, None) == [1]
             assert convert_type_value(
-                ["performance_n_plus_one"], [self.project], self.user, None
-            ) == [1000]
+                ["performance_n_plus_one_db_queries"], [self.project], self.user, None
+            ) == [1006]
             assert convert_type_value(
                 ["performance_slow_span"], [self.project], self.user, None
             ) == [1001]
             assert convert_type_value(
-                ["error", "performance_n_plus_one"], [self.project], self.user, None
-            ) == [1, 1000]
+                ["error", "performance_n_plus_one_db_queries"], [self.project], self.user, None
+            ) == [1, 1006]
             with pytest.raises(InvalidSearchQuery):
                 convert_type_value(["hellboy"], [self.project], self.user, None)

+ 1 - 1
tests/sentry/event_manager/test_event_manager.py

@@ -2397,7 +2397,7 @@ class EventManagerTest(TestCase, SnubaTestCase, EventManagerTestMixin):
 
             # sneakily make the group type wrong
             group = event.groups[0]
-            group.type = GroupType.PERFORMANCE_N_PLUS_ONE.value
+            group.type = GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value
             group.save()
             manager = EventManager(make_event())
             manager.normalize()

Some files were not shown because too many files changed in this diff