Browse Source

chore(eap-spans): Add feature to control span ingestion in EAP (#80064)

Pierre Massat 4 months ago
parent
commit
085c11e43d
3 changed files with 85 additions and 28 deletions
  1. 3 0
      src/sentry/features/temporary.py
  2. 33 10
      src/sentry/relay/config/__init__.py
  3. 49 18
      src/sentry/testutils/cases.py

+ 3 - 0
src/sentry/features/temporary.py

@@ -528,6 +528,9 @@ def register_temporary_features(manager: FeatureManager):
     manager.add("organizations:event-unique-user-frequency-condition-with-conditions", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
     # Use spans instead of transactions for dynamic sampling calculations. This will become the new default.
     manager.add("organizations:dynamic-sampling-spans", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False)
+    # Enable tagging spans with whether or not we should ingest them in the EAP
+    manager.add("organizations:ingest-spans-in-eap", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False)
+
     # NOTE: Don't add features down here! Add them to their specific group and sort
     #       them alphabetically! The order features are registered is not important.
 

+ 33 - 10
src/sentry/relay/config/__init__.py

@@ -65,6 +65,7 @@ EXPOSABLE_FEATURES = [
     "projects:span-metrics-extraction",
     "projects:span-metrics-extraction-addons",
     "organizations:indexed-spans-extraction",
+    "organizations:ingest-spans-in-eap",
     "projects:relay-otel-endpoint",
 ]
 
@@ -90,12 +91,14 @@ def get_exposed_features(project: Project) -> Sequence[str]:
 
         if has_feature:
             metrics.incr(
-                "sentry.relay.config.features", tags={"outcome": "enabled", "feature": feature}
+                "sentry.relay.config.features",
+                tags={"outcome": "enabled", "feature": feature},
             )
             active_features.append(feature)
         else:
             metrics.incr(
-                "sentry.relay.config.features", tags={"outcome": "disabled", "feature": feature}
+                "sentry.relay.config.features",
+                tags={"outcome": "disabled", "feature": feature},
             )
 
     return active_features
@@ -371,11 +374,14 @@ class SpanDescriptionRule(TypedDict):
 
 def _should_extract_abnormal_mechanism(project: Project) -> bool:
     return sample_modulo(
-        "sentry-metrics.releasehealth.abnormal-mechanism-extraction-rate", project.organization_id
+        "sentry-metrics.releasehealth.abnormal-mechanism-extraction-rate",
+        project.organization_id,
     )
 
 
-def _get_desktop_browser_performance_profiles(organization: Organization) -> list[dict[str, Any]]:
+def _get_desktop_browser_performance_profiles(
+    organization: Organization,
+) -> list[dict[str, Any]]:
     return [
         {
             "name": "Chrome",
@@ -631,7 +637,9 @@ def _get_desktop_browser_performance_profiles(organization: Organization) -> lis
     ]
 
 
-def _get_mobile_browser_performance_profiles(organization: Organization) -> list[dict[str, Any]]:
+def _get_mobile_browser_performance_profiles(
+    organization: Organization,
+) -> list[dict[str, Any]]:
     return [
         {
             "name": "Chrome Mobile",
@@ -882,7 +890,9 @@ def _get_mobile_browser_performance_profiles(organization: Organization) -> list
     ]
 
 
-def _get_default_browser_performance_profiles(organization: Organization) -> list[dict[str, Any]]:
+def _get_default_browser_performance_profiles(
+    organization: Organization,
+) -> list[dict[str, Any]]:
     return [
         {
             "name": "Default",
@@ -940,7 +950,9 @@ def _get_default_browser_performance_profiles(organization: Organization) -> lis
     ]
 
 
-def _get_mobile_performance_profiles(organization: Organization) -> list[dict[str, Any]]:
+def _get_mobile_performance_profiles(
+    organization: Organization,
+) -> list[dict[str, Any]]:
     if not features.has(
         "organizations:performance-calculate-mobile-perf-score-relay", organization
     ):
@@ -986,8 +998,16 @@ def _get_mobile_performance_profiles(organization: Organization) -> list[dict[st
                     {
                         "op": "or",
                         "inner": [
-                            {"op": "eq", "name": "event.sdk.name", "value": "sentry.cocoa"},
-                            {"op": "eq", "name": "event.sdk.name", "value": "sentry.java.android"},
+                            {
+                                "op": "eq",
+                                "name": "event.sdk.name",
+                                "value": "sentry.cocoa",
+                            },
+                            {
+                                "op": "eq",
+                                "name": "event.sdk.name",
+                                "value": "sentry.java.android",
+                            },
                         ],
                     },
                     {"op": "eq", "name": "event.contexts.trace.op", "value": "ui.load"},
@@ -1064,7 +1084,10 @@ def _get_project_config(
         # is however currently both only applied to transaction metrics in
         # Relay, and only used to tag transaction metrics in Sentry.
         add_experimental_config(
-            config, "metricConditionalTagging", get_metric_conditional_tagging_rules, project
+            config,
+            "metricConditionalTagging",
+            get_metric_conditional_tagging_rules,
+            project,
         )
 
         if metric_extraction := get_metric_extraction_config(project):

+ 49 - 18
src/sentry/testutils/cases.py

@@ -674,7 +674,9 @@ class PerformanceIssueTestCase(BaseTestCase):
                 side_effect=detect_performance_problems_interceptor,
             ),
             mock.patch.object(
-                issue_type, "noise_config", new=NoiseConfig(noise_limit, timedelta(minutes=1))
+                issue_type,
+                "noise_config",
+                new=NoiseConfig(noise_limit, timedelta(minutes=1)),
             ),
             override_options(
                 {"performance.issues.all.problem-detection": 1.0, detector_option: 1.0}
@@ -846,7 +848,8 @@ class APITestCaseMixin:
             return response
 
         with mock.patch(
-            "sentry.hybridcloud.apigateway.proxy.external_request", new=proxy_raw_request
+            "sentry.hybridcloud.apigateway.proxy.external_request",
+            new=proxy_raw_request,
         ):
             yield
 
@@ -1007,7 +1010,8 @@ class PermissionTestCase(TestCase):
         super().setUp()
         self.owner = self.create_user(is_superuser=False)
         self.organization = self.create_organization(
-            owner=self.owner, flags=0  # disable default allow_joinleave access
+            owner=self.owner,
+            flags=0,  # disable default allow_joinleave access
         )
         self.team = self.create_team(organization=self.organization)
 
@@ -1210,7 +1214,9 @@ class IntegrationTestCase(TestCase):
         self.request = self.make_request(self.user)
         # XXX(dcramer): this is a bit of a hack, but it helps contain this test
         self.pipeline = IntegrationPipeline(
-            request=self.request, organization=rpc_organization, provider_key=self.provider.key
+            request=self.request,
+            organization=rpc_organization,
+            provider_key=self.provider.key,
         )
 
         self.init_path = reverse(
@@ -1365,12 +1371,14 @@ class SnubaTestCase(BaseTestCase):
         data = [self.__wrap_group(group)]
         assert (
             requests.post(
-                settings.SENTRY_SNUBA + "/tests/entities/outcomes/insert", data=json.dumps(data)
+                settings.SENTRY_SNUBA + "/tests/entities/outcomes/insert",
+                data=json.dumps(data),
             ).status_code
             == 200
         )
 
     def store_span(self, span, is_eap=False):
+        span["ingest_in_eap"] = is_eap
         assert (
             requests.post(
                 settings.SENTRY_SNUBA + f"/tests/entities/{'eap_' if is_eap else ''}spans/insert",
@@ -1380,6 +1388,8 @@ class SnubaTestCase(BaseTestCase):
         )
 
     def store_spans(self, spans, is_eap=False):
+        for span in spans:
+            span["ingest_in_eap"] = is_eap
         assert (
             requests.post(
                 settings.SENTRY_SNUBA + f"/tests/entities/{'eap_' if is_eap else ''}spans/insert",
@@ -1495,7 +1505,8 @@ class SnubaTestCase(BaseTestCase):
 
         assert (
             requests.post(
-                settings.SENTRY_SNUBA + "/tests/entities/events/insert", data=json.dumps(events)
+                settings.SENTRY_SNUBA + "/tests/entities/events/insert",
+                data=json.dumps(events),
             ).status_code
             == 200
         )
@@ -1736,7 +1747,6 @@ class BaseMetricsTestCase(SnubaTestCase):
         aggregation_option: AggregationOption | None = None,
         sampling_weight: int | None = None,
     ) -> None:
-
         parsed = parse_mri(mri)
         metric_type = parsed.entity
         use_case_id = UseCaseID(parsed.namespace)
@@ -1853,7 +1863,6 @@ class BaseMetricsTestCase(SnubaTestCase):
 
 
 class BaseMetricsLayerTestCase(BaseMetricsTestCase):
-
     # In order to avoid complexity and edge cases while working on tests, all children of this class should use
     # this mocked time, except in case in which a specific time is required. This is suggested because working
     # with time ranges in metrics is very error-prone and requires an in-depth knowledge of the underlying
@@ -2385,7 +2394,8 @@ class OutcomesSnubaTest(TestCase):
 
         assert (
             requests.post(
-                settings.SENTRY_SNUBA + "/tests/entities/outcomes/insert", data=json.dumps(outcomes)
+                settings.SENTRY_SNUBA + "/tests/entities/outcomes/insert",
+                data=json.dumps(outcomes),
             ).status_code
             == 200
         )
@@ -2442,7 +2452,8 @@ class ProfilesSnubaTestCase(
             functions_payload.update(extras)
 
         response = requests.post(
-            settings.SENTRY_SNUBA + "/tests/entities/functions/insert", json=[functions_payload]
+            settings.SENTRY_SNUBA + "/tests/entities/functions/insert",
+            json=[functions_payload],
         )
         assert response.status_code == 200
 
@@ -2545,12 +2556,19 @@ class IntegrationRepositoryTestCase(APITestCase):
 
     @assume_test_silo_mode(SiloMode.REGION)
     def create_repository(
-        self, repository_config, integration_id, organization_slug=None, add_responses=True
+        self,
+        repository_config,
+        integration_id,
+        organization_slug=None,
+        add_responses=True,
     ):
         if add_responses:
             self.add_create_repository_responses(repository_config)
         if not integration_id:
-            data = {"provider": self.provider_name, "identifier": repository_config["id"]}
+            data = {
+                "provider": self.provider_name,
+                "identifier": repository_config["id"],
+            }
         else:
             data = {
                 "provider": self.provider_name,
@@ -2654,7 +2672,9 @@ class OrganizationDashboardWidgetTestCase(APITestCase):
         super().setUp()
         self.login_as(self.user)
         self.dashboard = Dashboard.objects.create(
-            title="Dashboard 1", created_by_id=self.user.id, organization=self.organization
+            title="Dashboard 1",
+            created_by_id=self.user.id,
+            organization=self.organization,
         )
         self.anon_users_query: _QueryDict = {
             "name": "Anonymous Users",
@@ -2755,7 +2775,10 @@ class OrganizationDashboardWidgetTestCase(APITestCase):
     def create_user_member_role(self):
         self.user = self.create_user(is_superuser=False)
         self.create_member(
-            user=self.user, organization=self.organization, role="member", teams=[self.team]
+            user=self.user,
+            organization=self.organization,
+            role="member",
+            teams=[self.team],
         )
         self.login_as(self.user)
 
@@ -2905,7 +2928,9 @@ class ActivityTestCase(TestCase):
         release.add_project(self.project)
         release.add_project(self.project2)
         deploy = Deploy.objects.create(
-            release=release, organization_id=self.org.id, environment_id=self.environment.id
+            release=release,
+            organization_id=self.org.id,
+            environment_id=self.environment.id,
         )
 
         return release, deploy
@@ -3237,7 +3262,9 @@ class MonitorTestCase(APITestCase):
         }
 
         return MonitorEnvironment.objects.create(
-            monitor=monitor, environment_id=environment.id, **monitorenvironment_defaults
+            monitor=monitor,
+            environment_id=environment.id,
+            **monitorenvironment_defaults,
         )
 
     def _create_issue_alert_rule(self, monitor, exclude_slug_filter=False):
@@ -3319,7 +3346,8 @@ class UptimeTestCaseMixin:
             "sentry.uptime.rdap.query.resolve_hostname", return_value="192.168.0.1"
         )
         self.mock_resolve_rdap_provider_ctx = mock.patch(
-            "sentry.uptime.rdap.query.resolve_rdap_provider", return_value="https://fake.com/"
+            "sentry.uptime.rdap.query.resolve_rdap_provider",
+            return_value="https://fake.com/",
         )
         self.mock_requests_get_ctx = mock.patch("sentry.uptime.rdap.query.requests.get")
         self.mock_resolve_hostname = self.mock_resolve_hostname_ctx.__enter__()
@@ -3349,7 +3377,10 @@ class UptimeTestCase(UptimeTestCaseMixin, TestCase):
             "guid": uuid.uuid4().hex,
             "subscription_id": subscription_id,
             "status": status,
-            "status_reason": {"type": CHECKSTATUSREASONTYPE_TIMEOUT, "description": "it timed out"},
+            "status_reason": {
+                "type": CHECKSTATUSREASONTYPE_TIMEOUT,
+                "description": "it timed out",
+            },
             "span_id": uuid.uuid4().hex,
             "trace_id": uuid.uuid4().hex,
             "scheduled_check_time_ms": int(scheduled_check_time.timestamp() * 1000),