
ref: delete some dead code in metrics / sentry_metrics (#82407)

candidates for deletion were found automatically using
[dead](https://github.com/asottile/dead)

commits are split by deleted function (each noting when the code was
last referenced)
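
for reference, running it is just this, from the repo root (output line approximated from dead's README; exact flags and format may differ by version):

```console
$ pip install dead
$ dead
COUNT_HAS_TXN is never read, defined in src/sentry/api/endpoints/organization_metrics_meta.py:14
```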

anthony sottile · 2 months ago
commit af4c71a0ea

src/sentry/api/endpoints/organization_metrics_meta.py (+0 -1)

@@ -11,7 +11,6 @@ from sentry.search.events.fields import get_function_alias
 from sentry.snuba import metrics_performance
 
 COUNT_UNPARAM = "count_unparameterized_transactions()"
-COUNT_HAS_TXN = "count_has_transaction_name()"
 COUNT_NULL = "count_null_transactions()"
 
 

src/sentry/integrations/slack/metrics.py (+0 -22)

@@ -17,28 +17,6 @@ SLACK_METRIC_ALERT_FAILURE_DATADOG_METRIC = "sentry.integrations.slack.metric_al
 SLACK_NOTIFY_RECIPIENT_SUCCESS_DATADOG_METRIC = "sentry.integrations.slack.notify_recipient.success"
 SLACK_NOTIFY_RECIPIENT_FAILURE_DATADOG_METRIC = "sentry.integrations.slack.notify_recipient.failure"
 
-# Bot commands
-SLACK_BOT_COMMAND_LINK_IDENTITY_SUCCESS_DATADOG_METRIC = (
-    "sentry.integrations.slack.link_identity_view.success"
-)
-SLACK_BOT_COMMAND_LINK_IDENTITY_FAILURE_DATADOG_METRIC = (
-    "sentry.integrations.slack.link_identity_view.failure"
-)
-SLACK_BOT_COMMAND_UNLINK_IDENTITY_SUCCESS_DATADOG_METRIC = (
-    "sentry.integrations.slack.unlink_identity_view.success"
-)
-SLACK_BOT_COMMAND_UNLINK_IDENTITY_FAILURE_DATADOG_METRIC = (
-    "sentry.integrations.slack.unlink_identity_view.failure"
-)
-SLACK_BOT_COMMAND_UNLINK_TEAM_SUCCESS_DATADOG_METRIC = (
-    "sentry.integrations.slack.unlink_team.success"
-)
-SLACK_BOT_COMMAND_UNLINK_TEAM_FAILURE_DATADOG_METRIC = (
-    "sentry.integrations.slack.unlink_team.failure"
-)
-SLACK_BOT_COMMAND_LINK_TEAM_SUCCESS_DATADOG_METRIC = "sentry.integrations.slack.link_team.success"
-SLACK_BOT_COMMAND_LINK_TEAM_FAILURE_DATADOG_METRIC = "sentry.integrations.slack.link_team.failure"
-
 # Webhooks
 SLACK_WEBHOOK_DM_ENDPOINT_SUCCESS_DATADOG_METRIC = "sentry.integrations.slack.dm_endpoint.success"
 SLACK_WEBHOOK_DM_ENDPOINT_FAILURE_DATADOG_METRIC = "sentry.integrations.slack.dm_endpoint.failure"

src/sentry/search/events/builder/metrics.py (+0 -4)

@@ -1953,10 +1953,6 @@ class TopMetricsQueryBuilder(TimeseriesMetricQueryBuilder):
                 [column for column in self.columns if column not in self.aggregates]
             )
 
-    @cached_property
-    def non_aggregate_columns(self) -> list[str]:
-        return list(set(self.original_selected_columns) - set(self.timeseries_columns))
-
     @property
     def translated_groupby(self) -> list[str]:
         """Get the names of the groupby columns to create the series names"""

src/sentry/search/events/datasets/metrics.py (+0 -5)

@@ -80,11 +80,6 @@ class MetricsDatasetConfig(DatasetConfig):
         self.builder.metric_ids.add(metric_id)
         return metric_id
 
-    def resolve_value(self, value: str) -> int:
-        value_id = self.builder.resolve_tag_value(value)
-
-        return value_id
-
     @property
     def should_skip_interval_calculation(self):
         return self.builder.builder_config.skip_time_conditions and (

src/sentry/search/events/datasets/spans_metrics.py (+1 -277)

@@ -5,7 +5,7 @@ from datetime import datetime
 from typing import TypedDict
 
 import sentry_sdk
-from snuba_sdk import AliasedExpression, Column, Condition, Function, Identifier, Op, OrderBy
+from snuba_sdk import Column, Condition, Function, Identifier, Op, OrderBy
 
 from sentry.api.event_search import SearchFilter
 from sentry.exceptions import IncompatibleMetricsQuery, InvalidSearchQuery
@@ -16,7 +16,6 @@ from sentry.search.events.datasets.base import DatasetConfig
 from sentry.search.events.fields import SnQLStringArg, get_function_alias
 from sentry.search.events.types import SelectType, WhereType
 from sentry.search.utils import DEVICE_CLASS
-from sentry.snuba.metrics.naming_layer.mri import SpanMRI
 from sentry.snuba.referrer import Referrer
 
 
@@ -1362,278 +1361,3 @@ class SpansMetricsDatasetConfig(DatasetConfig):
     @property
     def orderby_converter(self) -> Mapping[str, OrderBy]:
         return {}
-
-
-class SpansMetricsLayerDatasetConfig(DatasetConfig):
-    missing_function_error = IncompatibleMetricsQuery
-
-    def __init__(self, builder: spans_metrics.SpansMetricsQueryBuilder):
-        self.builder = builder
-        self.total_span_duration: float | None = None
-
-    def resolve_mri(self, value: str) -> Column:
-        """Given the public facing column name resolve it to the MRI and return a Column"""
-        # If the query builder has not detected a transaction use the light self time metric to get a performance boost
-        if value == "span.self_time" and not self.builder.has_transaction:
-            return Column(constants.SELF_TIME_LIGHT)
-        else:
-            return Column(constants.SPAN_METRICS_MAP[value])
-
-    @property
-    def search_filter_converter(
-        self,
-    ) -> Mapping[str, Callable[[SearchFilter], WhereType | None]]:
-        return {}
-
-    @property
-    def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:
-        return {
-            constants.SPAN_MODULE_ALIAS: lambda alias: field_aliases.resolve_span_module(
-                self.builder, alias
-            )
-        }
-
-    @property
-    def function_converter(self) -> Mapping[str, fields.MetricsFunction]:
-        """Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since
-        the Metric Layer will actually handle which dataset each function goes to
-        """
-
-        function_converter = {
-            function.name: function
-            for function in [
-                fields.MetricsFunction(
-                    "count_unique",
-                    required_args=[
-                        fields.MetricArg(
-                            "column",
-                            allowed_columns=["user"],
-                            allow_custom_measurements=False,
-                        )
-                    ],
-                    snql_metric_layer=lambda args, alias: Function(
-                        "count_unique",
-                        [self.resolve_mri("user")],
-                        alias,
-                    ),
-                    default_result_type="integer",
-                ),
-                fields.MetricsFunction(
-                    "epm",
-                    snql_metric_layer=lambda args, alias: Function(
-                        "rate",
-                        [
-                            self.resolve_mri("span.self_time"),
-                            args["interval"],
-                            60,
-                        ],
-                        alias,
-                    ),
-                    optional_args=[fields.IntervalDefault("interval", 1, None)],
-                    default_result_type="rate",
-                ),
-                fields.MetricsFunction(
-                    "eps",
-                    snql_metric_layer=lambda args, alias: Function(
-                        "rate",
-                        [
-                            self.resolve_mri("span.self_time"),
-                            args["interval"],
-                            1,
-                        ],
-                        alias,
-                    ),
-                    optional_args=[fields.IntervalDefault("interval", 1, None)],
-                    default_result_type="rate",
-                ),
-                fields.MetricsFunction(
-                    "count",
-                    snql_metric_layer=lambda args, alias: Function(
-                        "count",
-                        [
-                            self.resolve_mri("span.self_time"),
-                        ],
-                        alias,
-                    ),
-                    default_result_type="integer",
-                ),
-                fields.MetricsFunction(
-                    "sum",
-                    optional_args=[
-                        fields.with_default(
-                            "span.self_time",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=constants.SPAN_METRIC_SUMMABLE_COLUMNS,
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    snql_metric_layer=lambda args, alias: Function(
-                        "sum",
-                        [self.resolve_mri(args["column"])],
-                        alias,
-                    ),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "avg",
-                    optional_args=[
-                        fields.with_default(
-                            "span.self_time",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS.union(
-                                    constants.SPAN_METRIC_BYTES_COLUMNS
-                                ),
-                            ),
-                        ),
-                    ],
-                    snql_metric_layer=lambda args, alias: Function(
-                        "avg",
-                        [self.resolve_mri(args["column"])],
-                        alias,
-                    ),
-                    result_type_fn=self.reflective_result_type(),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "percentile",
-                    required_args=[
-                        fields.with_default(
-                            "span.self_time",
-                            fields.MetricArg(
-                                "column", allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS
-                            ),
-                        ),
-                        fields.NumberRange("percentile", 0, 1),
-                    ],
-                    snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile(
-                        args,
-                        alias,
-                        self.resolve_mri,
-                    ),
-                    result_type_fn=self.reflective_result_type(),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p50",
-                    optional_args=[
-                        fields.with_default(
-                            "span.self_time",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile(
-                        args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=0.50
-                    ),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p75",
-                    optional_args=[
-                        fields.with_default(
-                            "span.self_time",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile(
-                        args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=0.75
-                    ),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p95",
-                    optional_args=[
-                        fields.with_default(
-                            "span.self_time",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile(
-                        args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=0.95
-                    ),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p99",
-                    optional_args=[
-                        fields.with_default(
-                            "span.self_time",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile(
-                        args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=0.99
-                    ),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p100",
-                    optional_args=[
-                        fields.with_default(
-                            "span.self_time",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=constants.SPAN_METRIC_DURATION_COLUMNS,
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    snql_metric_layer=lambda args, alias: function_aliases.resolve_metrics_layer_percentile(
-                        args=args, alias=alias, resolve_mri=self.resolve_mri, fixed_percentile=1.0
-                    ),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "http_error_count",
-                    snql_metric_layer=lambda args, alias: AliasedExpression(
-                        Column(
-                            SpanMRI.HTTP_ERROR_COUNT_LIGHT.value
-                            if not self.builder.has_transaction
-                            else SpanMRI.HTTP_ERROR_COUNT.value
-                        ),
-                        alias,
-                    ),
-                    default_result_type="integer",
-                ),
-                fields.MetricsFunction(
-                    "http_error_rate",
-                    snql_metric_layer=lambda args, alias: AliasedExpression(
-                        Column(
-                            SpanMRI.HTTP_ERROR_RATE_LIGHT.value
-                            if not self.builder.has_transaction
-                            else SpanMRI.HTTP_ERROR_RATE.value
-                        ),
-                        alias,
-                    ),
-                    default_result_type="percentage",
-                ),
-            ]
-        }
-
-        for alias, name in constants.SPAN_FUNCTION_ALIASES.items():
-            if name in function_converter:
-                function_converter[alias] = function_converter[name].alias_as(alias)
-
-        return function_converter
-
-    @property
-    def orderby_converter(self) -> Mapping[str, OrderBy]:
-        return {}

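
the one piece of logic worth calling out from the deleted class is the `resolve_mri` fast path: when the query builder has not detected a transaction, it swaps in the cheaper "light" self-time metric. a minimal standalone sketch of that dispatch, with placeholder MRI strings (the real values live in `constants.SELF_TIME_LIGHT` and `constants.SPAN_METRICS_MAP`):

```python
# sketch of the deleted resolve_mri dispatch; the MRI strings below are
# assumed placeholders, not necessarily the real constants
SELF_TIME_LIGHT = "d:spans/exclusive_time_light@millisecond"  # assumed
SPAN_METRICS_MAP = {"span.self_time": "d:spans/exclusive_time@millisecond"}  # assumed

def resolve_mri(value: str, has_transaction: bool) -> str:
    # queries that never filter on transaction can read the lighter metric,
    # which is cheaper, for a performance boost
    if value == "span.self_time" and not has_transaction:
        return SELF_TIME_LIGHT
    return SPAN_METRICS_MAP[value]
```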
src/sentry/sentry_metrics/configuration.py (+0 -22)

@@ -27,8 +27,6 @@ class UseCaseKey(Enum):
 # backwards compatibility
 RELEASE_HEALTH_PG_NAMESPACE = "releasehealth"
 PERFORMANCE_PG_NAMESPACE = "performance"
-RELEASE_HEALTH_CS_NAMESPACE = "releasehealth.cs"
-PERFORMANCE_CS_NAMESPACE = "performance.cs"
 
 RELEASE_HEALTH_SCHEMA_VALIDATION_RULES_OPTION_NAME = (
     "sentry-metrics.indexer.release-health.schema-validation-rules"
@@ -172,23 +170,3 @@ def initialize_main_process_state(config: MetricsIngestConfiguration) -> None:
     global_tag_map = {"pipeline": config.internal_metrics_tag or ""}
 
     add_global_tags(_all_threads=True, **global_tag_map)
-
-
-HARD_CODED_UNITS = {"span.duration": "millisecond"}
-ALLOWED_TYPES = {"c", "d", "s", "g"}
-
-# METRICS_AGGREGATES specifies the aggregates that are available for a metric type - AGGREGATES_TO_METRICS reverses this,
-# and provides a map from the aggregate to the metric type in the form {'count': 'c', 'avg':'g', ...}. This is needed
-# when the UI lets the user select the aggregate, and the backend infers the metric_type from it. It is programmatic
-# and not hard-coded, so that in case of a change, the two mappings are aligned.
-METRIC_TYPE_TO_AGGREGATE = {
-    "c": ["count"],
-    "g": ["avg", "min", "max", "sum"],
-    "d": ["p50", "p75", "p90", "p95", "p99"],
-    "s": ["count_unique"],
-}
-AGGREGATE_TO_METRIC_TYPE = {
-    aggregate: metric_type
-    for metric_type, aggregate_list in METRIC_TYPE_TO_AGGREGATE.items()
-    for aggregate in aggregate_list
-}
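
for context, the reversed mapping that the deleted comprehension built comes out to the following (derived directly from the table above, just spelled out):

```python
# the deleted AGGREGATE_TO_METRIC_TYPE comprehension evaluated to exactly this
AGGREGATE_TO_METRIC_TYPE = {
    "count": "c",
    "avg": "g", "min": "g", "max": "g", "sum": "g",
    "p50": "d", "p75": "d", "p90": "d", "p95": "d", "p99": "d",
    "count_unique": "s",
}
```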

src/sentry/sentry_metrics/consumers/indexer/batch.py (+1 -22)

@@ -1,7 +1,7 @@
 import logging
 import random
 from collections import defaultdict
-from collections.abc import Callable, Iterable, Mapping, MutableMapping, MutableSequence, Sequence
+from collections.abc import Callable, Iterable, Mapping, MutableMapping, MutableSequence
 from dataclasses import dataclass
 from typing import Any, cast
 
@@ -248,27 +248,6 @@ class IndexerBatch:
             )
             raise ValueError(f"Invalid metric tags: {tags}")
 
-    @metrics.wraps("process_messages.filter_messages")
-    def filter_messages(self, keys_to_remove: Sequence[BrokerMeta]) -> None:
-        # XXX: it is useful to be able to get a sample of organization ids that are affected by rate limits, but this is really slow.
-        for broker_meta in keys_to_remove:
-            if _should_sample_debug_log():
-                sentry_sdk.set_tag(
-                    "sentry_metrics.organization_id",
-                    self.parsed_payloads_by_meta[broker_meta]["org_id"],
-                )
-                sentry_sdk.set_tag(
-                    "sentry_metrics.metric_name", self.parsed_payloads_by_meta[broker_meta]["name"]
-                )
-                logger.error(
-                    "process_messages.dropped_message",
-                    extra={
-                        "reason": "cardinality_limit",
-                    },
-                )
-
-        self.filtered_msg_meta.update(keys_to_remove)
-
     @metrics.wraps("process_messages.extract_strings")
     def extract_strings(self) -> Mapping[UseCaseID, Mapping[OrgId, set[str]]]:
         strings: Mapping[UseCaseID, Mapping[OrgId, set[str]]] = defaultdict(

src/sentry/sentry_metrics/consumers/indexer/common.py (+0 -3)

@@ -26,9 +26,6 @@ MessageBatch = list[Message[KafkaPayload]]
 
 logger = logging.getLogger(__name__)
 
-DEFAULT_QUEUED_MAX_MESSAGE_KBYTES = 50000
-DEFAULT_QUEUED_MIN_MESSAGES = 100000
-
 
 @dataclass(frozen=True)
 class IndexerOutputMessageBatch:

src/sentry/sentry_metrics/indexer/id_generator.py (+0 -54)

@@ -1,54 +0,0 @@
-import random
-import time
-
-_VERSION_BITS = 4
-_TS_BITS = 32
-_RANDOM_BITS = 28
-_TOTAL_BITS = _VERSION_BITS + _TS_BITS + _RANDOM_BITS
-assert _TOTAL_BITS == 64
-
-_VERSION = 2
-
-# Warning! The version must be an even number as this is already
-# written to a BigInt field in Postgres
-assert _VERSION % 2 == 0
-
-# 1st January 2022
-_INDEXER_EPOCH_START = 1641024000
-
-
-def reverse_bits(number: int, bit_size: int) -> int:
-    return int(bin(number)[2:].zfill(bit_size)[::-1], 2)
-
-
-# we will have room b/n version and time since for a while
-# so let's reverse the version bits to grow to the right
-# instead of left should we need more than 3 bits for version
-
-_VERSION_PREFIX = reverse_bits(_VERSION, _VERSION_BITS)
-
-
-def get_id() -> int:
-    """
-    Generates IDs for use by indexer storages that do not have autoincrement sequences.
-
-    This function does not provide any guarantee of uniqueness, just a low probability of collisions.
-    It relies on the database to be strongly consistent and reject writes with duplicate IDs. These should
-    be retried with a newly generated ID.
-
-    The ID generated is in roughly incrementing order.
-
-    Metric IDs are 64 bit but this function only generates IDs that fit in 63 bits. The leading bit is always zero.
-    This is because they were stored in Postgres as BigInt (signed 64 bit) and we do not want to change that now.
-    In ClickHouse it is an unsigned 64 bit integer.
-    """
-
-    now = int(time.time())
-    time_since_epoch = now - _INDEXER_EPOCH_START
-    rand = random.getrandbits(_RANDOM_BITS)
-
-    id = _VERSION_PREFIX << (_TOTAL_BITS - _VERSION_BITS)
-    id |= time_since_epoch << (_TOTAL_BITS - _VERSION_BITS - _TS_BITS)
-    id |= rand
-
-    return id
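
since IDs written by this generator may still exist in Postgres and ClickHouse, here is a small standalone sketch of the bit layout the docstring describes, reconstructed from the deleted code (the assertions hold until the 32-bit timestamp field overflows, roughly year 2158):

```python
import random
import time

# reconstruction of the deleted 64-bit layout: 4 version bits (reversed),
# then 32 timestamp bits, then 28 random bits
_VERSION_BITS, _TS_BITS, _RANDOM_BITS = 4, 32, 28
_TOTAL_BITS = _VERSION_BITS + _TS_BITS + _RANDOM_BITS

# reverse_bits(2, 4): "0010" reversed is "0100", i.e. 4 -- so the top bit is 0
_VERSION_PREFIX = int(bin(2)[2:].zfill(_VERSION_BITS)[::-1], 2)
assert _VERSION_PREFIX == 4

ts = int(time.time()) - 1641024000  # seconds since the 2022-01-01 indexer epoch
generated = (
    (_VERSION_PREFIX << (_TOTAL_BITS - _VERSION_BITS))
    | (ts << (_TOTAL_BITS - _VERSION_BITS - _TS_BITS))
    | random.getrandbits(_RANDOM_BITS)
)

assert generated < 2**63     # leading bit is zero: fits a signed Postgres BigInt
assert generated >> 60 == 4  # reversed version prefix occupies the high nibble
```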

src/sentry/sentry_metrics/indexer/postgres/postgres_v2.py (+0 -1)

@@ -31,7 +31,6 @@ from sentry.utils import metrics
 __all__ = ["PostgresIndexer"]
 
 
-_INDEXER_CACHE_METRIC = "sentry_metrics.indexer.memcache"
 _INDEXER_DB_METRIC = "sentry_metrics.indexer.postgres"
 
 _PARTITION_KEY = "pg"

Some files were not shown because too many files changed in this diff