
chore(profiling): Remove unused profile functions metrics dataset (#81398)

This dataset is no longer in use, so we can delete it.
Tony Xiao, 3 months ago
Parent
Commit
7db47f1c87

+ 0 - 2
src/sentry/api/endpoints/organization_events_stats.py

@@ -22,7 +22,6 @@ from sentry.snuba import (
     functions,
     metrics_enhanced_performance,
     metrics_performance,
-    profile_functions_metrics,
     spans_eap,
     spans_indexed,
     spans_metrics,
@@ -253,7 +252,6 @@ class OrganizationEventsStatsEndpoint(OrganizationEventsV2EndpointBase):
                         functions,
                         metrics_performance,
                         metrics_enhanced_performance,
-                        profile_functions_metrics,
                         spans_indexed,
                         spans_metrics,
                         spans_eap,
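
The endpoint gates which dataset modules a request may target by membership in this tuple, so dropping the entry (together with the import above) is enough to turn the dataset off for events-stats. A minimal sketch of that allow-list pattern, with hypothetical names rather than Sentry's actual code:

from types import ModuleType

# Hypothetical allow-list mirroring the tuple in the hunk above; the real
# endpoint lists modules such as functions and metrics_performance.
ALLOWED_DATASETS: tuple[ModuleType, ...] = ()

def dataset_allowed(dataset: ModuleType) -> bool:
    # Simple membership test: datasets removed from the tuple are rejected.
    return dataset in ALLOWED_DATASETS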

+ 0 - 1
src/sentry/search/events/builder/base.py

@@ -87,7 +87,6 @@ class BaseQueryBuilder:
     organization_column: str = "organization.id"
     function_alias_prefix: str | None = None
     spans_metrics_builder = False
-    profile_functions_metrics_builder = False
     entity: Entity | None = None
     config_class: type[DatasetConfig] | None = None
     duration_fields: set[str] = set()
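
BaseQueryBuilder advertises dataset capabilities through plain class attributes; each dataset-specific subclass overrides only the flag it owns, which is why the now-unused profile_functions_metrics_builder flag can simply be dropped from the base class. A sketch of the pattern (names illustrative):

class BaseBuilder:
    # Capability flags default to False on the base class.
    spans_metrics_builder = False

class SpansMetricsBuilder(BaseBuilder):
    # A dataset-specific subclass flips the one flag it needs.
    spans_metrics_builder = True

assert not BaseBuilder.spans_metrics_builder
assert SpansMetricsBuilder.spans_metrics_builder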

+ 1 - 3
src/sentry/search/events/builder/metrics.py

@@ -444,8 +444,6 @@ class MetricsQueryBuilder(BaseQueryBuilder):
             return UseCaseID.SPANS
         elif self.is_performance:
             return UseCaseID.TRANSACTIONS
-        elif self.profile_functions_metrics_builder:
-            return UseCaseID.PROFILES
         else:
             return UseCaseID.SESSIONS
 
@@ -759,7 +757,7 @@ class MetricsQueryBuilder(BaseQueryBuilder):
 
     def resolve_tag_value(self, value: str) -> int | str | None:
         # We only use the indexer for alerts queries
-        if self.is_performance or self.use_metrics_layer or self.profile_functions_metrics_builder:
+        if self.is_performance or self.use_metrics_layer:
             return value
         return self.resolve_metric_index(value)
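
With the profiles branch gone, resolve_tag_value keeps only two modes: performance and metrics-layer queries pass tag values through as raw strings, while everything else resolves them to indexed integer ids. A standalone sketch of that dispatch (the dict stands in for the real string indexer):

def resolve_tag_value(value: str, is_performance: bool,
                      use_metrics_layer: bool,
                      indexer: dict[str, int]) -> int | str | None:
    if is_performance or use_metrics_layer:
        return value  # these datasets accept raw tag strings
    return indexer.get(value)  # others need the indexed integer id

assert resolve_tag_value("prod", True, False, {}) == "prod"
assert resolve_tag_value("prod", False, False, {"prod": 7}) == 7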
 

+ 0 - 60
src/sentry/search/events/builder/profile_functions_metrics.py

@@ -1,60 +0,0 @@
-from sentry.search.events.builder.metrics import (
-    MetricsQueryBuilder,
-    TimeseriesMetricQueryBuilder,
-    TopMetricsQueryBuilder,
-)
-from sentry.search.events.datasets.profile_functions_metrics import (
-    ProfileFunctionsMetricsDatasetConfig,
-)
-from sentry.search.events.types import SelectType
-
-
-class ProfileFunctionsMetricsQueryBuilder(MetricsQueryBuilder):
-    requires_organization_condition = True
-    profile_functions_metrics_builder = True
-    config_class = ProfileFunctionsMetricsDatasetConfig
-
-    column_remapping = {
-        # We want to remap `message` to `name` for the free
-        # text search use case so that it searches the `name`
-        # (function name) when the user performs a free text search
-        "message": "name",
-    }
-    default_metric_tags = {
-        "project_id",
-        "fingerprint",
-        "function",
-        "package",
-        "is_application",
-        "platform",
-        "environment",
-        "release",
-    }
-
-    @property
-    def use_default_tags(self) -> bool:
-        return True
-
-    def get_field_type(self, field: str) -> str | None:
-        if field in self.meta_resolver_map:
-            return self.meta_resolver_map[field]
-        return None
-
-    def resolve_select(
-        self, selected_columns: list[str] | None, equations: list[str] | None
-    ) -> list[SelectType]:
-        if selected_columns and "transaction" in selected_columns:
-            self.has_transaction = True  # if always true can we skip this?
-        return super().resolve_select(selected_columns, equations)
-
-
-class TimeseriesProfileFunctionsMetricsQueryBuilder(
-    ProfileFunctionsMetricsQueryBuilder, TimeseriesMetricQueryBuilder
-):
-    pass
-
-
-class TopProfileFunctionsMetricsQueryBuilder(
-    TimeseriesProfileFunctionsMetricsQueryBuilder, TopMetricsQueryBuilder
-):
-    pass
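
The deleted module composed its three builders through cooperative multiple inheritance: the timeseries variant mixed the dataset base with TimeseriesMetricQueryBuilder, and the top-events variant stacked TopMetricsQueryBuilder on top of that. A sketch showing how Python's C3 linearization orders attribute lookup in this arrangement:

class MetricsQueryBuilder: ...
class TimeseriesMetricQueryBuilder(MetricsQueryBuilder): ...

class ProfileFunctionsMetricsQueryBuilder(MetricsQueryBuilder):
    profile_functions_metrics_builder = True

class TimeseriesProfileFunctionsMetricsQueryBuilder(
    ProfileFunctionsMetricsQueryBuilder, TimeseriesMetricQueryBuilder
):
    pass

# C3 linearization puts the dataset mixin before the query-shape base:
print([c.__name__ for c in TimeseriesProfileFunctionsMetricsQueryBuilder.__mro__])
# ['TimeseriesProfileFunctionsMetricsQueryBuilder',
#  'ProfileFunctionsMetricsQueryBuilder', 'TimeseriesMetricQueryBuilder',
#  'MetricsQueryBuilder', 'object']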

+ 0 - 3
src/sentry/search/events/constants.py

@@ -367,9 +367,6 @@ SPAN_METRICS_MAP = {
     "mobile.frames_delay": "g:spans/mobile.frames_delay@second",
     "messaging.message.receive.latency": SPAN_MESSAGING_LATENCY,
 }
-PROFILE_METRICS_MAP = {
-    "function.duration": "d:profiles/function.duration@millisecond",
-}
 # 50 to match the size of tables in the UI + 1 for pagination reasons
 METRICS_MAX_LIMIT = 101
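
The deleted PROFILE_METRICS_MAP entry maps the public field name to a metric resource identifier (MRI). Sentry MRIs follow the shape <type>:<namespace>/<name>@<unit>, where d marks a distribution. A small sketch splitting the deleted value into those parts:

import re

MRI_RE = re.compile(
    r"^(?P<type>[cdgs]):(?P<namespace>[^/]+)/(?P<name>[^@]+)@(?P<unit>.+)$"
)

match = MRI_RE.match("d:profiles/function.duration@millisecond")
assert match is not None
print(match.groupdict())
# {'type': 'd', 'namespace': 'profiles',
#  'name': 'function.duration', 'unit': 'millisecond'}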
 

+ 0 - 518
src/sentry/search/events/datasets/profile_functions_metrics.py

@@ -1,518 +0,0 @@
-from __future__ import annotations
-
-from collections.abc import Callable, Mapping
-from datetime import datetime
-
-from snuba_sdk import Column, Function, OrderBy
-
-from sentry.api.event_search import SearchFilter
-from sentry.exceptions import IncompatibleMetricsQuery, InvalidSearchQuery
-from sentry.search.events import constants, fields
-from sentry.search.events.builder import profile_functions_metrics
-from sentry.search.events.constants import PROJECT_ALIAS, PROJECT_NAME_ALIAS
-from sentry.search.events.datasets import field_aliases, filter_aliases, function_aliases
-from sentry.search.events.datasets.base import DatasetConfig
-from sentry.search.events.types import SelectType, WhereType
-
-
-class ProfileFunctionsMetricsDatasetConfig(DatasetConfig):
-    missing_function_error = IncompatibleMetricsQuery
-
-    def __init__(self, builder: profile_functions_metrics.ProfileFunctionsMetricsQueryBuilder):
-        self.builder = builder
-
-    def resolve_mri(self, value: str) -> Column:
-        return Column(constants.PROFILE_METRICS_MAP[value])
-
-    @property
-    def search_filter_converter(
-        self,
-    ) -> Mapping[str, Callable[[SearchFilter], WhereType | None]]:
-        return {
-            PROJECT_ALIAS: self._project_slug_filter_converter,
-            PROJECT_NAME_ALIAS: self._project_slug_filter_converter,
-        }
-
-    @property
-    def orderby_converter(self) -> Mapping[str, OrderBy]:
-        return {}
-
-    @property
-    def field_alias_converter(self) -> Mapping[str, Callable[[str], SelectType]]:
-        return {
-            PROJECT_ALIAS: self._resolve_project_slug_alias,
-            PROJECT_NAME_ALIAS: self._resolve_project_slug_alias,
-        }
-
-    def _project_slug_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
-        return filter_aliases.project_slug_converter(self.builder, search_filter)
-
-    def _resolve_project_slug_alias(self, alias: str) -> SelectType:
-        return field_aliases.resolve_project_slug_alias(self.builder, alias)
-
-    def resolve_metric(self, value: str) -> int:
-        # "function.duration" --> "d:profiles/function.duration@millisecond"
-        metric_id = self.builder.resolve_metric_index(
-            constants.PROFILE_METRICS_MAP.get(value, value)
-        )
-        # If it's still None its not a custom measurement
-        if metric_id is None:
-            raise IncompatibleMetricsQuery(f"Metric: {value} could not be resolved")
-        self.builder.metric_ids.add(metric_id)
-        return metric_id
-
-    def _resolve_avg(self, args, alias):
-        return Function(
-            "avgIf",
-            [
-                Column("value"),
-                Function("equals", [Column("metric_id"), args["metric_id"]]),
-            ],
-            alias,
-        )
-
-    def _resolve_cpm(
-        self,
-        args: Mapping[str, str | Column | SelectType | int | float],
-        alias: str | None,
-        extra_condition: Function | None = None,
-    ) -> SelectType:
-        assert (
-            self.builder.params.end is not None and self.builder.params.start is not None
-        ), f"params.end: {self.builder.params.end} - params.start: {self.builder.params.start}"
-        interval = (self.builder.params.end - self.builder.params.start).total_seconds()
-
-        base_condition = Function(
-            "equals",
-            [
-                Column("metric_id"),
-                self.resolve_metric("function.duration"),
-            ],
-        )
-        if extra_condition:
-            condition = Function("and", [base_condition, extra_condition])
-        else:
-            condition = base_condition
-
-        return Function(
-            "divide",
-            [
-                Function(
-                    "countIf",
-                    [
-                        Column("value"),
-                        condition,
-                    ],
-                ),
-                Function("divide", [interval, 60]),
-            ],
-            alias,
-        )
-
-    def _resolve_cpm_cond(
-        self,
-        args: Mapping[str, str | Column | SelectType | int | float | datetime],
-        cond: str,
-        alias: str | None,
-    ) -> SelectType:
-        timestmp = args["timestamp"]
-        if cond == "greater":
-            assert isinstance(self.builder.params.end, datetime) and isinstance(
-                timestmp, datetime
-            ), f"params.end: {self.builder.params.end} - timestmp: {timestmp}"
-            interval = (self.builder.params.end - timestmp).total_seconds()
-            # interval = interval
-        elif cond == "less":
-            assert isinstance(self.builder.params.start, datetime) and isinstance(
-                timestmp, datetime
-            ), f"params.start: {self.builder.params.start} - timestmp: {timestmp}"
-            interval = (timestmp - self.builder.params.start).total_seconds()
-        else:
-            raise InvalidSearchQuery(f"Unsupported condition for cpm: {cond}")
-
-        metric_id_condition = Function(
-            "equals", [Column("metric_id"), self.resolve_metric("function.duration")]
-        )
-
-        return Function(
-            "divide",
-            [
-                Function(
-                    "countIf",
-                    [
-                        Column("value"),
-                        Function(
-                            "and",
-                            [
-                                metric_id_condition,
-                                Function(
-                                    cond,
-                                    [
-                                        Column("timestamp"),
-                                        args["timestamp"],
-                                    ],
-                                ),
-                            ],
-                        ),  # close and condition
-                    ],
-                ),
-                Function("divide", [interval, 60]),
-            ],
-            alias,
-        )
-
-    def _resolve_cpm_delta(
-        self,
-        args: Mapping[str, str | Column | SelectType | int | float],
-        alias: str,
-    ) -> SelectType:
-        return Function(
-            "minus",
-            [
-                self._resolve_cpm_cond(args, "greater", None),
-                self._resolve_cpm_cond(args, "less", None),
-            ],
-            alias,
-        )
-
-    def _resolve_regression_score(
-        self,
-        args: Mapping[str, str | Column | SelectType | int | float | datetime],
-        alias: str | None = None,
-    ) -> SelectType:
-        return Function(
-            "minus",
-            [
-                Function(
-                    "multiply",
-                    [
-                        self._resolve_cpm_cond(args, "greater", None),
-                        function_aliases.resolve_metrics_percentile(
-                            args=args,
-                            alias=None,
-                            extra_conditions=[
-                                Function("greater", [Column("timestamp"), args["timestamp"]])
-                            ],
-                        ),
-                    ],
-                ),
-                Function(
-                    "multiply",
-                    [
-                        self._resolve_cpm_cond(args, "less", None),
-                        function_aliases.resolve_metrics_percentile(
-                            args=args,
-                            alias=None,
-                            extra_conditions=[
-                                Function("less", [Column("timestamp"), args["timestamp"]])
-                            ],
-                        ),
-                    ],
-                ),
-            ],
-            alias,
-        )
-
-    @property
-    def function_converter(self) -> Mapping[str, fields.MetricsFunction]:
-        """While the final functions in clickhouse must have their -Merge combinators in order to function, we don't
-        need to add them here since snuba has a FunctionMapper that will add it for us. Basically it turns expressions
-        like quantiles(0.9)(value) into quantilesMerge(0.9)(percentiles)
-        Make sure to update METRIC_FUNCTION_LIST_BY_TYPE when adding functions here, can't be a dynamic list since the
-        Metric Layer will actually handle which dataset each function goes to
-        """
-        resolve_metric_id = {
-            "name": "metric_id",
-            "fn": lambda args: self.resolve_metric(args["column"]),
-        }
-        function_converter = {
-            function.name: function
-            for function in [
-                fields.MetricsFunction(
-                    "count",
-                    snql_distribution=lambda args, alias: Function(
-                        "countIf",
-                        [
-                            Column("value"),
-                            Function(
-                                "equals",
-                                [
-                                    Column("metric_id"),
-                                    self.resolve_metric("function.duration"),
-                                ],
-                            ),
-                        ],
-                        alias,
-                    ),
-                    default_result_type="integer",
-                ),
-                fields.MetricsFunction(
-                    "cpm",  # calls per minute
-                    snql_distribution=lambda args, alias: self._resolve_cpm(args, alias),
-                    default_result_type="number",
-                ),
-                fields.MetricsFunction(
-                    "cpm_before",
-                    required_args=[fields.TimestampArg("timestamp")],
-                    snql_distribution=lambda args, alias: self._resolve_cpm_cond(
-                        args, "less", alias
-                    ),
-                    default_result_type="number",
-                ),
-                fields.MetricsFunction(
-                    "cpm_after",
-                    required_args=[fields.TimestampArg("timestamp")],
-                    snql_distribution=lambda args, alias: self._resolve_cpm_cond(
-                        args, "greater", alias
-                    ),
-                    default_result_type="number",
-                ),
-                fields.MetricsFunction(
-                    "cpm_delta",
-                    required_args=[fields.TimestampArg("timestamp")],
-                    snql_distribution=self._resolve_cpm_delta,
-                    default_result_type="number",
-                ),
-                fields.MetricsFunction(
-                    "percentile",
-                    required_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg("column", allowed_columns=["function.duration"]),
-                        ),
-                        fields.NumberRange("percentile", 0, 1),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=function_aliases.resolve_metrics_percentile,
-                    is_percentile=True,
-                    result_type_fn=self.reflective_result_type(),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p50",
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
-                        args=args, alias=alias, fixed_percentile=0.50
-                    ),
-                    is_percentile=True,
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p75",
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
-                        args=args, alias=alias, fixed_percentile=0.75
-                    ),
-                    is_percentile=True,
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p95",
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
-                        args=args, alias=alias, fixed_percentile=0.95
-                    ),
-                    is_percentile=True,
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "p99",
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
-                        args=args, alias=alias, fixed_percentile=0.99
-                    ),
-                    is_percentile=True,
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "avg",
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_gauge=self._resolve_avg,
-                    snql_distribution=self._resolve_avg,
-                    result_type_fn=self.reflective_result_type(),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "sum",
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=lambda args, alias: Function(
-                        "sumIf",
-                        [
-                            Column("value"),
-                            Function("equals", [Column("metric_id"), args["metric_id"]]),
-                        ],
-                        alias,
-                    ),
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "percentile_before",
-                    required_args=[
-                        fields.TimestampArg("timestamp"),
-                        fields.NumberRange("percentile", 0, 1),
-                    ],
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
-                        args=args,
-                        alias=alias,
-                        extra_conditions=[
-                            Function("less", [Column("timestamp"), args["timestamp"]])
-                        ],
-                    ),
-                    is_percentile=True,
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "percentile_after",
-                    required_args=[
-                        fields.TimestampArg("timestamp"),
-                        fields.NumberRange("percentile", 0, 1),
-                    ],
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=lambda args, alias: function_aliases.resolve_metrics_percentile(
-                        args=args,
-                        alias=alias,
-                        extra_conditions=[
-                            Function("greater", [Column("timestamp"), args["timestamp"]])
-                        ],
-                    ),
-                    is_percentile=True,
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "percentile_delta",
-                    required_args=[
-                        fields.TimestampArg("timestamp"),
-                        fields.NumberRange("percentile", 0, 1),
-                    ],
-                    optional_args=[
-                        fields.with_default(
-                            "function.duration",
-                            fields.MetricArg(
-                                "column",
-                                allowed_columns=["function.duration"],
-                                allow_custom_measurements=False,
-                            ),
-                        ),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=lambda args, alias: Function(
-                        "minus",
-                        [
-                            function_aliases.resolve_metrics_percentile(
-                                args=args,
-                                alias=alias,
-                                extra_conditions=[
-                                    Function("greater", [Column("timestamp"), args["timestamp"]])
-                                ],
-                            ),
-                            function_aliases.resolve_metrics_percentile(
-                                args=args,
-                                alias=alias,
-                                extra_conditions=[
-                                    Function("less", [Column("timestamp"), args["timestamp"]])
-                                ],
-                            ),
-                        ],
-                        alias,
-                    ),
-                    is_percentile=True,
-                    default_result_type="duration",
-                ),
-                fields.MetricsFunction(
-                    "regression_score",
-                    required_args=[
-                        fields.MetricArg(
-                            "column",
-                            allowed_columns=["function.duration"],
-                            allow_custom_measurements=False,
-                        ),
-                        fields.TimestampArg("timestamp"),
-                        fields.NumberRange("percentile", 0, 1),
-                    ],
-                    calculated_args=[resolve_metric_id],
-                    snql_distribution=self._resolve_regression_score,
-                    default_result_type="number",
-                ),
-            ]
-        }
-        return function_converter
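
Most of the deleted config is bookkeeping around one idea: cpm ("calls per minute") divides a countIf over the matching metric_id by the query window in minutes, and regression_score multiplies the after/before cpm by the after/before percentile and takes the difference. A plain-Python sketch of the cpm arithmetic from _resolve_cpm:

from datetime import datetime, timedelta

def calls_per_minute(sample_count: int, start: datetime, end: datetime) -> float:
    # countIf(...) / (window_seconds / 60), as in _resolve_cpm above.
    interval = (end - start).total_seconds()
    return sample_count / (interval / 60)

start = datetime(2024, 1, 1)
print(calls_per_minute(120, start, start + timedelta(hours=1)))  # 2.0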

+ 0 - 299
src/sentry/snuba/profile_functions_metrics.py

@@ -1,299 +0,0 @@
-import logging
-from datetime import timedelta
-
-from snuba_sdk import Column, Condition
-
-import sentry.models
-from sentry.search.events.builder.profile_functions_metrics import (
-    ProfileFunctionsMetricsQueryBuilder,
-    TimeseriesProfileFunctionsMetricsQueryBuilder,
-    TopProfileFunctionsMetricsQueryBuilder,
-)
-from sentry.search.events.types import EventsResponse, QueryBuilderConfig, SnubaParams
-from sentry.snuba import discover
-from sentry.snuba.dataset import Dataset
-from sentry.snuba.metrics.extraction import MetricSpecType
-from sentry.snuba.query_sources import QuerySource
-from sentry.utils.snuba import SnubaTSResult
-
-logger = logging.getLogger(__name__)
-
-
-def query(
-    selected_columns: list[str],
-    query: str,
-    referrer: str,
-    snuba_params: SnubaParams | None = None,
-    equations: list[str] | None = None,
-    orderby: list[str] | None = None,
-    offset: int | None = None,
-    limit: int = 50,
-    auto_fields: bool = False,
-    auto_aggregations: bool = False,
-    include_equation_fields: bool = False,
-    allow_metric_aggregates: bool = False,
-    use_aggregate_conditions: bool = False,
-    conditions: list[Condition] | None = None,
-    functions_acl: list[str] | None = None,
-    transform_alias_to_input_format: bool = False,
-    sample: float | None = None,
-    has_metrics: bool = False,
-    use_metrics_layer: bool = False,
-    skip_tag_resolution: bool = False,
-    extra_columns: list[Column] | None = None,
-    on_demand_metrics_enabled: bool = False,
-    on_demand_metrics_type: MetricSpecType | None = None,
-    fallback_to_transactions: bool = False,
-    query_source: QuerySource | None = None,
-):
-    builder = ProfileFunctionsMetricsQueryBuilder(
-        dataset=Dataset.PerformanceMetrics,
-        params={},
-        snuba_params=snuba_params,
-        query=query,
-        selected_columns=selected_columns,
-        equations=equations,
-        orderby=orderby,
-        limit=limit,
-        offset=offset,
-        sample_rate=sample,
-        config=QueryBuilderConfig(
-            auto_fields=auto_fields,
-            auto_aggregations=auto_aggregations,
-            use_aggregate_conditions=use_aggregate_conditions,
-            functions_acl=functions_acl,
-            equation_config={"auto_add": include_equation_fields},
-            has_metrics=has_metrics,
-            use_metrics_layer=use_metrics_layer,
-            transform_alias_to_input_format=transform_alias_to_input_format,
-            skip_tag_resolution=skip_tag_resolution,
-        ),
-    )
-
-    result = builder.process_results(builder.run_query(referrer, query_source=query_source))
-    return result
-
-
-def timeseries_query(
-    selected_columns: list[str],
-    query: str,
-    snuba_params: SnubaParams,
-    rollup: int,
-    referrer: str,
-    zerofill_results: bool = True,
-    allow_metric_aggregates=True,
-    comparison_delta: timedelta | None = None,
-    functions_acl: list[str] | None = None,
-    has_metrics: bool = True,
-    use_metrics_layer: bool = False,
-    on_demand_metrics_enabled: bool = False,
-    on_demand_metrics_type: MetricSpecType | None = None,
-    groupby: Column | None = None,
-    query_source: QuerySource | None = None,
-    fallback_to_transactions: bool = False,
-) -> SnubaTSResult:
-    """
-    High-level API for doing arbitrary user timeseries queries against events.
-    this API should match that of sentry.snuba.discover.timeseries_query
-    """
-
-    metrics_query = TimeseriesProfileFunctionsMetricsQueryBuilder(
-        {},
-        rollup,
-        snuba_params=snuba_params,
-        dataset=Dataset.PerformanceMetrics,
-        query=query,
-        selected_columns=selected_columns,
-        groupby=groupby,
-        config=QueryBuilderConfig(
-            functions_acl=functions_acl,
-            allow_metric_aggregates=allow_metric_aggregates,
-            use_metrics_layer=use_metrics_layer,
-        ),
-    )
-    result = metrics_query.run_query(referrer, query_source=query_source)
-
-    result = metrics_query.process_results(result)
-    result["data"] = (
-        discover.zerofill(
-            result["data"],
-            snuba_params.start_date,
-            snuba_params.end_date,
-            rollup,
-            ["time"],
-        )
-        if zerofill_results
-        else result["data"]
-    )
-
-    result["meta"]["isMetricsData"] = True
-
-    return SnubaTSResult(
-        {
-            "data": result["data"],
-            "isMetricsData": True,
-            "meta": result["meta"],
-        },
-        snuba_params.start_date,
-        snuba_params.end_date,
-        rollup,
-    )
-
-
-def top_events_timeseries(
-    timeseries_columns: list[str],
-    selected_columns: list[str],
-    user_query: str,
-    snuba_params: SnubaParams,
-    orderby: list[str],
-    rollup: int,
-    limit: int,
-    referrer: str,
-    organization: sentry.models.Organization,
-    equations: list[str] | None = None,
-    top_events: EventsResponse | None = None,
-    allow_empty: bool = True,
-    zerofill_results: bool = True,
-    include_other: bool = False,
-    functions_acl: list[str] | None = None,
-    on_demand_metrics_enabled: bool = False,
-    on_demand_metrics_type: MetricSpecType | None = None,
-    fallback_to_transactions: bool = False,
-):
-    """
-    High-level API for doing arbitrary user timeseries queries for a limited number of top events
-
-    Returns a dictionary of SnubaTSResult objects that have been zerofilled in
-    case of gaps. Each value of the dictionary should match the result of a timeseries query
-
-    timeseries_columns (Sequence[str]) List of public aliases to fetch for the timeseries query,
-                    usually matches the y-axis of the graph
-    selected_columns (Sequence[str]) List of public aliases to fetch for the events query,
-                    this is to determine what the top events are
-    user_query (str) Filter query string to create conditions from. needs to be user_query
-                    to not conflict with the function query
-    params (Dict[str, str]) Filtering parameters with start, end, project_id, environment,
-    orderby (Sequence[str]) The fields to order results by.
-    rollup (int) The bucket width in seconds
-    limit (int) The number of events to get timeseries for
-    organization (Organization) Used to map group ids to short ids
-    referrer (str|None) A referrer string to help locate the origin of this query.
-    top_events (dict|None) A dictionary with a 'data' key containing a list of dictionaries that
-                    represent the top events matching the query. Useful when you have found
-                    the top events earlier and want to save a query.
-    """
-
-    if top_events is None:
-        top_events = query(
-            selected_columns,
-            query=user_query,
-            snuba_params=snuba_params,
-            equations=equations,
-            orderby=orderby,
-            limit=limit,
-            referrer=referrer,
-            auto_aggregations=True,
-            use_aggregate_conditions=True,
-            include_equation_fields=True,
-            skip_tag_resolution=True,
-        )
-
-    top_events_builder = TopProfileFunctionsMetricsQueryBuilder(
-        Dataset.PerformanceMetrics,
-        {},
-        rollup,
-        top_events["data"],
-        snuba_params=snuba_params,
-        other=False,
-        query=user_query,
-        selected_columns=selected_columns,
-        timeseries_columns=timeseries_columns,
-        config=QueryBuilderConfig(
-            functions_acl=functions_acl,
-            skip_tag_resolution=True,
-        ),
-    )
-    if len(top_events["data"]) == limit and include_other:
-        other_events_builder = TopProfileFunctionsMetricsQueryBuilder(
-            Dataset.PerformanceMetrics,
-            {},
-            rollup,
-            top_events["data"],
-            snuba_params=snuba_params,
-            other=True,
-            query=user_query,
-            selected_columns=selected_columns,
-            timeseries_columns=timeseries_columns,
-        )
-
-        # TODO: use bulk_snuba_queries
-        other_result = other_events_builder.run_query(referrer)
-        result = top_events_builder.run_query(referrer)
-    else:
-        result = top_events_builder.run_query(referrer)
-        other_result = {"data": []}
-    if (
-        not allow_empty
-        and not len(result.get("data", []))
-        and not len(other_result.get("data", []))
-    ):
-        return SnubaTSResult(
-            {
-                "data": (
-                    discover.zerofill(
-                        [], snuba_params.start_date, snuba_params.end_date, rollup, ["time"]
-                    )
-                    if zerofill_results
-                    else []
-                ),
-            },
-            snuba_params.start_date,
-            snuba_params.end_date,
-            rollup,
-        )
-
-    result = top_events_builder.process_results(result)
-
-    translated_groupby = top_events_builder.translated_groupby
-
-    results = (
-        {discover.OTHER_KEY: {"order": limit, "data": other_result["data"]}}
-        if len(other_result.get("data", []))
-        else {}
-    )
-    # Using the top events add the order to the results
-    for index, item in enumerate(top_events["data"]):
-        result_key = discover.create_result_key(item, translated_groupby, {})
-        results[result_key] = {"order": index, "data": []}
-    for row in result["data"]:
-        result_key = discover.create_result_key(row, translated_groupby, {})
-        if result_key in results:
-            results[result_key]["data"].append(row)
-        else:
-            logger.warning(
-                "profile_functions_metrics.top-events.timeseries.key-mismatch",
-                extra={"result_key": result_key, "top_event_keys": list(results.keys())},
-            )
-    snuba_ts_result: dict[str, SnubaTSResult] = {}
-    for key, item in results.items():
-        snuba_ts_result[key] = SnubaTSResult(
-            {
-                "data": (
-                    discover.zerofill(
-                        item["data"],
-                        snuba_params.start_date,
-                        snuba_params.end_date,
-                        rollup,
-                        ["time"],
-                    )
-                    if zerofill_results
-                    else item["data"]
-                ),
-                "order": item["order"],
-            },
-            snuba_params.start_date,
-            snuba_params.end_date,
-            rollup,
-        )
-
-    return snuba_ts_result
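
All three deleted entry points funnel their results through discover.zerofill so that every rollup bucket is present even when Snuba returns no row for it. A simplified sketch of what that zerofilling does (epoch seconds and a single "count" column; not the real discover.zerofill):

def zerofill(rows: list[dict], start: int, end: int, rollup: int) -> list[dict]:
    by_time = {row["time"]: row for row in rows}
    # Emit one row per bucket; missing buckets get a zeroed placeholder.
    return [by_time.get(t, {"time": t, "count": 0}) for t in range(start, end, rollup)]

print(zerofill([{"time": 60, "count": 3}], 0, 180, 60))
# [{'time': 0, 'count': 0}, {'time': 60, 'count': 3}, {'time': 120, 'count': 0}]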

+ 0 - 2
src/sentry/snuba/utils.py

@@ -9,7 +9,6 @@ from sentry.snuba import (
     issue_platform,
     metrics_enhanced_performance,
     metrics_performance,
-    profile_functions_metrics,
     profiles,
     spans_eap,
     spans_indexed,
@@ -32,7 +31,6 @@ DATASET_OPTIONS = {
     "spansIndexed": spans_indexed,
     "spansMetrics": spans_metrics,
     "transactions": transactions,
-    "profileFunctionsMetrics": profile_functions_metrics,
 }
 DATASET_LABELS = {value: key for key, value in DATASET_OPTIONS.items()}
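
DATASET_OPTIONS is the single registry tying public dataset labels to handler modules, and DATASET_LABELS is derived from it by inversion, so deleting the "profileFunctionsMetrics" entry removes the label in both directions at once. A sketch of the pattern with stand-in objects:

# Stand-ins for the real handler modules.
transactions, spans_metrics = object(), object()

DATASET_OPTIONS = {"transactions": transactions, "spansMetrics": spans_metrics}
DATASET_LABELS = {value: key for key, value in DATASET_OPTIONS.items()}

assert DATASET_LABELS[spans_metrics] == "spansMetrics"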
 

+ 0 - 44
src/sentry/testutils/cases.py

@@ -124,7 +124,6 @@ from sentry.search.events.constants import (
     METRIC_SATISFIED_TAG_VALUE,
     METRIC_TOLERATED_TAG_VALUE,
     METRICS_MAP,
-    PROFILE_METRICS_MAP,
     SPAN_METRICS_MAP,
 )
 from sentry.sentry_metrics import indexer
@@ -2224,49 +2223,6 @@ class MetricsEnhancedPerformanceTestCase(BaseMetricsLayerTestCase, TestCase):
                 subvalue,
             )
 
-    def store_profile_functions_metric(
-        self,
-        value: dict[str, int] | list[int] | int,
-        metric: str = "function.duration",
-        internal_metric: str | None = None,
-        entity: str | None = None,
-        tags: dict[str, str] | None = None,
-        timestamp: datetime | None = None,
-        project: int | None = None,
-        use_case_id: UseCaseID = UseCaseID.SPANS,
-    ):
-        internal_metric = (
-            PROFILE_METRICS_MAP[metric] if internal_metric is None else internal_metric
-        )
-        entity = self.ENTITY_MAP[metric] if entity is None else entity
-        org_id = self.organization.id
-
-        if tags is None:
-            tags = {}
-
-        if timestamp is None:
-            metric_timestamp = self.DEFAULT_METRIC_TIMESTAMP.timestamp()
-        else:
-            metric_timestamp = timestamp.timestamp()
-
-        if project is None:
-            project = self.project.id
-
-        val_list: list[int | dict[str, int]] = []
-        if not isinstance(value, list):
-            val_list.append(value)
-        else:
-            val_list = value
-        for subvalue in val_list:
-            self.store_metric(
-                org_id,
-                project,
-                internal_metric,
-                tags,
-                int(metric_timestamp),
-                subvalue,
-            )
-
     def wait_for_metric_count(
         self,
         project,
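
The deleted test helper mostly duplicated its siblings: look up the MRI and entity, default the timestamp and project, then normalize the value so that scalars and lists share one write loop. A sketch of that normalization step:

def normalize_values(value: int | dict[str, int] | list[int | dict[str, int]]) -> list:
    # A scalar becomes a one-element list so the caller can always iterate.
    return value if isinstance(value, list) else [value]

assert normalize_values(5) == [5]
assert normalize_values([1, 2]) == [1, 2]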

+ 0 - 99
tests/sentry/search/events/builder/test_profile_functions_metrics.py

@@ -1,99 +0,0 @@
-from datetime import datetime, timedelta, timezone
-
-import pytest
-from snuba_sdk.column import Column
-from snuba_sdk.conditions import Condition, Op
-from snuba_sdk.function import Function
-
-from sentry.search.events.builder.profile_functions_metrics import (
-    ProfileFunctionsMetricsQueryBuilder,
-)
-from sentry.testutils.factories import Factories
-from sentry.testutils.pytest.fixtures import django_db_all
-
-pytestmark = pytest.mark.sentry_metrics
-
-
-@pytest.fixture
-def now():
-    return datetime(2022, 10, 31, 0, 0, tzinfo=timezone.utc)
-
-
-@pytest.fixture
-def today(now):
-    return now.replace(hour=0, minute=0, second=0, microsecond=0)
-
-
-@pytest.fixture
-def params(now, today):
-    organization = Factories.create_organization()
-    team = Factories.create_team(organization=organization)
-    project1 = Factories.create_project(organization=organization, teams=[team])
-    project2 = Factories.create_project(organization=organization, teams=[team])
-
-    user = Factories.create_user()
-    Factories.create_team_membership(team=team, user=user)
-
-    return {
-        "start": now - timedelta(days=7),
-        "end": now - timedelta(seconds=1),
-        "project_id": [project1.id, project2.id],
-        "project_objects": [project1, project2],
-        "organization_id": organization.id,
-        "user_id": user.id,
-        "team_id": [team.id],
-    }
-
-
-@pytest.mark.parametrize(
-    "search,condition",
-    [
-        pytest.param(
-            'package:""',
-            Condition(
-                Function("has", parameters=[Column("tags.key"), 9223372036854776075]), Op("!="), 1
-            ),
-            id="empty package",
-            marks=pytest.mark.querybuilder,
-        ),
-        pytest.param(
-            '!package:""',
-            Condition(
-                Function("has", parameters=[Column("tags.key"), 9223372036854776075]), Op("="), 1
-            ),
-            id="not empty package",
-        ),
-        pytest.param(
-            'function:""',
-            Condition(
-                Function("has", parameters=[Column("tags.key"), 9223372036854776074]), Op("!="), 1
-            ),
-            id="empty function",
-        ),
-        pytest.param(
-            '!function:""',
-            Condition(
-                Function("has", parameters=[Column("tags.key"), 9223372036854776074]), Op("="), 1
-            ),
-            id="not empty function",
-        ),
-        pytest.param(
-            "fingerprint:123",
-            Condition(Column("tags[9223372036854776076]"), Op("="), "123"),
-            id="fingerprint",
-        ),
-        pytest.param(
-            "!fingerprint:123",
-            Condition(Column("tags[9223372036854776076]"), Op("!="), "123"),
-            id="not fingerprint",
-        ),
-    ],
-)
-@django_db_all
-def test_where(params, search, condition):
-    builder = ProfileFunctionsMetricsQueryBuilder(
-        params,
-        query=search,
-        selected_columns=["count()"],
-    )
-    assert condition in builder.where
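
The deleted tests pinned down one behavior worth noting: filtering on an empty tag value compiles to a presence check, has(tags.key, <indexed id>), rather than a value comparison, so package:"" asserts the tag is absent and !package:"" that it is present. A sketch of that mapping (the indexed id is illustrative):

def empty_tag_condition(tag_key_id: int, negated: bool) -> str:
    # package:"" -> tag absent (has(...) != 1); !package:"" -> tag present.
    op = "=" if negated else "!="
    return f"has(tags.key, {tag_key_id}) {op} 1"

print(empty_tag_condition(9223372036854776075, negated=False))
# has(tags.key, 9223372036854776075) != 1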

Some files were not shown because too many files changed in this diff