
ref(sessions): Remove deprecated get_filter usage [TET-226] (#38300)

Refactors `get_filter` usage, as it is deprecated, and replaces it
with `SessionsV2QueryBuilder` and `TimeseriesSessionsV2QueryBuilder`,
which inherit from `QueryBuilder`.

Ahmed Etefy · 2 years ago · commit 4d421ef76c
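
At a glance, the old path built legacy JSON conditions via `get_filter` and handed them to `raw_query`; the new path feeds a `QueryDefinition` into the snql-based builders. A minimal sketch of the new flow, assuming `query` is a populated `QueryDefinition` (the class names, method names, and referrers are taken from the diff below; the orderby value is illustrative):

    query_builder_dict = query.to_query_builder_dict(orderby=["-sum(session)"])

    # Totals: one row per group over the whole date range.
    totals = SessionsV2QueryBuilder(**query_builder_dict).run_query("sessions.totals")["data"]

    # Time series: the same query, additionally selected and grouped by the
    # bucketed session start time.
    series = TimeseriesSessionsV2QueryBuilder(**query_builder_dict).run_query(
        "sessions.timeseries"
    )["data"]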

+ 1 - 1
src/sentry/release_health/duplex.py

@@ -973,7 +973,7 @@ class DuplexReleaseHealthBackend(ReleaseHealthBackend):
         relative_hours = math.ceil((query.end - now).total_seconds() / 3600)
         sentry_tags = {"run_sessions_query.rel_end": f"{relative_hours}h"}
 
-        project_ids = query.filter_keys.get("project_id")
+        project_ids = query.params.get("project_id")
         if project_ids and len(project_ids) == 1:
             project_id = project_ids[0]
             sentry_tags["run_sessions_query.project_id"] = str(project_id)

+ 3 - 15
src/sentry/release_health/metrics_sessions_v2.py

@@ -40,7 +40,6 @@ from snuba_sdk import (
     Op,
 )
 from snuba_sdk.conditions import ConditionGroup
-from snuba_sdk.legacy import json_to_snql
 
 from sentry.api.utils import InvalidParams as UtilsInvalidParams
 from sentry.models import Release
@@ -54,7 +53,6 @@ from sentry.release_health.base import (
     SessionsQueryValue,
 )
 from sentry.sentry_metrics.configuration import UseCaseKey
-from sentry.snuba.dataset import EntityKey
 from sentry.snuba.metrics.datasource import get_series
 from sentry.snuba.metrics.naming_layer.public import SessionMetricKey
 from sentry.snuba.metrics.query import MetricField, MetricsQuery, OrderBy
@@ -415,7 +413,8 @@ def run_sessions_query(
     if not intervals:
         return _empty_result(query)
 
-    conditions = _get_filter_conditions(query.conditions)
+    conditions = query.get_filter_conditions()
+
     where, status_filter = _extract_status_filter_from_conditions(conditions)
     if status_filter == frozenset():
         # There was a condition that cannot be met, such as 'session:status:foo'
@@ -433,10 +432,7 @@ def run_sessions_query(
     if not fields:
         return _empty_result(query)
 
-    filter_keys = query.filter_keys.copy()
-    project_ids = filter_keys.pop("project_id")
-    assert not filter_keys
-
+    project_ids = query.params["project_id"]
     limit = Limit(query.limit) if query.limit else None
 
     ordered_preflight_filters: Dict[GroupByFieldName, Sequence[str]] = {}
@@ -702,14 +698,6 @@ def _empty_result(query: QueryDefinition) -> SessionsQueryResult:
     }
 
 
-def _get_filter_conditions(conditions: Any) -> ConditionGroup:
-    """Translate given conditions to snql"""
-    dummy_entity = EntityKey.MetricsSets.value
-    return json_to_snql(
-        {"selected_columns": ["value"], "conditions": conditions}, entity=dummy_entity
-    ).query.where
-
-
 def _extract_status_filter_from_conditions(
     conditions: ConditionGroup,
 ) -> Tuple[ConditionGroup, StatusFilter]:
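
The removed `_get_filter_conditions` helper round-tripped the legacy JSON conditions through `json_to_snql` against a dummy entity just to recover snql `where` clauses; `run_sessions_query` now asks the `QueryDefinition` directly via `get_filter_conditions()` (added in `sessions_v2.py` below). Either way, the result is a plain list of snql conditions; a hypothetical example of the shape for a `release:1.0` query:

    from snuba_sdk import Column, Op
    from snuba_sdk.conditions import Condition

    conditions = [Condition(Column("release"), Op.EQ, "1.0")]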

+ 81 - 2
src/sentry/search/events/builder.py

@@ -1,6 +1,19 @@
 from collections import defaultdict
 from datetime import datetime, timedelta
-from typing import Any, Callable, Dict, List, Mapping, Match, Optional, Set, Tuple, Union, cast
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    Mapping,
+    Match,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    Union,
+    cast,
+)
 
 import sentry_sdk
 from django.utils import timezone
@@ -1165,7 +1178,7 @@ class QueryBuilder:
         if (
             search_filter.operator in ("!=", "NOT IN")
             and not search_filter.key.is_tag
-            and name != "event.type"
+            and name not in self.config.non_nullable_keys
         ):
             # Handle null columns on inequality comparisons. Any comparison
             # between a value and a null will result to null, so we need to
@@ -1645,12 +1658,78 @@ class HistogramQueryBuilder(QueryBuilder):
 
 
 class SessionsQueryBuilder(QueryBuilder):
+    # ToDo(ahmed): Rename this to AlertsSessionsQueryBuilder, as it is used exclusively for crash rate alerts
     def resolve_params(self) -> List[WhereType]:
         conditions = super().resolve_params()
         conditions.append(Condition(self.column("org_id"), Op.EQ, self.organization_id))
         return conditions
 
 
+class SessionsV2QueryBuilder(QueryBuilder):
+    filter_allowlist_fields = {"project", "project_id", "environment", "release"}
+
+    def __init__(
+        self,
+        *args: Any,
+        granularity: Optional[int] = None,
+        extra_filter_allowlist_fields: Optional[Sequence[str]] = None,
+        **kwargs: Any,
+    ):
+        self._extra_filter_allowlist_fields = extra_filter_allowlist_fields or []
+        self.granularity = Granularity(granularity) if granularity is not None else None
+        super().__init__(*args, **kwargs)
+
+    def resolve_params(self) -> List[WhereType]:
+        conditions = super().resolve_params()
+        conditions.append(Condition(self.column("org_id"), Op.EQ, self.organization_id))
+        return conditions
+
+    def resolve_groupby(self, groupby_columns: Optional[List[str]] = None) -> List[SelectType]:
+        """
+        The default QueryBuilder `resolve_groupby` needs to be overridden here because it only adds the requested
+        groupBy columns to the query if the query has `aggregates` present in it. For the `sessions` dataset, the
+        session fields are aggregates, but those aggregate definitions are hidden away in snuba, so the default
+        `resolve_groupby` does not treat them as aggregates and skips the requested groupBy columns, and we end up
+        with a ClickHouse error that the column is neither under an aggregate function nor in the `groupBy` clause.
+        """
+        if groupby_columns is None:
+            return []
+        return list({self.resolve_column(column) for column in groupby_columns})
+
+    def _default_filter_converter(self, search_filter: SearchFilter) -> Optional[WhereType]:
+        name = search_filter.key.name
+        if name in self.filter_allowlist_fields or name in self._extra_filter_allowlist_fields:
+            return super()._default_filter_converter(search_filter)
+        raise InvalidSearchQuery(f"Invalid search filter: {name}")
+
+
+class TimeseriesSessionsV2QueryBuilder(SessionsV2QueryBuilder):
+    time_column = "bucketed_started"
+
+    def get_snql_query(self) -> Request:
+        self.validate_having_clause()
+
+        return Request(
+            dataset=self.dataset.value,
+            app_id="default",
+            query=Query(
+                match=Entity(self.dataset.value, sample=self.sample_rate),
+                select=[Column(self.time_column)] + self.columns,
+                array_join=self.array_join,
+                where=self.where,
+                having=self.having,
+                groupby=[Column(self.time_column)] + self.groupby,
+                orderby=self.orderby,
+                limit=self.limit,
+                offset=self.offset,
+                granularity=self.granularity,
+                limitby=self.limitby,
+            ),
+            flags=Flags(turbo=self.turbo),
+        )
+
+
 class MetricsQueryBuilder(QueryBuilder):
     def __init__(
         self,

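`TimeseriesSessionsV2QueryBuilder` only overrides `get_snql_query`, so that the `bucketed_started` time column is prepended to both the select and the groupBy; filters, the allowlist, and granularity all come from `SessionsV2QueryBuilder`. A hedged sketch of driving it directly, where the constructor keywords mirror `QueryDefinition.to_query_builder_dict` below and all parameter values are hypothetical:

    from datetime import datetime, timedelta

    from sentry.search.events.builder import TimeseriesSessionsV2QueryBuilder
    from sentry.utils.snuba import Dataset

    end = datetime.utcnow()
    start = end - timedelta(days=1)

    builder = TimeseriesSessionsV2QueryBuilder(
        dataset=Dataset.Sessions,
        params={"organization_id": 1, "project_id": [42], "start": start, "end": end},
        query="release:1.0",
        selected_columns=["sum(session)"],
        groupby_columns=["release"],
        granularity=3600,
    )
    # The resulting Request groups by `bucketed_started` plus `release`.
    request = builder.get_snql_query()
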
+ 1 - 0
src/sentry/search/events/datasets/base.py

@@ -9,6 +9,7 @@ from sentry.search.events.types import SelectType, WhereType
 
 class DatasetConfig(abc.ABC):
     custom_threshold_columns = {}
+    non_nullable_keys = set()
     missing_function_error = InvalidSearchQuery
 
     @property

+ 1 - 0
src/sentry/search/events/datasets/discover.py

@@ -96,6 +96,7 @@ class DiscoverDatasetConfig(DatasetConfig):
         "count_miserable(user)",
         "user_misery()",
     }
+    non_nullable_keys = {"event.type"}
 
     def __init__(self, builder: QueryBuilder):
         self.builder = builder

+ 2 - 0
src/sentry/search/events/datasets/sessions.py

@@ -12,6 +12,8 @@ from sentry.search.events.types import SelectType, WhereType
 
 
 class SessionsDatasetConfig(DatasetConfig):
+    non_nullable_keys = {"project", "project_id", "environment", "release"}
+
     def __init__(self, builder: QueryBuilder):
         self.builder = builder
 

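`non_nullable_keys` generalizes the old hard-coded `event.type` check in `QueryBuilder` (see `builder.py` above): for keys declared non-nullable, the builder can skip the null-safe rewrite it otherwise applies to `!=`/`NOT IN` filters. A hypothetical sketch of the two condition shapes, not the exact expression `QueryBuilder` emits:

    from snuba_sdk import Column, Function, Op
    from snuba_sdk.conditions import Condition, Or

    # Nullable column: a plain `environment != 'prod'` would drop NULL rows,
    # so the exclusion conceptually also has to match NULLs.
    nullable = Or(
        [
            Condition(Column("environment"), Op.NEQ, "prod"),
            Condition(Function("isNull", [Column("environment")]), Op.EQ, 1),
        ]
    )

    # Key declared in `non_nullable_keys`: a direct comparison is safe.
    non_nullable = Condition(Column("environment"), Op.NEQ, "prod")
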
+ 4 - 10
src/sentry/snuba/metrics/query.py

@@ -5,7 +5,7 @@ from dataclasses import dataclass
 from datetime import datetime, timedelta
 from typing import Literal, Optional, Sequence, Set, Union
 
-from snuba_sdk import Column, Direction, Function, Granularity, Limit, Offset
+from snuba_sdk import Column, Direction, Granularity, Limit, Offset
 from snuba_sdk.conditions import Condition, ConditionGroup
 
 from sentry.api.utils import InvalidParams
@@ -134,16 +134,10 @@ class MetricsQuery(MetricsQueryValidationRunner):
         for condition in self.where:
             if (
                 isinstance(condition, Condition)
-                and isinstance(condition.lhs, Function)
-                and condition.lhs.function == "ifNull"
+                and isinstance(condition.lhs, Column)
+                and condition.lhs.name in UNALLOWED_TAGS
             ):
-                parameter = condition.lhs.parameters[0]
-                if isinstance(parameter, Column) and parameter.name.startswith(
-                    ("tags_raw[", "tags[")
-                ):
-                    tag_name = parameter.name.split("[")[1].split("]")[0]
-                    if tag_name in UNALLOWED_TAGS:
-                        raise InvalidParams(f"Tag name {tag_name} is not a valid query filter")
+                raise InvalidParams(f"Tag name {condition.lhs.name} is not a valid query filter")
 
     def validate_orderby(self) -> None:
         if not self.orderby:

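The simplified validation in `MetricsQuery` above relies on the new builders producing conditions whose lhs is a plain `Column` named after the tag, rather than the event-search `ifNull(tags[...], '')` wrapper the removed code had to unpick. A hypothetical condition the tightened check rejects, assuming `session.status` is in `UNALLOWED_TAGS`:

    from snuba_sdk import Column, Op
    from snuba_sdk.conditions import Condition

    # MetricsQuery validation raises InvalidParams for this filter.
    bad = Condition(Column("session.status"), Op.EQ, "abnormal")
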
+ 78 - 65
src/sentry/snuba/sessions_v2.py

@@ -5,12 +5,13 @@ from datetime import datetime, timedelta
 from typing import Any, Dict, List, Optional, Tuple
 
 import pytz
+from snuba_sdk import Column, Condition, Function, Limit, Op
 
 from sentry.api.utils import get_date_range_from_params
 from sentry.release_health.base import AllowedResolution, SessionsQueryConfig
-from sentry.search.events.filter import get_filter
+from sentry.search.events.builder import SessionsV2QueryBuilder, TimeseriesSessionsV2QueryBuilder
 from sentry.utils.dates import parse_stats_period, to_datetime, to_timestamp
-from sentry.utils.snuba import Dataset, raw_query, resolve_condition
+from sentry.utils.snuba import Dataset
 
 logger = logging.getLogger(__name__)
 
@@ -241,6 +242,10 @@ class InvalidField(Exception):
     pass
 
 
+class ZeroIntervalsException(Exception):
+    pass
+
+
 class QueryDefinition:
     """
     This is the definition of the query the user wants to execute.
@@ -262,6 +267,7 @@ class QueryDefinition:
         self.raw_orderby = query.getlist("orderBy")  # only respected by metrics implementation
         self.limit = limit
         self.offset = offset
+        self._query_config = query_config
 
         if len(raw_fields) == 0:
             raise InvalidField('Request is missing a "field"')
@@ -311,28 +317,49 @@ class QueryDefinition:
             query_groupby.update(groupby.get_snuba_groupby())
         self.query_groupby = list(query_groupby)
 
-        # the `params` are:
-        # project_id, organization_id, environment;
-        # also: start, end; but we got those ourselves.
-        snuba_filter = get_filter(self.query, params)
-
-        # this makes sure that literals in complex queries are properly quoted,
-        # and unknown fields are raised as errors
-        if query_config.allow_session_status_query:
-            # NOTE: "''" is added because we use the event search parser, which
-            # resolves "session.status" to ifNull(..., "''")
-            column_resolver = lambda col: resolve_column(col, ["session.status", "''"])
-        else:
-            column_resolver = resolve_column
-
-        conditions = [resolve_condition(c, column_resolver) for c in snuba_filter.conditions]
-        filter_keys = {
-            resolve_filter_key(key): value for key, value in snuba_filter.filter_keys.items()
+    def to_query_builder_dict(self, orderby=None):
+        num_intervals = len(get_timestamps(self))
+        if num_intervals == 0:
+            raise ZeroIntervalsException
+
+        max_groups = SNUBA_LIMIT // num_intervals
+
+        query_builder_dict = {
+            "dataset": Dataset.Sessions,
+            "params": {
+                **self.params,
+                "start": self.start,
+                "end": self.end,
+            },
+            "selected_columns": self.query_columns,
+            "groupby_columns": self.query_groupby,
+            "query": self.query,
+            "orderby": orderby,
+            "limit": max_groups,
+            "auto_aggregations": True,
+            "granularity": self.rollup,
         }
-
-        self.aggregations = snuba_filter.aggregations
-        self.conditions = conditions
-        self.filter_keys = filter_keys
+        if self._query_config.allow_session_status_query:
+            query_builder_dict.update({"extra_filter_allowlist_fields": ["session.status"]})
+        return query_builder_dict
+
+    def get_filter_conditions(self):
+        """
+        Returns the filter conditions for the query, to be used in metrics queries; it excludes the timestamp
+        and organization id conditions, which are added later by the metrics layer.
+        """
+        conditions = SessionsV2QueryBuilder(**self.to_query_builder_dict()).where
+        filter_conditions = []
+        for condition in conditions:
+            # Exclude the sessions "started" timestamp condition and the org_id condition, as they are
+            # not needed for metrics queries.
+            if (
+                isinstance(condition, Condition)
                and isinstance(condition.lhs, Column)
+                and condition.lhs.name in ["started", "org_id"]
+            ):
+                continue
+            filter_conditions.append(condition)
+        return filter_conditions
 
     def __repr__(self):
         return f"{self.__class__.__name__}({repr(self.__dict__)})"
@@ -465,65 +492,51 @@ def _run_sessions_query(query):
     `totals` and again for the actual time-series data grouped by the requested
     interval.
     """
-
-    num_intervals = len(get_timestamps(query))
-    if num_intervals == 0:
-        return [], []
-
     # We only return the top-N groups, based on the first field that is being
     # queried, assuming that those are the most relevant to the user.
     # In a future iteration we might expose an `orderBy` query parameter.
     orderby = [f"-{query.primary_column}"]
-    max_groups = SNUBA_LIMIT // num_intervals
-
-    result_totals = raw_query(
-        dataset=Dataset.Sessions,
-        selected_columns=query.query_columns,
-        groupby=query.query_groupby,
-        aggregations=query.aggregations,
-        conditions=query.conditions,
-        filter_keys=query.filter_keys,
-        start=query.start,
-        end=query.end,
-        rollup=query.rollup,
-        orderby=orderby,
-        limit=max_groups,
-        referrer="sessions.totals",
-    )
 
-    totals = result_totals["data"]
-    if not totals:
+    try:
+        query_builder_dict = query.to_query_builder_dict(orderby=orderby)
+    except ZeroIntervalsException:
+        return [], []
+
+    result_totals = SessionsV2QueryBuilder(**query_builder_dict).run_query("sessions.totals")[
+        "data"
+    ]
+    if not result_totals:
         # No need to query time series if totals is already empty
         return [], []
 
     # We only get the time series for groups which also have a total:
     if query.query_groupby:
         # E.g. (release, environment) IN [(1, 2), (3, 4), ...]
-        groups = {tuple(row[column] for column in query.query_groupby) for row in totals}
-        extra_conditions = [[["tuple", query.query_groupby], "IN", groups]] + [
-            # This condition is redundant but might lead to better query performance
-            # Eg. [release IN [1, 3]], [environment IN [2, 4]]
-            [column, "IN", {row[column] for row in totals}]
+        groups = {tuple(row[column] for column in query.query_groupby) for row in result_totals}
+
+        extra_conditions = [
+            Condition(
+                Function("tuple", [Column(col) for col in query.query_groupby]),
+                Op.IN,
+                Function("tuple", list(groups)),
+            )
+        ] + [
+            Condition(
+                Column(column),
+                Op.IN,
+                Function("tuple", list({row[column] for row in result_totals})),
+            )
             for column in query.query_groupby
         ]
     else:
         extra_conditions = []
 
-    result_timeseries = raw_query(
-        dataset=Dataset.Sessions,
-        selected_columns=[TS_COL] + query.query_columns,
-        groupby=[TS_COL] + query.query_groupby,
-        aggregations=query.aggregations,
-        conditions=query.conditions + extra_conditions,
-        filter_keys=query.filter_keys,
-        start=query.start,
-        end=query.end,
-        rollup=query.rollup,
-        limit=SNUBA_LIMIT,
-        referrer="sessions.timeseries",
-    )
+    timeseries_query_builder = TimeseriesSessionsV2QueryBuilder(**query_builder_dict)
+    timeseries_query_builder.where.extend(extra_conditions)
+    timeseries_query_builder.limit = Limit(SNUBA_LIMIT)
+    result_timeseries = timeseries_query_builder.run_query("sessions.timeseries")["data"]
 
-    return totals, result_timeseries["data"]
+    return result_totals, result_timeseries
 
 
 def massage_sessions_result(

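Two details of the new `to_query_builder_dict` above are worth noting: raising `ZeroIntervalsException` replaces the old early `return [], []` inside `_run_sessions_query`, and the old `max_groups` arithmetic now lands in the builder's `limit`. A worked example of that arithmetic, using a hypothetical `SNUBA_LIMIT` of 5000:

    SNUBA_LIMIT = 5000  # hypothetical value, for illustration only

    num_intervals = 24  # e.g. a 1d statsPeriod at a 1h interval
    max_groups = SNUBA_LIMIT // num_intervals  # at most 208 groups per query
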
+ 8 - 0
src/sentry/utils/snuba.py

@@ -92,9 +92,17 @@ SESSIONS_FIELD_LIST = [
     "project_id",
     "org_id",
     "environment",
+    "session.status",
+    "users_errored",
+    "users_abnormal",
+    "sessions_errored",
+    "sessions_abnormal",
+    "duration_quantiles",
+    "duration_avg",
 ]
 
 SESSIONS_SNUBA_MAP = {column: column for column in SESSIONS_FIELD_LIST}
+SESSIONS_SNUBA_MAP.update({"timestamp": "started"})
 
 # This maps the public column aliases to the discover dataset column names.
 # Longer term we would like to not expose the transactions dataset directly

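The extended `SESSIONS_FIELD_LIST` keeps the identity mapping, and the single non-identity alias routes the public `timestamp` field to the raw `started` column; a trimmed-down sketch of the resulting map:

    # Abridged; the real map is built from SESSIONS_FIELD_LIST.
    SESSIONS_SNUBA_MAP = {"environment": "environment", "timestamp": "started"}
    assert SESSIONS_SNUBA_MAP["timestamp"] == "started"
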
+ 13 - 14
tests/snuba/api/endpoints/test_organization_sessions.py

@@ -160,7 +160,7 @@ class OrganizationSessionsEndpointTest(APITestCase, SnubaTestCase):
         )
 
         assert response.status_code == 400, response.content
-        assert response.data == {"detail": 'Invalid query field: "foo"'}
+        assert response.data["detail"] == "Invalid search filter: foo"
 
         response = self.do_request(
             {
@@ -173,14 +173,14 @@ class OrganizationSessionsEndpointTest(APITestCase, SnubaTestCase):
         assert response.status_code == 400, response.content
         # TODO: it would be good to provide a better error here,
         # since its not obvious where `message` comes from.
-        assert response.data == {"detail": 'Invalid query field: "message"'}
+        assert response.data["detail"] == "Invalid search filter: message"
 
     def test_illegal_query(self):
         response = self.do_request(
             {"statsPeriod": "1d", "field": ["sum(session)"], "query": ["issue.id:123"]}
         )
         assert response.status_code == 400, response.content
-        assert response.data == {"detail": 'Invalid query field: "group_id"'}
+        assert response.data["detail"] == "Invalid search filter: issue.id"
 
     def test_too_many_points(self):
         # default statsPeriod is 90d
@@ -1074,17 +1074,16 @@ class OrganizationSessionsEndpointTest(APITestCase, SnubaTestCase):
 
     @freeze_time(MOCK_DATETIME)
     def test_mix_known_and_unknown_strings(self):
-        for query_string in ("environment:[production,foo]",):
-            response = self.do_request(
-                {
-                    "project": self.project.id,  # project without users
-                    "statsPeriod": "1d",
-                    "interval": "1d",
-                    "field": ["count_unique(user)", "sum(session)"],
-                    "query": query_string,
-                }
-            )
-            assert response.status_code == 200, response.data
+        response = self.do_request(
+            {
+                "project": self.project.id,  # project without users
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["count_unique(user)", "sum(session)"],
+                "query": "environment:[production,foo]",
+            }
+        )
+        assert response.status_code == 200, response.data
 
 
 @patch("sentry.api.endpoints.organization_sessions.release_health", MetricsReleaseHealthBackend())

Some files were not shown because too many files changed in this diff