
feat(api): Sort sessions by release timestamp [TET-7] (#34699)

This PR adds the ability to sort by release timestamp in sessions v2 over metrics.
Essentially it:

- Runs a pre-flight query against the Release model, ordered by release timestamp (ASC or DESC), and collects the resulting release groups. If environment filters are sent in the request's query params, they are applied to the pre-flight query as well.
- Uses those groups as a filter in the subsequent metrics queries.
- Formats the output so that groups present in the Release model but absent from the metrics dataset still appear, with their fields nulled out; groups that do exist in the metrics dataset show their actual results (see the sketch right after this list).
- Orders the output according to the groups returned by the pre-flight query.
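
To make the format-and-reorder step concrete, here is a minimal sketch with hypothetical data (illustrative only; the real logic lives in _order_by_preflight_query_results in the diff below):

    # Hypothetical pre-flight result (newest release first) and metrics groups.
    preflight_releases = ["1D", "1C", "1B"]
    metrics_groups = {
        "1B": {"by": {"release": "1B"}, "totals": {"sum(session)": 10}},
        "1C": {"by": {"release": "1C"}, "totals": {"sum(session)": 3}},
        # "1D" exists in Postgres but has no data in the metrics dataset.
    }

    # Re-order to match the pre-flight order, nulling out missing groups.
    ordered = [
        metrics_groups.get(r, {"by": {"release": r}, "totals": {"sum(session)": 0}})
        for r in preflight_releases
    ]
    assert [g["by"]["release"] for g in ordered] == ["1D", "1C", "1B"]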
Ahmed Etefy · 2 years ago · parent commit af66ec160d

+ 273 - 23
src/sentry/release_health/metrics_sessions_v2.py

@@ -10,9 +10,12 @@ from dataclasses import dataclass, replace
 from enum import Enum
 from typing import (
     Any,
+    Callable,
+    Dict,
     FrozenSet,
     Iterable,
     List,
+    Literal,
     Mapping,
     MutableMapping,
     Optional,
@@ -40,9 +43,13 @@ from snuba_sdk.conditions import ConditionGroup
 from snuba_sdk.legacy import json_to_snql
 
 from sentry.api.utils import InvalidParams as UtilsInvalidParams
+from sentry.models import Release
 from sentry.models.project import Project
 from sentry.release_health.base import (
+    GroupByFieldName,
+    ProjectId,
     SessionsQueryFunction,
+    SessionsQueryGroup,
     SessionsQueryResult,
     SessionsQueryValue,
 )
@@ -54,6 +61,7 @@ from sentry.snuba.metrics.query import QueryDefinition as MetricsQuery
 from sentry.snuba.metrics.utils import OrderByNotSupportedOverCompositeEntityException
 from sentry.snuba.sessions_v2 import (
     InvalidParams,
+    NonPreflightOrderByException,
     QueryDefinition,
     finite_or_none,
     get_timestamps,
@@ -387,6 +395,8 @@ FIELD_MAP: Mapping[SessionsQueryFunction, Type[Field]] = {
     "crash_free_rate(session)": SimpleForwardingField,
     "crash_free_rate(user)": SimpleForwardingField,
 }
+PREFLIGHT_QUERY_COLUMNS = {"release.timestamp"}
+VirtualOrderByName = Literal["release.timestamp"]
 
 
 def run_sessions_query(
@@ -425,13 +435,78 @@ def run_sessions_query(
     project_ids = filter_keys.pop("project_id")
     assert not filter_keys
 
-    orderby = _parse_orderby(query, fields)
-    if orderby is None:
-        # We only return the top-N groups, based on the first field that is being
-        # queried, assuming that those are the most relevant to the user.
-        # In a future iteration we might expose an `orderBy` query parameter.
-        primary_metric_field = _get_primary_field(list(fields.values()), query.raw_groupby)
-        orderby = OrderBy(primary_metric_field, Direction.DESC)
+    ordered_preflight_filters: Dict[GroupByFieldName, Sequence[str]] = {}
+    try:
+        orderby = _parse_orderby(query, fields)
+    except NonPreflightOrderByException as exc:
+        # We hit this branch when we suspect that the orderBy column is one of the virtual
+        # columns, like `release.timestamp`, that require a preflight query to be run, and so
+        # we check here whether it is one of the supported preflight query columns and, if so,
+        # run the preflight query. Otherwise we re-raise the exception
+        raw_orderby = query.raw_orderby[0]
+        if raw_orderby[0] == "-":
+            raw_orderby = raw_orderby[1:]
+            direction = Direction.DESC
+        else:
+            direction = Direction.ASC
+
+        if raw_orderby not in PREFLIGHT_QUERY_COLUMNS:
+            raise exc
+
+        preflight_query_conditions = {
+            "orderby_field": raw_orderby,
+            "direction": direction,
+            "org_id": org_id,
+            "project_ids": project_ids,
+            "limit": query.limit,
+        }
+
+        # For preflight queries, we need to evaluate environment conditions because these might
+        # be used in the preflight query. For example, when we sort by `-release.timestamp` and
+        # environment filters are applied to the query, we need to apply those filters to the
+        # preflight query as well; otherwise the metrics query could end up filtered by releases
+        # that do not belong to the requested environment
+        environment_conditions = []
+        for condition in where:
+            preflight_query_condition = _get_filters_for_preflight_query_condition(
+                tag_name="environment", condition=condition
+            )
+            if preflight_query_condition != (None, None):
+                environment_conditions.append(preflight_query_condition)
+
+        if len(environment_conditions) > 1:
+            # Should never hit this branch. Added as a fail safe
+            raise InvalidParams("Environment condition was parsed incorrectly")
+        else:
+            try:
+                preflight_query_conditions.update({"env_condition": environment_conditions[0]})
+            except IndexError:
+                pass
+
+        preflight_query_filters = _generate_preflight_query_conditions(**preflight_query_conditions)
+
+        if len(preflight_query_filters) == 0:
+            # If we get no results from the pre-flight query that are supposed to be used as a
+            # filter in the metrics query, then there is no point in running the metrics query
+            return _empty_result(query)
+
+        condition_lhs: Optional[GroupByFieldName] = None
+        if raw_orderby == "release.timestamp":
+            condition_lhs = "release"
+            ordered_preflight_filters[condition_lhs] = preflight_query_filters
+
+        if condition_lhs is not None:
+            where += [Condition(Column(condition_lhs), Op.IN, preflight_query_filters)]
+
+        # Clear the orderBy because the query is already filtered; we will re-order the
+        # results according to the order of the filter list later on
+        orderby = None
+    else:
+        if orderby is None:
+            # We only return the top-N groups, based on the first field that is being
+            # queried, assuming that those are the most relevant to the user.
+            primary_metric_field = _get_primary_field(list(fields.values()), query.raw_groupby)
+            orderby = OrderBy(primary_metric_field, Direction.DESC)
 
     metrics_query = MetricsQuery(
         org_id,
@@ -460,15 +535,15 @@ def run_sessions_query(
         GroupKey.from_input_dict(group["by"]): group for group in metrics_results["groups"]
     }
 
-    output_groups: MutableMapping[GroupKey, Group] = defaultdict(
-        lambda: {
-            "totals": {field: default_for(field) for field in query.raw_fields},
-            "series": {
-                field: len(metrics_results["intervals"]) * [default_for(field)]
-                for field in query.raw_fields
-            },
-        }
-    )
+    default_group_gen_func: Callable[[], Group] = lambda: {
+        "totals": {field: default_for(field) for field in query.raw_fields},
+        "series": {
+            field: len(metrics_results["intervals"]) * [default_for(field)]
+            for field in query.raw_fields
+        },
+    }
+
+    output_groups: MutableMapping[GroupKey, Group] = defaultdict(default_group_gen_func)
 
     for field in fields.values():
         field.extract_values(input_groups, output_groups)
@@ -484,12 +559,17 @@ def run_sessions_query(
                 # Create entry in default dict:
                 output_groups[GroupKey(session_status=status)]
 
+    result_groups: Sequence[SessionsQueryGroup] = [
+        # Convert group keys back to dictionaries:
+        {"by": group_key.to_output_dict(), **group}  # type: ignore
+        for group_key, group in output_groups.items()
+    ]
+    result_groups = _order_by_preflight_query_results(
+        ordered_preflight_filters, query.raw_groupby, result_groups, default_group_gen_func
+    )
+
     return {
-        "groups": [
-            # Convert group keys back to dictionaries:
-            {"by": group_key.to_output_dict(), **group}  # type: ignore
-            for group_key, group in output_groups.items()
-        ],
+        "groups": result_groups,
         "start": isoformat_z(metrics_results["start"]),
         "end": isoformat_z(metrics_results["end"]),
         "intervals": [isoformat_z(ts) for ts in metrics_results["intervals"]],
@@ -497,6 +577,87 @@ def run_sessions_query(
     }
 
 
+def _order_by_preflight_query_results(
+    ordered_preflight_filters: Dict[GroupByFieldName, Sequence[str]],
+    groupby: Sequence[GroupByFieldName],
+    result_groups: Sequence[SessionsQueryGroup],
+    default_group_gen_func: Callable[[], Group],
+) -> Sequence[SessionsQueryGroup]:
+    """
+    If a preflight query was run, we want to preserve the order of the results it
+    returned. We create a mapping from group value to result group so that we can
+    easily sort the resulting groups.
+    For example, if we are ordering by `-release.timestamp`, we might get the list
+    ['1B', '1A'] from Postgres, and the following results from the metrics dataset:
+    [
+        {
+            "by": {"release": "1A"},
+            "totals": {"sum(session)": 0},
+            "series": {"sum(session)": [0]},
+        },
+        {
+            "by": {"release": "1B"},
+            "totals": {"sum(session)": 10},
+            "series": {"sum(session)": [10]},
+        },
+    ]
+    Then we create a mapping from release value to the result group:
+    {
+        "1A": [
+            {
+                "by": {"release": "1A"},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+        ],
+        "1B": [
+            {
+                "by": {"release": "1B"},
+                "totals": {"sum(session)": 10},
+                "series": {"sum(session)": [10]},
+            },
+        ],
+    }
+    Finally, we loop over the releases list in order and rebuild the result_groups
+    array by appending, for each release, the matching result groups looked up in
+    that mapping by group value
+    """
+    if len(ordered_preflight_filters) == 1:
+        orderby_field = list(ordered_preflight_filters.keys())[0]
+        grp_value_to_result_grp_mapping: Dict[Union[int, str], List[SessionsQueryGroup]] = {}
+        # If the preflight query ran but the relevant column is not in the groupBy, then
+        # there is no need to re-order the results
+        if orderby_field in groupby:
+            for result_group in result_groups:
+                grp_value = result_group["by"][orderby_field]
+                grp_value_to_result_grp_mapping.setdefault(grp_value, []).append(result_group)
+            result_groups = []
+            for elem in ordered_preflight_filters[orderby_field]:
+                try:
+                    for grp in grp_value_to_result_grp_mapping[elem]:
+                        result_groups += [grp]
+                except KeyError:
+                    # We get into this branch if there are groups in the preflight query that
+                    # do not have matching data in the metrics dataset; since we want to show
+                    # those groups in the output, we add them with the requested fields nulled
+                    # out. This could occur, for example, when ordering by `-release.timestamp`
+                    # and some of the latest releases in Postgres have no matching data in the
+                    # metrics dataset
+                    group_key_dict = {orderby_field: elem}
+                    for key in groupby:
+                        if key == orderby_field:
+                            continue
+                        # Added a mypy ignore here because this is a one-off: result groups
+                        # never have null group values except when the group exists in the
+                        # preflight query but not in the metrics dataset
+                        group_key_dict.update({key: None})  # type: ignore
+                    result_groups += [
+                        {"by": group_key_dict, **default_group_gen_func()}  # type: ignore
+                    ]
+    return result_groups
+
+
 def _empty_result(query: QueryDefinition) -> SessionsQueryResult:
     intervals = get_timestamps(query)
     return {
@@ -570,6 +731,40 @@ def _transform_single_condition(
     return condition, None
 
 
+def _get_filters_for_preflight_query_condition(
+    tag_name: str, condition: Union[Condition, BooleanCondition]
+) -> Tuple[Optional[Op], Optional[Set[str]]]:
+    """
+    Takes a tag name and a condition, checks whether that condition applies to the tag,
+    and if so returns a tuple of the normalized op (either Op.IN or Op.NOT_IN) and the
+    set of tag values
+    """
+    if isinstance(condition, Condition) and condition.lhs == Column(tag_name):
+        if condition.op in [Op.EQ, Op.NEQ, Op.IN, Op.NOT_IN]:
+            filters = (
+                {condition.rhs}
+                if isinstance(condition.rhs, str)
+                else {elem for elem in condition.rhs}
+            )
+            op = {Op.EQ: Op.IN, Op.IN: Op.IN, Op.NEQ: Op.NOT_IN, Op.NOT_IN: Op.NOT_IN}[condition.op]
+            return op, filters
+        raise InvalidParams(
+            f"Unable to resolve {tag_name} filter due to unsupported op {condition.op}"
+        )
+
+    if tag_name in str(condition):
+        # Anything not handled by the code above cannot be parsed for now,
+        # for two reasons:
+        # 1) Queries like session.status:healthy OR release:foo are hard to
+        #    translate, because they would require different conditions on the separate
+        #    metric fields.
+        # 2) AND and OR conditions come in the form `Condition(Function("or", [...]), Op.EQ, 1)`
+        #    where [...] can again contain any condition encoded as a Function. For this, we would
+        #    have to replicate the translation code above.
+        raise InvalidParams(f"Unable to parse condition with {tag_name}")
+    return None, None
+
+
 def _parse_session_status(status: Any) -> FrozenSet[SessionStatus]:
     try:
         return frozenset([SessionStatus(status)])
@@ -583,12 +778,21 @@ def _parse_orderby(
     orderbys = query.raw_orderby
     if orderbys == []:
         return None
+
+    # ToDo(ahmed): We might want to enable multi-field ordering if some of the fields ordered
+    #  by are generated from pre-flight queries, and thereby are popped from metrics queries,
+    #  but I thought it might be confusing behavior, so I am restricting it for now.
     if len(orderbys) > 1:
         raise InvalidParams("Cannot order by multiple fields")
     orderby = orderbys[0]
 
     if "session.status" in query.raw_groupby:
-        raise InvalidParams("Cannot use 'orderBy' when grouping by sessions.status")
+        # We can allow grouping by `session.status` while ordering if the orderBy column is
+        # one of the virtual columns (like `release.timestamp`) that indicates a preflight
+        # query needs to be run to evaluate the query, so we raise an instance of
+        # `NonPreflightOrderByException` and delegate handling of this case to the
+        # `run_sessions_query` function
+        raise NonPreflightOrderByException("Cannot use 'orderBy' when grouping by sessions.status")
 
     direction = Direction.ASC
     if orderby[0] == "-":
@@ -597,7 +801,12 @@ def _parse_orderby(
 
     assert query.raw_fields
     if orderby not in query.raw_fields:
-        raise InvalidParams("'orderBy' must be one of the provided 'fields'")
+        # We can allow the orderBy column to be a field that is not requested in the select
+        # statements if it is one of the virtual columns that indicates a preflight query
+        # needs to be run to evaluate the query, so we raise an instance of
+        # `NonPreflightOrderByException` and delegate handling of this case to the
+        # `run_sessions_query` function
+        raise NonPreflightOrderByException("'orderBy' must be one of the provided 'fields'")
 
     field = fields[orderby]
 
@@ -617,3 +826,44 @@ def _get_primary_field(fields: Sequence[Field], raw_groupby: Sequence[str]) -> M
 
     assert primary_metric_field
     return primary_metric_field
+
+
+def _generate_preflight_query_conditions(
+    orderby_field: VirtualOrderByName,
+    direction: Direction,
+    org_id: int,
+    project_ids: Sequence[ProjectId],
+    limit: int,
+    env_condition: Optional[Tuple[Op, Set[str]]] = None,
+) -> Sequence[str]:
+    """
+    Function that fetches the preflight query filters that need to be applied to the subsequent
+    metrics query
+    """
+    queryset_results = []
+    if orderby_field == "release.timestamp":
+        queryset = Release.objects.filter(
+            organization=org_id,
+            projects__id__in=project_ids,
+        )
+        if env_condition is not None:
+            op, env_filter_set = env_condition
+            environment_orm_conditions = {
+                "releaseprojectenvironment__environment__name__in": env_filter_set,
+                "releaseprojectenvironment__project_id__in": project_ids,
+            }
+            if op == Op.IN:
+                queryset = queryset.filter(**environment_orm_conditions)
+            else:
+                assert op == Op.NOT_IN
+                queryset = queryset.exclude(**environment_orm_conditions)
+
+        if direction == Direction.DESC:
+            queryset = queryset.order_by("-date_added", "-id")
+        else:
+            queryset = queryset.order_by("date_added", "id")
+
+        if limit is not None:
+            queryset = queryset[: limit - 1]
+        queryset_results = list(queryset.values_list("version", flat=True))
+    return queryset_results
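
As a quick sanity check of the condition normalization above, a hedged sketch with hypothetical inputs (the function under test is the one added in this diff):

    from snuba_sdk import Column, Condition, Op

    from sentry.release_health.metrics_sessions_v2 import (
        _get_filters_for_preflight_query_condition,
    )

    # environment:prod -- Op.EQ is normalized to Op.IN
    assert _get_filters_for_preflight_query_condition(
        tag_name="environment",
        condition=Condition(Column("environment"), Op.EQ, "prod"),
    ) == (Op.IN, {"prod"})

    # !environment:[prod, dev] -- the values are collected into a set
    assert _get_filters_for_preflight_query_condition(
        tag_name="environment",
        condition=Condition(Column("environment"), Op.NOT_IN, ["prod", "dev"]),
    ) == (Op.NOT_IN, {"prod", "dev"})

    # A condition on an unrelated tag falls through to (None, None)
    assert _get_filters_for_preflight_query_condition(
        tag_name="environment",
        condition=Condition(Column("release"), Op.EQ, "1A"),
    ) == (None, None)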

+ 9 - 0
src/sentry/snuba/sessions_v2.py

@@ -356,6 +356,15 @@ class InvalidParams(Exception):
     pass
 
 
+class NonPreflightOrderByException(InvalidParams):
+    """
+    Raised while parsing the orderBy column to signal that it might be one of the accepted
+    pre-flight query fields; it only amounts to an actual error when no preflight query is
+    run for such a field
+    """
+
+    ...
+
+
 def get_now():
     """Wrapper function to make it mockable in unit tests"""
     return datetime.now(tz=pytz.utc)
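
Since the new exception subclasses InvalidParams, pre-existing handlers keep working unchanged; a minimal sketch of the intended control flow (simplified from run_sessions_query above):

    from sentry.snuba.sessions_v2 import InvalidParams, NonPreflightOrderByException

    try:
        # _parse_orderby raises this when the orderBy column might be a virtual
        # pre-flight field such as `release.timestamp`
        raise NonPreflightOrderByException("'orderBy' must be one of the provided 'fields'")
    except InvalidParams:
        # Still caught: any existing `except InvalidParams` handler continues to treat
        # it as a plain validation error, while run_sessions_query catches the narrower
        # NonPreflightOrderByException first and attempts the pre-flight query instead
        pass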

+ 467 - 0
tests/snuba/api/endpoints/test_organization_sessions.py

@@ -1428,3 +1428,470 @@ class OrganizationSessionsEndpointMetricsTest(
                     "totals": {"sum(session)": 1},
                 }
             ]
+
+
+@patch("sentry.api.endpoints.organization_sessions.release_health", MetricsReleaseHealthBackend())
+class SessionsMetricsSortReleaseTimestampTest(SessionMetricsTestCase, APITestCase):
+    def do_request(self, query, user=None, org=None):
+        self.login_as(user=user or self.user)
+        url = reverse(
+            "sentry-api-0-organization-sessions",
+            kwargs={"organization_slug": (org or self.organization).slug},
+        )
+        return self.client.get(url, query, format="json")
+
+    @freeze_time(MOCK_DATETIME)
+    def test_order_by_with_no_releases(self):
+        """
+        Test that ensures that if the preflight query returns no releases when ordering
+        by `release.timestamp`, we get no groups.
+        Essentially testing the empty preflight query filters branch.
+        """
+        project_random = self.create_project()
+        for _ in range(0, 2):
+            self.store_session(make_session(project_random))
+        self.store_session(make_session(project_random, status="crashed"))
+
+        response = self.do_request(
+            {
+                "project": project_random.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["crash_free_rate(session)"],
+                "groupBy": ["release"],
+                "orderBy": "-release.timestamp",
+                "per_page": 3,
+            }
+        )
+        assert response.data["groups"] == []
+
+    @freeze_time(MOCK_DATETIME)
+    def test_order_by(self):
+        """
+        Test that ensures that we are able to get the crash_free_rate for the most recent
+        releases when grouping by release
+        """
+        # Step 1: Create 3 releases
+        release1b = self.create_release(version="1B")
+        release1c = self.create_release(version="1C")
+        release1d = self.create_release(version="1D")
+
+        # Step 2: Create sessions for each of those releases to establish a crash free rate
+        # Release 1c -> 66.7% Crash free rate
+        for _ in range(0, 2):
+            self.store_session(make_session(self.project, release=release1c.version))
+        self.store_session(make_session(self.project, release=release1c.version, status="crashed"))
+
+        # Release 1b -> 33.3% Crash free rate
+        for _ in range(0, 2):
+            self.store_session(
+                make_session(self.project, release=release1b.version, status="crashed")
+            )
+        self.store_session(make_session(self.project, release=release1b.version))
+
+        # Release 1d -> 80% Crash free rate
+        for _ in range(0, 4):
+            self.store_session(make_session(self.project, release=release1d.version))
+        self.store_session(make_session(self.project, release=release1d.version, status="crashed"))
+
+        # Step 3: Make request
+        response = self.do_request(
+            {
+                "project": self.project.id,  # project without users
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["crash_free_rate(session)"],
+                "groupBy": ["release"],
+                "orderBy": "-release.timestamp",
+                "per_page": 3,
+            }
+        )
+        # Step 4: Validate Results
+        assert response.data["groups"] == [
+            {
+                "by": {"release": "1D"},
+                "totals": {"crash_free_rate(session)": 0.8},
+                "series": {"crash_free_rate(session)": [0.8]},
+            },
+            {
+                "by": {"release": "1C"},
+                "totals": {"crash_free_rate(session)": 0.6666666666666667},
+                "series": {"crash_free_rate(session)": [0.6666666666666667]},
+            },
+            {
+                "by": {"release": "1B"},
+                "totals": {"crash_free_rate(session)": 0.33333333333333337},
+                "series": {"crash_free_rate(session)": [0.33333333333333337]},
+            },
+        ]
+
+    @freeze_time(MOCK_DATETIME)
+    def test_order_by_with_session_status_groupby(self):
+        """
+        Test that ensures we are able to group by session.status and order by `release.timestamp`
+        since `release.timestamp` is generated from a preflight query
+        """
+        rando_project = self.create_project()
+
+        release_1a = self.create_release(project=rando_project, version="1A")
+        release_1b = self.create_release(project=rando_project, version="1B")
+
+        # Release 1B sessions
+        for _ in range(4):
+            self.store_session(
+                make_session(rando_project, release=release_1b.version, status="crashed")
+            )
+        for _ in range(10):
+            self.store_session(make_session(rando_project, release=release_1b.version))
+        for _ in range(3):
+            self.store_session(make_session(rando_project, errors=1, release=release_1b.version))
+
+        # Release 1A sessions
+        for _ in range(0, 2):
+            self.store_session(
+                make_session(rando_project, release=release_1a.version, status="crashed")
+            )
+        self.store_session(make_session(rando_project, release=release_1a.version))
+        for _ in range(3):
+            self.store_session(make_session(rando_project, errors=1, release=release_1a.version))
+
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["sum(session)"],
+                "groupBy": ["release", "session.status"],
+                "orderBy": "-release.timestamp",
+            }
+        )
+        assert response.data["groups"] == [
+            {
+                "by": {"release": "1B", "session.status": "abnormal"},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+            {
+                "by": {"release": "1B", "session.status": "crashed"},
+                "totals": {"sum(session)": 4},
+                "series": {"sum(session)": [4]},
+            },
+            {
+                "by": {"release": "1B", "session.status": "errored"},
+                "totals": {"sum(session)": 3},
+                "series": {"sum(session)": [3]},
+            },
+            {
+                "by": {"release": "1B", "session.status": "healthy"},
+                "totals": {"sum(session)": 10},
+                "series": {"sum(session)": [10]},
+            },
+            {
+                "by": {"release": "1A", "session.status": "abnormal"},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+            {
+                "by": {"release": "1A", "session.status": "crashed"},
+                "totals": {"sum(session)": 2},
+                "series": {"sum(session)": [2]},
+            },
+            {
+                "by": {"release": "1A", "session.status": "errored"},
+                "totals": {"sum(session)": 3},
+                "series": {"sum(session)": [3]},
+            },
+            {
+                "by": {"release": "1A", "session.status": "healthy"},
+                "totals": {"sum(session)": 1},
+                "series": {"sum(session)": [1]},
+            },
+        ]
+
+    @freeze_time(MOCK_DATETIME)
+    def test_order_by_with_limit(self):
+        rando_project = self.create_project()
+
+        # Create two releases with metrics data and then two releases without metrics data
+        release_1a = self.create_release(project=rando_project, version="1A")
+        release_1b = self.create_release(project=rando_project, version="1B")
+        self.create_release(project=rando_project, version="1C")
+        self.create_release(project=rando_project, version="1D")
+
+        self.store_session(make_session(rando_project, release=release_1a.version))
+        self.store_session(make_session(rando_project, release=release_1b.version))
+
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["sum(session)"],
+                "groupBy": ["release"],
+                "orderBy": "-release.timestamp",
+                "per_page": 3,
+            }
+        )
+
+        assert response.data["groups"] == [
+            {
+                "by": {"release": "1D"},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+            {
+                "by": {"release": "1C"},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+            {
+                "by": {"release": "1B"},
+                "totals": {"sum(session)": 1},
+                "series": {"sum(session)": [1]},
+            },
+        ]
+
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["sum(session)"],
+                "groupBy": ["release", "session.status"],
+                "orderBy": "-release.timestamp",
+                "per_page": 2,
+            }
+        )
+        assert response.data["groups"] == [
+            {
+                "by": {"release": "1D", "session.status": None},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+            {
+                "by": {"release": "1C", "session.status": None},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+        ]
+
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["sum(session)"],
+                "groupBy": ["release", "session.status", "project"],
+                "orderBy": "-release.timestamp",
+                "per_page": 2,
+            }
+        )
+        assert response.data["groups"] == [
+            {
+                "by": {"release": "1D", "session.status": None, "project": None},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+            {
+                "by": {"release": "1C", "session.status": None, "project": None},
+                "totals": {"sum(session)": 0},
+                "series": {"sum(session)": [0]},
+            },
+        ]
+
+    @freeze_time(MOCK_DATETIME)
+    def test_order_by_with_environment_filter_on_preflight(self):
+        rando_project = self.create_project()
+        rando_env = self.create_environment(name="rando_env", project=self.project)
+
+        # Create two releases tied to an environment and then two releases without one
+        release_1a = self.create_release(
+            project=rando_project, version="1A", environments=[rando_env]
+        )
+        release_1b = self.create_release(
+            project=rando_project, version="1B", environments=[rando_env]
+        )
+        release_1c = self.create_release(project=rando_project, version="1C")
+        release_1d = self.create_release(project=rando_project, version="1D")
+
+        self.store_session(
+            make_session(rando_project, release=release_1a.version, environment="rando_env")
+        )
+        self.store_session(
+            make_session(rando_project, release=release_1b.version, environment="rando_env")
+        )
+        self.store_session(make_session(rando_project, release=release_1c.version))
+        self.store_session(make_session(rando_project, release=release_1d.version))
+
+        # Test env condition with IN
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["sum(session)"],
+                "query": "environment:[rando_env,rando_enc2]",
+                "groupBy": ["release", "environment"],
+                "orderBy": "-release.timestamp",
+                "per_page": 4,
+            }
+        )
+        assert response.data["groups"] == [
+            {
+                "by": {"release": "1B", "environment": "rando_env"},
+                "totals": {"sum(session)": 1},
+                "series": {"sum(session)": [1]},
+            },
+            {
+                "by": {"release": "1A", "environment": "rando_env"},
+                "totals": {"sum(session)": 1},
+                "series": {"sum(session)": [1]},
+            },
+        ]
+
+        # Test env condition with NOT IN
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["sum(session)"],
+                "query": "!environment:[rando_env,rando_enc2]",
+                "groupBy": ["release", "environment"],
+                "orderBy": "-release.timestamp",
+                "per_page": 4,
+            }
+        )
+        assert response.data["groups"] == [
+            {
+                "by": {"release": "1D", "environment": "production"},
+                "totals": {"sum(session)": 1},
+                "series": {"sum(session)": [1]},
+            },
+            {
+                "by": {"release": "1C", "environment": "production"},
+                "totals": {"sum(session)": 1},
+                "series": {"sum(session)": [1]},
+            },
+        ]
+
+        # Test env condition with invalid OR operation
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "field": ["sum(session)"],
+                "query": "environment:rando_env OR environment:rando_enc2",
+                "groupBy": ["release", "environment"],
+                "orderBy": "-release.timestamp",
+                "per_page": 4,
+            }
+        )
+        assert response.json()["detail"] == "Unable to parse condition with environment"
+
+    @freeze_time(MOCK_DATETIME)
+    def test_order_by_without_release_groupby(self):
+        rando_project = self.create_project()
+
+        release_1a = self.create_release(project=rando_project, version="1A")
+        release_1b = self.create_release(project=rando_project, version="1B")
+        release_1c = self.create_release(project=rando_project, version="1C")
+
+        # Release 1B sessions
+        for _ in range(4):
+            self.store_session(
+                make_session(rando_project, release=release_1b.version, status="crashed")
+            )
+        for _ in range(10):
+            self.store_session(make_session(rando_project, release=release_1b.version))
+        for _ in range(3):
+            self.store_session(make_session(rando_project, errors=1, release=release_1b.version))
+
+        # Release 1A sessions
+        for _ in range(0, 2):
+            self.store_session(
+                make_session(rando_project, release=release_1a.version, status="crashed")
+            )
+        self.store_session(make_session(rando_project, release=release_1a.version))
+        for _ in range(3):
+            self.store_session(make_session(rando_project, errors=1, release=release_1a.version))
+
+        # Release 1C sessions
+        for _ in range(0, 2):
+            self.store_session(
+                make_session(rando_project, release=release_1c.version, status="crashed")
+            )
+        for _ in range(3):
+            self.store_session(make_session(rando_project, errors=1, release=release_1c.version))
+
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "query": "session.status:[crashed,errored]",
+                "field": ["sum(session)"],
+                "orderBy": "-release.timestamp",
+                "per_page": 2,
+            }
+        )
+        assert response.data["groups"] == [
+            {
+                "by": {},
+                "totals": {"sum(session)": 12},
+                "series": {"sum(session)": [12]},
+            },
+        ]
+
+    @freeze_time(MOCK_DATETIME)
+    def test_order_by_release_with_session_status_current_filter(self):
+        rando_project = self.create_project()
+
+        release_1a = self.create_release(project=rando_project, version="1A")
+        release_1b = self.create_release(project=rando_project, version="1B")
+
+        # Release 1B sessions
+        for _ in range(4):
+            self.store_session(
+                make_session(rando_project, release=release_1b.version, status="crashed")
+            )
+        for _ in range(10):
+            self.store_session(make_session(rando_project, release=release_1b.version))
+        for _ in range(3):
+            self.store_session(make_session(rando_project, errors=1, release=release_1b.version))
+
+        # Release 1A sessions
+        for _ in range(0, 2):
+            self.store_session(
+                make_session(rando_project, release=release_1a.version, status="crashed")
+            )
+        self.store_session(make_session(rando_project, release=release_1a.version))
+        for _ in range(3):
+            self.store_session(make_session(rando_project, errors=1, release=release_1a.version))
+
+        response = self.do_request(
+            {
+                "project": rando_project.id,
+                "statsPeriod": "1d",
+                "interval": "1d",
+                "query": "session.status:[crashed,errored]",
+                "field": ["sum(session)"],
+                "groupBy": ["release"],
+                "orderBy": "-release.timestamp",
+            }
+        )
+
+        assert response.data["groups"] == [
+            {
+                "by": {"release": "1B"},
+                "totals": {"sum(session)": 7},
+                "series": {"sum(session)": [7]},
+            },
+            {
+                "by": {"release": "1A"},
+                "totals": {"sum(session)": 5},
+                "series": {"sum(session)": [5]},
+            },
+        ]