
chore(grouping): Kill grouping levels endpoints (#54073)

The grouping levels and grouping levels new issues endpoints were used to power the Grouping tab on the issue details page, which has since [been removed](https://github.com/getsentry/sentry/pull/52895). Since they're no longer used anywhere in the codebase (in any of our repos), they can go away.
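
For reference, the two routes this removes (as seen in the `src/sentry/api/urls.py` hunk below):

```
GET /api/0/issues/{issue_id}/grouping/levels/
GET /api/0/issues/{issue_id}/grouping/levels/{id}/new-issues/
```
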
Katie Byers · 1 year ago · commit db2bae48da

+ 0 - 1
pyproject.toml

@@ -188,7 +188,6 @@ module = [
     "sentry.api.endpoints.group_tagkey_details",
     "sentry.api.endpoints.group_tagkey_values",
     "sentry.api.endpoints.group_tags",
-    "sentry.api.endpoints.grouping_levels",
     "sentry.api.endpoints.index",
     "sentry.api.endpoints.integration_features",
     "sentry.api.endpoints.integrations.index",

+ 0 - 247
src/sentry/api/endpoints/grouping_level_new_issues.py

@@ -1,247 +0,0 @@
-from __future__ import annotations
-
-import datetime
-
-from django.core.cache import cache
-from rest_framework.request import Request
-from rest_framework.response import Response
-from snuba_sdk import Request as SnubaRequest
-from snuba_sdk.conditions import Condition, Op
-from snuba_sdk.orderby import Direction, OrderBy
-from snuba_sdk.query import Column, Entity, Function, Query
-
-from sentry import nodestore
-from sentry.api.base import region_silo_endpoint
-from sentry.api.bases import GroupEndpoint
-from sentry.api.endpoints.group_hashes_split import _get_group_filters
-from sentry.api.endpoints.grouping_levels import LevelsOverview, check_feature, get_levels_overview
-from sentry.api.paginator import GenericOffsetPaginator
-from sentry.api.serializers import EventSerializer, serialize
-from sentry.event_manager import get_event_type
-from sentry.eventstore.models import Event
-from sentry.models import Group
-from sentry.utils import snuba
-from sentry.utils.safe import get_path
-
-
-@region_silo_endpoint
-class GroupingLevelNewIssuesEndpoint(GroupEndpoint):
-    def get(self, request: Request, id: str, group: Group) -> Response:
-        """
-        Retrieve information about a particular grouping level, including a
-        list of issues it would create.
-
-        ```
-        GET /api/0/issues/<group_id>/grouping/levels/<level_id>/new-issues/
-
-        [
-            {"hash": "...", "latestEvent": ..., "eventCount": 132},
-            ...
-        ]
-        ```
-
-        Available level IDs can be fetched from `GroupingLevelsEndpoint`.
-
-        Each row/array item corresponds to one *new issue* that selecting this
-        level would create in place of the *affected issues*. The array items
-        are not existing groups but groups that would be created, so much of
-        the information normally available for groups is missing.
-
-        - `latestEvent`: a sample event in the same format returned by the
-          event details endpoint(s).
-
-        - `hash`: The grouping hash, probably insignificant to the user but can
-          be shown for diagnostic purposes.
-
-        - `eventCount`: How many events this issue would contain. Note that,
-          as with any other event count, this number can change at any time
-          because events keep coming in.
-
-        The "would-be issues" are returned in-order such that the most recently
-        seen "issue" is at the top, i.e. it is sorted in descending order of
-        `latestEvent.dateCreated`.
-
-        The *affected issue* (=to-be-deleted issue) is often just the current one,
-        however if the previewed grouping level is reduced, this endpoint can
-        return a list of entries which together have more events than the
-        current issue (meaning issues will be merged together).
-
-        In the future there will be an endpoint that allows you to fetch the
-        list of affected issues. For now, the UI should simply show a warning
-        if the level is decreased (and possibly only if the new issues'
-        summed event count exceeds that of the current issue).
-        """
-
-        check_feature(group.project.organization, request)
-
-        parsed_id = int(id)
-
-        def data_fn(offset=None, limit=None):
-            return _query_snuba(group, parsed_id, offset=offset, limit=limit)
-
-        def on_results(results):
-            return _process_snuba_results(results, group, parsed_id, request.user)
-
-        return self.paginate(
-            request=request,
-            on_results=on_results,
-            paginator=GenericOffsetPaginator(data_fn=data_fn),
-        )
-
-
-def _get_hash_for_parent_level(group: Group, id: int, levels_overview: LevelsOverview) -> str:
-    # If this is violated, there cannot be a 1:1 mapping between level and hash.
-    assert 0 <= id < levels_overview.current_level
-
-    # This cache never needs explicit invalidation because during every level
-    # change, the group ID changes.
-    #
-    # No idea if the query is slow, caching just because I can.
-    cache_key = f"group-parent-level-hash:{group.id}:{id}"
-
-    return_hash = cache.get(cache_key)
-
-    if return_hash is None:
-        query = (
-            Query(Entity("events"))
-            .set_select([Function("arrayElement", [Column("hierarchical_hashes"), id + 1], "hash")])
-            .set_where(_get_group_filters(group))
-            .set_limit(1)
-        )
-        request = SnubaRequest(
-            dataset="events",
-            app_id="grouping",
-            query=query,
-            tenant_ids={
-                "referrer": "api.group_hashes_levels.get_hash_for_parent_level",
-                "organization_id": group.project.organization_id,
-            },
-        )
-        return_hash: str = get_path(snuba.raw_snql_query(request), "data", 0, "hash")  # type: ignore
-        cache.set(cache_key, return_hash)
-
-    assert return_hash
-    return return_hash
-
-
-def _query_snuba(group: Group, id: int, offset=None, limit=None):
-    query = (
-        Query(Entity("events"))
-        .set_select(
-            [
-                Function(
-                    "arrayElement",
-                    [
-                        Column("hierarchical_hashes"),
-                        Function(
-                            "least", [id + 1, Function("length", [Column("hierarchical_hashes")])]
-                        ),
-                    ],
-                    "new_materialized_hash",
-                ),
-                Function("argMax", [Column("event_id"), Column("timestamp")], "latest_event_id"),
-                Function("max", [Column("timestamp")], "latest_event_timestamp"),
-                Function("count", [], "event_count"),
-            ]
-        )
-        .set_groupby([Column("new_materialized_hash")])
-        .set_orderby(
-            [
-                OrderBy(Column("event_count"), Direction.DESC),
-                # Completely useless sorting key, only there to achieve stable sort
-                # order in tests.
-                OrderBy(Column("latest_event_timestamp"), Direction.DESC),
-            ]
-        )
-    )
-
-    levels_overview = get_levels_overview(group)
-
-    # These conditions are always valid
-    common_where = [
-        Condition(Column("primary_hash"), Op.EQ, levels_overview.only_primary_hash),
-        Condition(Column("project_id"), Op.EQ, group.project_id),
-    ]
-
-    if id >= levels_overview.current_level:
-        # Good path: Since we increase the level we can easily constrain the
-        # entire query by group_id and timerange
-        query = query.set_where(common_where + _get_group_filters(group))
-    else:
-        # Bad path: We decreased the level and now we need to count events from
-        # other groups. Since we cannot filter by group_id, we also cannot
-        # restrict the timerange to anything at all. The Snuba API still
-        # requires us to set a timerange, so we set it to the maximum of 90d.
-        #
-        # Luckily the minmax index on group_id alone is reasonably efficient,
-        # so filtering by timerange (= the primary key) is only a little bit
-        # faster.
-        now = datetime.datetime.now()
-        new_materialized_hash = _get_hash_for_parent_level(group, id, levels_overview)
-        query = query.set_where(
-            common_where
-            + [
-                Condition(
-                    Function("arrayElement", [Column("hierarchical_hashes"), id + 1]),
-                    Op.EQ,
-                    new_materialized_hash,
-                ),
-                Condition(Column("timestamp"), Op.GTE, now - datetime.timedelta(days=90)),
-                Condition(Column("timestamp"), Op.LT, now + datetime.timedelta(seconds=10)),
-            ]
-        )
-
-    if offset is not None:
-        query = query.set_offset(offset)
-
-    if limit is not None:
-        query = query.set_limit(limit)
-
-    request = SnubaRequest(
-        dataset="events",
-        app_id="grouping",
-        query=query,
-        tenant_ids={"organization_id": group.project.organization_id},
-    )
-    return snuba.raw_snql_query(request, "api.group_hashes_levels.get_level_new_issues")["data"]
-
-
-def _process_snuba_results(query_res, group: Group, id: int, user):
-    event_ids = {
-        row["latest_event_id"]: Event.generate_node_id(group.project_id, row["latest_event_id"])
-        for row in query_res
-    }
-
-    node_data = nodestore.backend.get_multi(list(event_ids.values()))
-
-    response = []
-
-    for row in query_res:
-        response_item = {
-            "hash": row["new_materialized_hash"],
-            "eventCount": row["event_count"],
-        }
-        event_id = row["latest_event_id"]
-        event_data = node_data.get(event_ids[event_id], None)
-
-        if event_data is not None:
-            event = Event(group.project_id, event_id, group_id=group.id, data=event_data)
-            response_item["latestEvent"] = serialize(event, user, EventSerializer())
-
-            tree_label = get_path(event_data, "hierarchical_tree_labels", id) or get_path(
-                event_data, "hierarchical_tree_labels", -1
-            )
-
-            # Rough approximation of what happens with Group title
-            event_type = get_event_type(event.data)
-            metadata: dict[str, str | bool] = dict(event.get_event_metadata())
-            metadata["current_tree_label"] = tree_label
-            # Force rendering of grouping tree labels irrespective of platform
-            metadata["display_title_with_tree_label"] = True
-            title = event_type.get_title(metadata)
-            response_item["title"] = title or event.title
-            response_item["metadata"] = metadata
-
-        response.append(response_item)
-
-    return response
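
For anyone reconstructing similar behavior: the `data_fn`/`on_results` pair above feeds `GenericOffsetPaginator`. As a rough sketch of that contract (illustrative names, not Sentry internals):

```python
from typing import Any, Callable

Row = dict[str, Any]

def paginate_offset(
    data_fn: Callable[..., list[Row]],       # row fetcher, like _query_snuba above
    on_results: Callable[[list[Row]], Any],  # post-processor, like _process_snuba_results
    offset: int = 0,
    limit: int = 100,
) -> tuple[Any, bool]:
    # Over-fetch by one row so no second count query is needed to decide
    # whether another page exists.
    rows = data_fn(offset=offset, limit=limit + 1)
    has_next = len(rows) > limit
    return on_results(rows[:limit]), has_next
```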

+ 0 - 180
src/sentry/api/endpoints/grouping_levels.py

@@ -1,180 +0,0 @@
-from dataclasses import dataclass
-
-from snuba_sdk import Request as SnubaRequest
-from snuba_sdk.query import Column, Entity, Function, Query
-
-from sentry import features
-from sentry.api.base import region_silo_endpoint
-from sentry.api.bases import GroupEndpoint
-from sentry.api.endpoints.group_hashes_split import _construct_arraymax, _get_group_filters
-from sentry.api.exceptions import SentryAPIException, status
-from sentry.grouping.api import get_grouping_config_dict_for_project, load_grouping_config
-from sentry.models import Group, GroupHash
-from sentry.utils import snuba
-
-
-class NoEvents(SentryAPIException):
-    status_code = status.HTTP_403_FORBIDDEN
-    code = "no_events"
-    message = "This issue has no events."
-
-
-class MergedIssues(SentryAPIException):
-    status_code = status.HTTP_403_FORBIDDEN
-    code = "merged_issues"
-    message = "The issue can only contain one fingerprint. It needs to be fully unmerged before grouping levels can be shown."
-
-
-class MissingFeature(SentryAPIException):
-    status_code = status.HTTP_403_FORBIDDEN
-    code = "missing_feature"
-    message = "This project does not have the grouping tree feature."
-
-
-class IssueNotHierarchical(SentryAPIException):
-    status_code = status.HTTP_403_FORBIDDEN
-    code = "issue_not_hierarchical"
-    message = "This issue does not have hierarchical grouping."
-
-
-class ProjectNotHierarchical(SentryAPIException):
-    status_code = status.HTTP_403_FORBIDDEN
-    code = "project_not_hierarchical"
-    message = "This project does not have hierarchical grouping."
-
-
-from rest_framework.request import Request
-from rest_framework.response import Response
-
-
-@region_silo_endpoint
-class GroupingLevelsEndpoint(GroupEndpoint):
-    def get(self, request: Request, group: Group) -> Response:
-        """
-        Return the available levels for this group.
-
-        ```
-        GET /api/0/issues/123/grouping/levels/
-
-        {"levels": [{"id": "0", "isCurrent": true}, {"id": "1"}, {"id": "2"}]}
-        ```
-
-        `isCurrent` is the currently applied level that the server groups by.
-        It cannot be reapplied.
-
-        The levels are returned in order, such that the first level produces
-        the fewest issues and the last level the most.
-
-        The IDs correspond to array indices in the underlying ClickHouse column
-        and are parseable as integers, but this must be treated as an
-        implementation detail. Clients should pass IDs around as opaque
-        strings.
-
-        A single `id` can be passed as part of the URL to
-        `GroupingLevelNewIssuesEndpoint`.
-
-        Returns a 403 if grouping levels are unavailable or the required
-        feature flags are missing.
-        """
-
-        check_feature(group.project.organization, request)
-
-        return self.respond(_list_levels(group), status=200)
-
-
-def check_feature(organization, request):
-    if not features.has("organizations:grouping-tree-ui", organization, actor=request.user):
-        raise MissingFeature()
-
-
-def _current_level_expr(group):
-    materialized_hashes = {
-        gh.hash for gh in GroupHash.objects.filter(project=group.project, group=group)
-    }
-
-    # Evaluates to the greatest 1-based index of any materialized hash within
-    # hierarchical_hashes, or 1 if none is present.
-    find_hash_expr = _construct_arraymax(
-        [1]
-        + [
-            Function("indexOf", [Column("hierarchical_hashes"), hash])
-            for hash in materialized_hashes
-        ]
-    )
-
-    return Function("max", [find_hash_expr], "current_level")
-
-
-@dataclass
-class LevelsOverview:
-    current_level: int
-    only_primary_hash: str
-    num_levels: int
-
-
-def get_levels_overview(group: Group):
-    query = (
-        Query(Entity("events"))
-        .set_select(
-            [
-                Column("primary_hash"),
-                Function(
-                    "max", [Function("length", [Column("hierarchical_hashes")])], "num_levels"
-                ),
-                _current_level_expr(group),
-            ]
-        )
-        .set_where(_get_group_filters(group))
-        .set_groupby([Column("primary_hash")])
-    )
-    request = SnubaRequest(
-        dataset="events",
-        app_id="grouping",
-        query=query,
-        tenant_ids={"organization_id": group.project.organization_id},
-    )
-    res = snuba.raw_snql_query(request, "api.group_hashes_levels.get_levels_overview")
-
-    if not res["data"]:
-        raise NoEvents()
-
-    if len(res["data"]) > 1:
-        raise MergedIssues()
-
-    assert len(res["data"]) == 1
-
-    fields = res["data"][0]
-
-    if fields["num_levels"] <= 0:
-        if not _project_has_hierarchical_grouping(group.project):
-            raise ProjectNotHierarchical()
-        raise IssueNotHierarchical()
-
-    # TODO: Cache this if it takes too long. This is called from multiple
-    # places, grouping overview and then again in the new-issues endpoint.
-
-    return LevelsOverview(
-        current_level=fields["current_level"] - 1,
-        only_primary_hash=fields["primary_hash"],
-        num_levels=fields["num_levels"],
-    )
-
-
-def _list_levels(group):
-    fields = get_levels_overview(group)
-
-    # It is a little silly to transfer a list of integers rather than just
-    # giving the UI a range, but in the future we may want to add
-    # additional fields to each level.
-    levels = [{"id": i} for i in range(fields.num_levels)]
-
-    current_level = fields.current_level
-    assert levels[current_level]["id"] == current_level
-    levels[current_level]["isCurrent"] = True
-    return {"levels": levels}
-
-
-def _project_has_hierarchical_grouping(project):
-    config_dict = get_grouping_config_dict_for_project(project)
-    config = load_grouping_config(config_dict)
-    return config.initial_context["hierarchical_grouping"]
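
A note on `_current_level_expr` above: the nested `arrayMax`/`indexOf` construction is easier to read as its pure-Python equivalent per event (a sketch, not code from this repo):

```python
def current_level_index(hierarchical_hashes: list[str], materialized: set[str]) -> int:
    # ClickHouse indexOf() is 1-based and returns 0 for a missing element;
    # seeding the candidates with 1 clamps the result so an event whose
    # hashes were never materialized still lands on the first level.
    candidates = [1] + [
        hierarchical_hashes.index(h) + 1 for h in materialized if h in hierarchical_hashes
    ]
    return max(candidates)
```

`get_levels_overview` then subtracts one from the aggregated result to get back to the 0-based level IDs the API exposes.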

+ 0 - 10
src/sentry/api/urls.py

@@ -184,8 +184,6 @@ from .endpoints.group_tombstone import GroupTombstoneEndpoint
 from .endpoints.group_tombstone_details import GroupTombstoneDetailsEndpoint
 from .endpoints.group_user_reports import GroupUserReportsEndpoint
 from .endpoints.grouping_configs import GroupingConfigsEndpoint
-from .endpoints.grouping_level_new_issues import GroupingLevelNewIssuesEndpoint
-from .endpoints.grouping_levels import GroupingLevelsEndpoint
 from .endpoints.index import IndexEndpoint
 from .endpoints.integration_features import IntegrationFeaturesEndpoint
 from .endpoints.integrations import (
@@ -586,14 +584,6 @@ GROUP_URLS = [
         r"^(?P<issue_id>[^\/]+)/hashes/$",
         GroupHashesEndpoint.as_view(),
     ),
-    re_path(
-        r"^(?P<issue_id>[^\/]+)/grouping/levels/$",
-        GroupingLevelsEndpoint.as_view(),
-    ),
-    re_path(
-        r"^(?P<issue_id>[^\/]+)/grouping/levels/(?P<id>[^\/]+)/new-issues/$",
-        GroupingLevelNewIssuesEndpoint.as_view(),
-    ),
     re_path(
         r"^(?P<issue_id>[^\/]+)/hashes/split/$",
         GroupHashesSplitEndpoint.as_view(),

+ 3 - 1
src/sentry/grouping/result.py

@@ -34,7 +34,9 @@ StrippedTreeLabel = Sequence[StrippedTreeLabelPart]
 # To get around this, we truncate the tree label down to some arbitrary
 # number of functions. This does not apply to the grouping breakdown, as in
 # grouping_level_new_issues endpoint we populate the tree labels not through
-# this function at all.
+# this function at all. EDIT: This endpoint is no more, nor is the FE it
+# powered. Does the "grouping breakdown" exist anywhere else, or can this part
+# of the comment go away?
 #
 # The reason we do this on the backend instead of the frontend's title
 # component is because JIRA/Slack/Email titles suffer from the same issue:

+ 0 - 296
tests/sentry/api/endpoints/test_grouping_levels.py

@@ -1,296 +0,0 @@
-import time
-
-import pytest
-
-from sentry.models import Group, GroupHash
-from sentry.models.project import Project
-from sentry.testutils.helpers import Feature
-from sentry.testutils.pytest.fixtures import django_db_all
-from sentry.testutils.silo import region_silo_test
-from sentry.utils.json import prune_empty_keys
-
-
-@pytest.fixture(autouse=True)
-def hierarchical_grouping_features():
-    with Feature({"organizations:grouping-tree-ui": True}):
-        yield
-
-
-@pytest.fixture(autouse=True)
-def auto_login(settings, client, default_user):
-    assert client.login(username=default_user.username, password="admin")
-
-
-@pytest.fixture
-def store_stacktrace(default_project, factories):
-    default_project.update_option("sentry:grouping_config", "mobile:2021-02-12")
-
-    timestamp = time.time() - 3600
-
-    def inner(functions, interface="exception", type="error", extra_event_data=None):
-        nonlocal timestamp
-
-        timestamp += 1
-
-        event = {
-            "timestamp": timestamp,
-            interface: {
-                "values": [
-                    {
-                        "type": "ZeroDivisionError",
-                        "stacktrace": {"frames": [{"function": f} for f in functions]},
-                    }
-                ]
-            },
-            "type": type,
-            **(extra_event_data or {}),
-        }
-
-        return factories.store_event(data=event, project_id=default_project.id)
-
-    return inner
-
-
-@pytest.fixture
-def _render_all_previews(client):
-    def inner(group: Group):
-        rv = [f"group: {group.title}"]
-        assert "finest_tree_label" not in group.data["metadata"]
-
-        response = client.get(f"/api/0/issues/{group.id}/grouping/levels/", format="json")
-        assert response.status_code == 200
-
-        for level in response.data["levels"]:
-            rv.append(f"level {level['id']}{level.get('isCurrent') and '*' or ''}")
-
-            response = client.get(
-                f"/api/0/issues/{group.id}/grouping/levels/{level['id']}/new-issues/", format="json"
-            )
-
-            assert response.status_code == 200
-
-            rv.extend(
-                f"{preview['hash']}: {preview['title']} ({preview['eventCount']})"
-                for preview in response.data
-            )
-
-        return "\n".join(rv)
-
-    return inner
-
-
-@django_db_all
-def test_error_missing_feature(client, default_project):
-    group = Group.objects.create(project=default_project)
-
-    with Feature({"organizations:grouping-tree-ui": False}):
-        response = client.get(f"/api/0/issues/{group.id}/grouping/levels/", format="json")
-        assert response.status_code == 403
-        assert response.data["detail"]["code"] == "missing_feature"
-
-
-@django_db_all
-def test_error_no_events(client, default_project):
-    group = Group.objects.create(project=default_project)
-
-    response = client.get(f"/api/0/issues/{group.id}/grouping/levels/", format="json")
-    assert response.status_code == 403
-    assert response.data["detail"]["code"] == "no_events"
-
-
-@region_silo_test(stable=True)
-@django_db_all
-@pytest.mark.snuba
-def test_error_not_hierarchical(client, default_project, reset_snuba, factories):
-    default_project.update_option("sentry:grouping_config", "mobile:2021-02-12")
-    group = Group.objects.create(project=default_project)
-    grouphash = GroupHash.objects.create(
-        project=default_project, group=group, hash="d41d8cd98f00b204e9800998ecf8427e"
-    )
-
-    # we cannot run one of the other test_error testcases here because it would
-    # populate Snuba caches. Then we would not be able to observe our write, at
-    # least not within the same second we wrote.
-
-    factories.store_event(
-        data={"message": "hello world", "checksum": grouphash.hash}, project_id=default_project.id
-    )
-
-    response = client.get(f"/api/0/issues/{group.id}/grouping/levels/", format="json")
-    assert response.status_code == 403
-    assert response.data["detail"]["code"] == "issue_not_hierarchical"
-
-
-@django_db_all
-@pytest.mark.snuba
-def test_error_project_not_hierarchical(client, default_organization, reset_snuba, factories):
-
-    project = Project.objects.create(organization=default_organization, slug="test-project")
-    project.update_option("sentry:grouping_config", "newstyle:2023-01-11")
-
-    group = Group.objects.create(project=project)
-    grouphash = GroupHash.objects.create(
-        project=project, group=group, hash="d41d8cd98f00b204e9800998ecf8427e"
-    )
-
-    factories.store_event(
-        data={"message": "hello world", "checksum": grouphash.hash}, project_id=project.id
-    )
-
-    response = client.get(f"/api/0/issues/{group.id}/grouping/levels/", format="json")
-    assert response.status_code == 403
-    assert response.data["detail"]["code"] == "project_not_hierarchical"
-
-
-def _assert_tree_labels(event, functions):
-    # This should really be its own test, but it is cheaper to run as part of an existing test.
-    assert [
-        prune_empty_keys(frame)
-        for frame in event.data["exception"]["values"][0]["stacktrace"]["frames"]
-    ] == [
-        {
-            "data": {
-                "min_grouping_level": len(functions) - i - 1,
-                "orig_in_app": -1,
-            },
-            "function": function,
-            "in_app": False,
-        }
-        for i, function in enumerate(functions)
-    ]
-
-    assert (
-        event.data["metadata"]["finest_tree_label"]
-        == [{"function": function} for function in reversed(functions)][:2]
-    )
-
-
-@django_db_all
-@pytest.mark.snuba
-@region_silo_test(stable=True)
-def test_downwards(default_project, store_stacktrace, reset_snuba, _render_all_previews):
-    events = [
-        # store events with a common crashing frame `foo` and diverging threadbases
-        store_stacktrace(["bam", "baz2", "bar2", "foo"]),
-        store_stacktrace(["baz", "bar", "foo"]),
-        store_stacktrace(["baz2", "bar2", "foo"]),
-        store_stacktrace(["bar3", "foo"]),
-    ]
-
-    # assert [e.title for e in events] == [
-    # "ZeroDivisionError | foo | bar2",
-    # "ZeroDivisionError | foo | bar",
-    # "ZeroDivisionError | foo | bar2",
-    # "ZeroDivisionError | foo | bar3",
-    # ]
-
-    assert len({e.group_id for e in events}) == 1
-
-    _assert_tree_labels(events[0], ["bam", "baz2", "bar2", "foo"])
-    _assert_tree_labels(events[1], ["baz", "bar", "foo"])
-    _assert_tree_labels(events[2], ["baz2", "bar2", "foo"])
-    _assert_tree_labels(events[3], ["bar3", "foo"])
-
-    group = events[0].group
-
-    assert (
-        _render_all_previews(group)
-        == """\
-group: ZeroDivisionError
-level 0*
-bab925683e73afdb4dc4047397a7b36b: ZeroDivisionError | foo (4)
-level 1
-c8ef2dd3dedeed29b4b74b9c579eea1a: ZeroDivisionError | foo | bar2 (2)
-64686dcd59e0cf97f34113e9d360541a: ZeroDivisionError | foo | bar3 (1)
-aa1c4037371150958f9ea22adb110bbc: ZeroDivisionError | foo | bar (1)
-level 2
-8c0bbfebc194c7aa3e77e95436fd61e5: ZeroDivisionError | foo | bar2 | baz2 (2)
-64686dcd59e0cf97f34113e9d360541a: ZeroDivisionError | foo | bar3 (1)
-b8d08a573c62ca8c84de14c12c0e19fe: ZeroDivisionError | foo | bar | baz (1)
-level 3
-64686dcd59e0cf97f34113e9d360541a: ZeroDivisionError | foo | bar3 (1)
-8c0bbfebc194c7aa3e77e95436fd61e5: ZeroDivisionError | foo | bar2 | baz2 (1)
-b8d08a573c62ca8c84de14c12c0e19fe: ZeroDivisionError | foo | bar | baz (1)
-b0505d7461a2e36c4a8235bb6c310a3b: ZeroDivisionError | foo | bar2 | baz2 | bam (1)\
-"""
-    )
-
-
-@django_db_all
-@pytest.mark.snuba
-@region_silo_test(stable=True)
-def test_upwards(default_project, store_stacktrace, reset_snuba, _render_all_previews):
-    GroupHash.objects.create(
-        project_id=default_project.id,
-        hash="c8ef2dd3dedeed29b4b74b9c579eea1a",
-        state=GroupHash.State.SPLIT,
-        group_id=None,
-    )
-
-    GroupHash.objects.create(
-        project_id=default_project.id,
-        hash="aa1c4037371150958f9ea22adb110bbc",
-        state=GroupHash.State.SPLIT,
-        group_id=None,
-    )
-
-    events = [
-        store_stacktrace(["baz", "bar2", "foo"]),
-        store_stacktrace(["baz", "bar", "foo"]),
-        store_stacktrace(["bam", "bar", "foo"]),
-    ]
-
-    assert len({e.group_id for e in events}) == 3
-
-    assert (
-        _render_all_previews(events[0].group)
-        == """\
-group: ZeroDivisionError
-level 0
-bab925683e73afdb4dc4047397a7b36b: ZeroDivisionError | foo (3)
-level 1
-c8ef2dd3dedeed29b4b74b9c579eea1a: ZeroDivisionError | foo | bar2 (1)
-level 2*
-7411b56aa6591edbdba71898d3a9f01c: ZeroDivisionError | foo | bar2 | baz (1)\
-"""
-    )
-    assert (
-        _render_all_previews(events[1].group)
-        == """\
-group: ZeroDivisionError
-level 0
-bab925683e73afdb4dc4047397a7b36b: ZeroDivisionError | foo (3)
-level 1
-aa1c4037371150958f9ea22adb110bbc: ZeroDivisionError | foo | bar (2)
-level 2*
-b8d08a573c62ca8c84de14c12c0e19fe: ZeroDivisionError | foo | bar | baz (1)\
-"""
-    )
-
-    assert (
-        _render_all_previews(events[2].group)
-        == """\
-group: ZeroDivisionError
-level 0
-bab925683e73afdb4dc4047397a7b36b: ZeroDivisionError | foo (3)
-level 1
-aa1c4037371150958f9ea22adb110bbc: ZeroDivisionError | foo | bar (2)
-level 2*
-97df6b60ec530c65ab227585143a087a: ZeroDivisionError | foo | bar | bam (1)\
-"""
-    )
-
-
-@django_db_all
-@pytest.mark.snuba
-@region_silo_test(stable=True)
-def test_default_events(default_project, store_stacktrace, reset_snuba, _render_all_previews):
-    # Would like to add tree labels to default event titles as well,
-    # but leave as-is for now.
-    event = store_stacktrace(["bar", "foo"], interface="threads", type="default")
-    assert event.title == "<unlabeled event>"
-
-    event = store_stacktrace(
-        ["bar", "foo"], interface="threads", type="default", extra_event_data={"message": "hello"}
-    )
-    assert event.title == "hello"