
feat(workflow): Add chart to metric alert unfurl (#34531)

Adds a chart to metric alert unfurls and switches them to the Slack "blocks" format, matching the existing Discover unfurls.
Scott Cooper · 2 years ago · commit 28e72df974

+ 1 - 0
mypy.ini

@@ -43,6 +43,7 @@ files = src/sentry/analytics/,
         src/sentry/grouping/strategies/security.py,
         src/sentry/grouping/strategies/template.py,
         src/sentry/grouping/strategies/utils.py,
+        src/sentry/incidents/charts.py,
         src/sentry/integrations/base.py,
         src/sentry/integrations/github/,
         src/sentry/integrations/slack/,

+ 4 - 1
src/sentry/charts/chartcuterie.py

@@ -10,6 +10,7 @@ from django.conf import settings
 from sentry import options
 from sentry.exceptions import InvalidConfiguration
 from sentry.models.file import get_storage
+from sentry.utils import json
 from sentry.utils.http import absolute_uri
 
 from .base import ChartRenderer, logger
@@ -65,9 +66,11 @@ class Chartcuterie(ChartRenderer):
             description=type(self).__name__,
         ):
 
+            # Using sentry json formatter to handle datetime objects
             resp = requests.post(
                 url=urljoin(self.service_url, "render"),
-                json=data,
+                data=json.dumps(data, cls=json._default_encoder),
+                headers={"Content-Type": "application/json"},
             )
 
             if resp.status_code == 503 and settings.DEBUG:
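
Why the change from json=data to an explicit json.dumps: the chart payload can contain datetime values (incident start/end, series timestamps), which the default JSON encoder used by requests refuses to serialize. A minimal sketch of the problem and the fix, using a stand-in encoder rather than Sentry's actual sentry.utils.json._default_encoder:

    import json
    from datetime import datetime, timezone

    payload = {"start": datetime(2022, 5, 16, 20, 0, tzinfo=timezone.utc)}

    try:
        json.dumps(payload)  # TypeError: Object of type datetime is not JSON serializable
    except TypeError:
        pass

    class DatetimeEncoder(json.JSONEncoder):
        # Stand-in for the Sentry encoder: emit datetimes as ISO 8601 strings.
        def default(self, obj):
            if isinstance(obj, datetime):
                return obj.isoformat()
            return super().default(obj)

    json.dumps(payload, cls=DatetimeEncoder)
    # '{"start": "2022-05-16T20:00:00+00:00"}'

Because requests.post(data=...) sends a raw body, the explicit Content-Type header is also needed.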

+ 2 - 0
src/sentry/conf/server.py

@@ -993,6 +993,8 @@ SENTRY_FEATURES = {
     "organizations:issue-search-use-cdc-secondary": False,
     # Enable metrics feature on the backend
     "organizations:metrics": False,
+    # Enable metric alert charts in email/slack
+    "organizations:metric-alert-chartcuterie": False,
     # Enable the new widget builder experience on Dashboards
     "organizations:new-widget-builder-experience": False,
     # Enable the new widget builder experience "design" on Dashboards

+ 1 - 0
src/sentry/features/__init__.py

@@ -83,6 +83,7 @@ default_manager.add("organizations:issue-search-use-cdc-secondary", Organization
 default_manager.add("organizations:large-debug-files", OrganizationFeature)
 default_manager.add("organizations:metric-alert-threshold-period", OrganizationFeature, True)
 default_manager.add("organizations:metrics", OrganizationFeature, True)
+default_manager.add("organizations:metric-alert-chartcuterie", OrganizationFeature, True)
 default_manager.add("organizations:new-widget-builder-experience", OrganizationFeature, True)
 default_manager.add("organizations:new-widget-builder-experience-design", OrganizationFeature, True)
 default_manager.add(

+ 235 - 0
src/sentry/incidents/charts.py

@@ -0,0 +1,235 @@
+from datetime import datetime, timedelta
+from functools import reduce
+from typing import Any, List, Mapping, Optional, cast
+
+from django.utils import timezone
+
+from sentry.api import client
+from sentry.api.base import logger
+from sentry.api.serializers import serialize
+from sentry.api.serializers.models.alert_rule import AlertRuleSerializer
+from sentry.api.serializers.models.incident import DetailedIncidentSerializer
+from sentry.api.utils import get_datetime_from_stats_period
+from sentry.charts import generate_chart
+from sentry.charts.types import ChartType
+from sentry.incidents.logic import translate_aggregate_field
+from sentry.incidents.models import AlertRule, Incident, User
+from sentry.models import ApiKey, Organization
+from sentry.snuba.dataset import Dataset
+from sentry.snuba.entity_subscription import apply_dataset_query_conditions
+from sentry.snuba.models import QueryDatasets, SnubaQuery
+
+CRASH_FREE_SESSIONS = "percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
+CRASH_FREE_USERS = "percentage(users_crashed, users) AS _crash_rate_alert_aggregate"
+SESSION_AGGREGATE_TO_FIELD = {
+    CRASH_FREE_SESSIONS: "sum(session)",
+    CRASH_FREE_USERS: "count_unique(user)",
+}
+
+
+API_INTERVAL_POINTS_LIMIT = 10000
+API_INTERVAL_POINTS_MIN = 150
+TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
+
+
+def incident_date_range(alert_time_window: int, incident: Incident) -> Mapping[str, str]:
+    """
+    Retrieve the start/end for graphing an incident.
+    Will show at least 150 and no more than 10,000 data points.
+    This function should match what is in the frontend.
+    """
+    time_window_milliseconds = alert_time_window * 1000
+    min_range = time_window_milliseconds * API_INTERVAL_POINTS_MIN
+    max_range = time_window_milliseconds * API_INTERVAL_POINTS_LIMIT
+    now = timezone.now()
+    start_date: datetime = incident.date_started
+    end_date: datetime = incident.date_closed if incident.date_closed else now
+    incident_range = max(
+        (end_date - start_date).total_seconds() * 1000, 3 * time_window_milliseconds
+    )
+    range = min(max_range, max(min_range, incident_range))
+    half_range = timedelta(milliseconds=range / 2)
+    return {
+        "start": (start_date - half_range).strftime(TIME_FORMAT),
+        "end": min((end_date + half_range), now).strftime(TIME_FORMAT),
+    }
+
+
+def fetch_metric_alert_sessions_data(
+    organization: Organization,
+    rule_aggregate: str,
+    query_params: Mapping[str, str],
+    user: Optional["User"] = None,
+) -> Any:
+    try:
+        resp = client.get(
+            auth=ApiKey(organization=organization, scope_list=["org:read"]),
+            user=user,
+            path=f"/organizations/{organization.slug}/sessions/",
+            params={
+                "field": SESSION_AGGREGATE_TO_FIELD[rule_aggregate],
+                "groupBy": "session.status",
+                **query_params,
+            },
+        )
+        return resp.data
+    except Exception as exc:
+        logger.error(
+            f"Failed to load sessions for chart: {exc}",
+            exc_info=True,
+        )
+        return None
+
+
+def fetch_metric_alert_events_timeseries(
+    organization: Organization,
+    rule_aggregate: str,
+    query_params: Mapping[str, str],
+    user: Optional["User"] = None,
+) -> List[Any]:
+    try:
+        resp = client.get(
+            auth=ApiKey(organization=organization, scope_list=["org:read"]),
+            user=user,
+            path=f"/organizations/{organization.slug}/events-stats/",
+            params={
+                "yAxis": rule_aggregate,
+                **query_params,
+            },
+        )
+        # Format the data into a timeseries object for charts
+        series = {
+            "seriesName": rule_aggregate,
+            "data": [
+                {
+                    "name": point[0] * 1000,
+                    "value": reduce(lambda a, b: a + float(b["count"]), point[1], 0.0),
+                }
+                for point in resp.data["data"]
+            ],
+        }
+        return [series]
+    except Exception as exc:
+        logger.error(
+            f"Failed to load events-stats for chart: {exc}",
+            exc_info=True,
+        )
+        return []
+
+
+def fetch_metric_alert_incidents(
+    organization: Organization,
+    alert_rule: AlertRule,
+    time_period: Mapping[str, str],
+    user: Optional["User"] = None,
+) -> List[Any]:
+    try:
+        resp = client.get(
+            auth=ApiKey(organization=organization, scope_list=["org:read"]),
+            user=user,
+            path=f"/organizations/{organization.slug}/incidents/",
+            params={
+                "alertRule": alert_rule.id,
+                "expand": "activities",
+                "includeSnapshots": True,
+                "project": -1,
+                **time_period,
+            },
+        )
+        return cast(List[Any], resp.data)
+    except Exception as exc:
+        logger.error(
+            f"Failed to load incidents for chart: {exc}",
+            exc_info=True,
+        )
+        return []
+
+
+def build_metric_alert_chart(
+    organization: Organization,
+    alert_rule: AlertRule,
+    selected_incident: Optional[Incident] = None,
+    period: Optional[str] = None,
+    start: Optional[str] = None,
+    end: Optional[str] = None,
+    user: Optional["User"] = None,
+) -> Optional[str]:
+    """Builds the dataset required for metric alert chart the same way the frontend would"""
+    snuba_query: SnubaQuery = alert_rule.snuba_query
+    dataset = snuba_query.dataset
+    is_crash_free_alert = dataset in {Dataset.Sessions.value, Dataset.Metrics.value}
+    style = (
+        ChartType.SLACK_METRIC_ALERT_SESSIONS
+        if is_crash_free_alert
+        else ChartType.SLACK_METRIC_ALERT_EVENTS
+    )
+
+    if selected_incident:
+        time_period = incident_date_range(snuba_query.time_window, selected_incident)
+    elif start and end:
+        time_period = {"start": start, "end": end}
+    else:
+        period_start = get_datetime_from_stats_period(period if period else "10000m")
+        time_period = {
+            "start": period_start.strftime(TIME_FORMAT),
+            "end": timezone.now().strftime(TIME_FORMAT),
+        }
+
+    chart_data = {
+        "rule": serialize(alert_rule, user, AlertRuleSerializer()),
+        "selectedIncident": serialize(selected_incident, user, DetailedIncidentSerializer()),
+        "incidents": fetch_metric_alert_incidents(
+            organization,
+            alert_rule,
+            time_period,
+            user,
+        ),
+    }
+
+    aggregate = translate_aggregate_field(snuba_query.aggregate, reverse=True)
+    # If we allow alerts to be across multiple orgs this will break
+    project_id = snuba_query.subscriptions.first().project_id
+    time_window_minutes = snuba_query.time_window / 60
+    env_params = {"environment": snuba_query.environment.name} if snuba_query.environment else {}
+    query = (
+        snuba_query.query
+        if is_crash_free_alert
+        else apply_dataset_query_conditions(
+            QueryDatasets(snuba_query.dataset),
+            snuba_query.query,
+            snuba_query.event_types,
+            discover=True,
+        )
+    )
+
+    query_params = {
+        **env_params,
+        **time_period,
+        "project": str(project_id),
+        "interval": f"{time_window_minutes}m",
+        "query": query,
+    }
+    if is_crash_free_alert:
+        chart_data["sessionResponse"] = fetch_metric_alert_sessions_data(
+            organization,
+            aggregate,
+            query_params,
+            user,
+        )
+    else:
+        chart_data["timeseriesData"] = fetch_metric_alert_events_timeseries(
+            organization,
+            aggregate,
+            query_params,
+            user,
+        )
+
+    try:
+        url = generate_chart(style, chart_data)
+        return cast(str, url)
+    except RuntimeError as exc:
+        logger.error(
+            f"Failed to generate chart for metric alert: {exc}",
+            exc_info=True,
+        )
+        return None
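
build_metric_alert_chart is the entry point used by the Slack unfurl handler later in this diff. A hedged usage sketch; chart_url_for_unfurl is a hypothetical helper, and the organization, alert rule, and incident objects are assumed to exist already:

    from typing import Optional

    from sentry import features
    from sentry.incidents.charts import build_metric_alert_chart

    def chart_url_for_unfurl(org, rule, incident=None, user=None) -> Optional[str]:
        # Mirror the unfurl handler: only render when the org has the flag enabled.
        if not features.has("organizations:metric-alert-chartcuterie", org):
            return None
        # With no incident and no explicit period/start/end, the chart falls back
        # to a ~10000 minute window ending now.
        return build_metric_alert_chart(
            organization=org,
            alert_rule=rule,
            selected_incident=incident,
            user=user,
        )

The return value is the URL of the image rendered by generate_chart, or None when rendering fails.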

+ 19 - 3
src/sentry/integrations/slack/message_builder/base/block.py

@@ -1,5 +1,16 @@
 from abc import ABC
-from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Sequence, Tuple, TypedDict
+from typing import (
+    Any,
+    Dict,
+    List,
+    Mapping,
+    MutableMapping,
+    Optional,
+    Sequence,
+    Tuple,
+    TypedDict,
+    cast,
+)
 
 from sentry.integrations.slack.message_builder import SlackBlock, SlackBody
 from sentry.integrations.slack.message_builder.base.base import SlackMessageBuilder
@@ -57,8 +68,13 @@ class BlockSlackMessageBuilder(SlackMessageBuilder, ABC):
         return action_block
 
     @staticmethod
-    def _build_blocks(*args: SlackBlock) -> SlackBody:
-        return {"blocks": list(args)}
+    def _build_blocks(*args: SlackBlock, color: Optional[str] = None) -> SlackBody:
+        blocks: MutableMapping[str, Any] = {"blocks": list(args)}
+
+        if color:
+            blocks["color"] = color
+
+        return cast(SlackBody, blocks)
 
     def as_payload(self) -> Mapping[str, Any]:
         return self.build()  # type: ignore
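
The new color keyword lets block-based builders carry the same color information the legacy attachment builder had. A small sketch of the resulting shape (the hex value is an arbitrary example; real values come from LEVEL_TO_COLOR):

    from sentry.integrations.slack.message_builder.base.block import BlockSlackMessageBuilder

    body = BlockSlackMessageBuilder._build_blocks(
        {"type": "section", "text": {"type": "mrkdwn", "text": "example text"}},
        color="#fa4747",
    )
    # body == {
    #     "blocks": [{"type": "section", "text": {"type": "mrkdwn", "text": "example text"}}],
    #     "color": "#fa4747",
    # }

Downstream code can use the top-level "color" to tint the message the way legacy attachments did.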

+ 22 - 30
src/sentry/integrations/slack/message_builder/metric_alerts.py

@@ -1,36 +1,23 @@
-from datetime import datetime
 from typing import Optional
 
 from sentry.incidents.models import AlertRule, Incident, IncidentStatus
 from sentry.integrations.metric_alerts import metric_alert_attachment_info
-from sentry.integrations.slack.message_builder import INCIDENT_COLOR_MAPPING, SlackBody
-from sentry.integrations.slack.message_builder.base.base import SlackMessageBuilder
-from sentry.utils.dates import to_timestamp
+from sentry.integrations.slack.message_builder import (
+    INCIDENT_COLOR_MAPPING,
+    LEVEL_TO_COLOR,
+    SlackBody,
+)
+from sentry.integrations.slack.message_builder.base.block import BlockSlackMessageBuilder
 
 
-def get_footer(
-    incident_triggered_date: Optional[datetime], last_triggered_date: Optional[datetime]
-) -> str:
-    if incident_triggered_date:
-        return "<!date^{:.0f}^Sentry Incident - Started {} at {} | Sentry Incident>".format(
-            to_timestamp(incident_triggered_date), "{date_pretty}", "{time}"
-        )
-
-    if last_triggered_date:
-        return "<!date^{:.0f}^Metric Alert - Last Triggered {} at {} | Metric Alert>".format(
-            to_timestamp(last_triggered_date), "{date_pretty}", "{time}"
-        )
-
-    return "Metric Alert"
-
-
-class SlackMetricAlertMessageBuilder(SlackMessageBuilder):
+class SlackMetricAlertMessageBuilder(BlockSlackMessageBuilder):
     def __init__(
         self,
         alert_rule: AlertRule,
         incident: Optional[Incident] = None,
         new_status: Optional[IncidentStatus] = None,
         metric_value: Optional[int] = None,
+        chart_url: Optional[str] = None,
     ) -> None:
         """
         Builds a metric alert attachment for slack unfurling.
@@ -46,19 +33,24 @@ class SlackMetricAlertMessageBuilder(SlackMessageBuilder):
         self.incident = incident
         self.metric_value = metric_value
         self.new_status = new_status
+        self.chart_url = chart_url
 
     def build(self) -> SlackBody:
         data = metric_alert_attachment_info(
             self.alert_rule, self.incident, self.new_status, self.metric_value
         )
 
-        return self._build(
-            actions=[],
-            color=INCIDENT_COLOR_MAPPING.get(data["status"]),
-            fallback=data["title"],
-            fields=[],
-            footer=get_footer(data["date_started"], data["last_triggered_date"]),
-            text=data["text"],
-            title=data["title"],
-            title_link=data["title_link"],
+        blocks = [
+            self.get_markdown_block(
+                text=f"<{data['title_link']}|*{data['title']}*>  \n{data['text']}"
+            )
+        ]
+
+        if self.chart_url:
+            blocks.append(self.get_image_block(self.chart_url, alt="Metric Alert Chart"))
+
+        color = LEVEL_TO_COLOR.get(INCIDENT_COLOR_MAPPING.get(data["status"], ""))
+        return self._build_blocks(
+            *blocks,
+            color=color,
         )
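
A brief usage sketch of the rebuilt builder; alert_rule, incident, and chart_url are placeholders (the tests at the bottom of this diff construct real objects with self.create_alert_rule() and self.create_incident()):

    body = SlackMetricAlertMessageBuilder(
        alert_rule=alert_rule,
        incident=incident,      # optional; adds ?alert=<id> to the title link
        chart_url=chart_url,    # optional; appends an image block when set
    ).build()
    # body["blocks"][0] is a mrkdwn section with the linked title and alert text,
    # body["blocks"][1] is the image block when chart_url is set, and
    # body["color"] is derived from the incident status via LEVEL_TO_COLOR.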

+ 50 - 13
src/sentry/integrations/slack/unfurl/metric_alerts.py

@@ -1,14 +1,17 @@
 import html
 import re
-from typing import Any, List, Mapping, Optional
+from typing import Any, Dict, List, Mapping, Optional
 from urllib.parse import urlparse
 
+import sentry_sdk
 from django.db.models import Q
 from django.http.request import HttpRequest, QueryDict
 
+from sentry import features
+from sentry.incidents.charts import build_metric_alert_chart
 from sentry.incidents.models import AlertRule, Incident, User
 from sentry.integrations.slack.message_builder.metric_alerts import SlackMetricAlertMessageBuilder
-from sentry.models import Integration
+from sentry.models import Integration, Organization
 
 from . import Handler, UnfurlableUrl, UnfurledUrl, make_type_coercer
 
@@ -17,6 +20,9 @@ map_incident_args = make_type_coercer(
         "org_slug": str,
         "alert_rule_id": int,
         "incident_id": int,
+        "period": str,
+        "start": str,
+        "end": str,
     }
 )
 
@@ -67,28 +73,59 @@ def unfurl_metric_alerts(
         )
     }
 
-    return {
-        link.url: SlackMetricAlertMessageBuilder(
-            alert_rule=alert_rule_map[link.args["alert_rule_id"]],
-            incident=incident_map.get(link.args["incident_id"]),
+    orgs_by_slug: Dict[str, Organization] = {org.slug: org for org in all_integration_orgs}
+
+    result = {}
+    for link in links:
+        if link.args["alert_rule_id"] not in alert_rule_map:
+            continue
+        org = orgs_by_slug.get(link.args["org_slug"])
+        if org is None:
+            continue
+
+        alert_rule = alert_rule_map[link.args["alert_rule_id"]]
+        selected_incident = incident_map.get(link.args["incident_id"])
+
+        chart_url = None
+        if features.has("organizations:metric-alert-chartcuterie", org):
+            try:
+                chart_url = build_metric_alert_chart(
+                    organization=org,
+                    alert_rule=alert_rule,
+                    selected_incident=selected_incident,
+                    period=link.args["period"],
+                    start=link.args["start"],
+                    end=link.args["end"],
+                    user=user,
+                )
+            except Exception as e:
+                sentry_sdk.capture_exception(e)
+
+        result[link.url] = SlackMetricAlertMessageBuilder(
+            alert_rule=alert_rule,
+            incident=selected_incident,
+            chart_url=chart_url,
         ).build()
-        for link in links
-        if link.args["alert_rule_id"] in alert_rule_map
-    }
+
+    return result
 
 
 def map_metric_alert_query_args(url: str, args: Mapping[str, str]) -> Mapping[str, Any]:
-    """
-    Extracts selected incident id
-    """
+    """Extracts selected incident id and some query parameters"""
+
     # Slack uses HTML escaped ampersands in its Event Links, which need
     # to be unescaped for QueryDict to split properly.
     url = html.unescape(url)
     parsed_url = urlparse(url)
     params = QueryDict(parsed_url.query)
     incident_id = params.get("alert", None)
+    period = params.get("period", None)
+    start = params.get("start", None)
+    end = params.get("end", None)
 
-    return map_incident_args(url, {**args, "incident_id": incident_id})
+    return map_incident_args(
+        url, {**args, "incident_id": incident_id, "period": period, "start": start, "end": end}
+    )
 
 
 handler: Handler = Handler(
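
map_metric_alert_query_args now also carries period, start, and end through to the chart builder. A hedged example of the extraction step on an HTML-escaped Slack link (the URL shape is illustrative, and parse_qs stands in for Django's QueryDict used in the real code):

    import html
    from urllib.parse import parse_qs, urlparse

    url = "https://sentry.example.io/organizations/acme/alerts/rules/details/123/?alert=456&amp;period=7d"
    url = html.unescape(url)                 # "&amp;" -> "&"
    params = parse_qs(urlparse(url).query)
    # params == {"alert": ["456"], "period": ["7d"]}

map_incident_args then coerces the values, so the handler sees incident_id as an int and period as a string.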

+ 54 - 0
tests/sentry/incidents/test_charts.py

@@ -0,0 +1,54 @@
+from django.utils.dateparse import parse_datetime
+from freezegun import freeze_time
+
+from sentry.incidents.charts import incident_date_range
+from sentry.incidents.models import Incident
+from sentry.testutils import TestCase
+
+now = "2022-05-16T20:00:00"
+frozen_time = f"{now}Z"
+
+
+class IncidentDateRangeTest(TestCase):
+    @freeze_time(frozen_time)
+    def test_use_current_date_for_active_incident(self):
+        incident = Incident(date_started=parse_datetime("2022-05-16T18:55:00Z"), date_closed=None)
+        assert incident_date_range(60, incident) == {
+            "start": "2022-05-16T17:40:00",
+            "end": now,
+        }
+
+    @freeze_time(frozen_time)
+    def test_use_current_date_for_recently_closed_alert(self):
+        incident = Incident(
+            date_started=parse_datetime("2022-05-16T18:55:00Z"),
+            date_closed=parse_datetime("2022-05-16T18:57:00Z"),
+        )
+        assert incident_date_range(60, incident) == {
+            "start": "2022-05-16T17:40:00",
+            "end": now,
+        }
+
+    @freeze_time(frozen_time)
+    def test_use_a_past_date_for_an_older_alert(self):
+        #  Incident is from over a week ago
+        incident = Incident(
+            date_started=parse_datetime("2022-05-04T18:55:00Z"),
+            date_closed=parse_datetime("2022-05-04T18:57:00Z"),
+        )
+        assert incident_date_range(60, incident) == {
+            "start": "2022-05-04T17:40:00",
+            "end": "2022-05-04T20:12:00",
+        }
+
+    @freeze_time(frozen_time)
+    def test_large_time_windows(self):
+        incident = Incident(
+            date_started=parse_datetime("2022-04-20T20:28:00Z"),
+            date_closed=None,
+        )
+        one_day = 1440 * 60
+        assert incident_date_range(one_day, incident) == {
+            "start": "2022-02-04T20:28:00",
+            "end": now,
+        }
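
test_large_time_windows is the least obvious case, because the 150-data-point minimum dominates the incident's actual duration. A worked check of the arithmetic in incident_date_range, using its constants (API_INTERVAL_POINTS_MIN = 150):

    from datetime import timedelta

    window_ms = 1440 * 60 * 1000                               # one-day alert window, in ms
    min_range_ms = window_ms * 150                             # 150 points -> 150 days
    incident_ms = timedelta(days=26).total_seconds() * 1000    # roughly Apr 20 -> May 16
    # incident_ms (~26 days) < min_range_ms (150 days), so range = 150 days
    half_range = timedelta(milliseconds=min_range_ms / 2)      # 75 days
    # start = 2022-04-20T20:28 - 75 days = 2022-02-04T20:28
    # end   = min(now + 75 days, now)    = now = 2022-05-16T20:00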

+ 95 - 77
tests/sentry/integrations/slack/test_message_builder.py

@@ -218,32 +218,31 @@ class BuildIncidentAttachmentTest(TestCase):
 
 class BuildMetricAlertAttachmentTest(TestCase):
     def test_metric_alert_without_incidents(self):
-        logo_url = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
         alert_rule = self.create_alert_rule()
         title = f"Resolved: {alert_rule.name}"
+        link = absolute_uri(
+            reverse(
+                "sentry-metric-alert-details",
+                kwargs={
+                    "organization_slug": alert_rule.organization.slug,
+                    "alert_rule_id": alert_rule.id,
+                },
+            )
+        )
         assert SlackMetricAlertMessageBuilder(alert_rule).build() == {
-            "fallback": title,
-            "title": title,
-            "title_link": absolute_uri(
-                reverse(
-                    "sentry-metric-alert-details",
-                    kwargs={
-                        "organization_slug": alert_rule.organization.slug,
-                        "alert_rule_id": alert_rule.id,
-                    },
-                )
-            ),
-            "text": "",
-            "fields": [],
-            "mrkdwn_in": ["text"],
-            "footer_icon": logo_url,
-            "footer": "Metric Alert",
             "color": LEVEL_TO_COLOR["_incident_resolved"],
-            "actions": [],
+            "blocks": [
+                {
+                    "text": {
+                        "text": f"<{link}|*{title}*>  \n",
+                        "type": "mrkdwn",
+                    },
+                    "type": "section",
+                },
+            ],
         }
 
     def test_metric_alert_with_selected_incident(self):
-        logo_url = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
         alert_rule = self.create_alert_rule()
         incident = self.create_incident(alert_rule=alert_rule, status=IncidentStatus.CLOSED.value)
         trigger = self.create_alert_rule_trigger(alert_rule, CRITICAL_TRIGGER_LABEL, 100)
@@ -251,15 +250,8 @@ class BuildMetricAlertAttachmentTest(TestCase):
             alert_rule_trigger=trigger, triggered_for_incident=incident
         )
         title = f"Resolved: {alert_rule.name}"
-        incident_footer_ts = (
-            "<!date^{:.0f}^Sentry Incident - Started {} at {} | Sentry Incident>".format(
-                to_timestamp(incident.date_started), "{date_pretty}", "{time}"
-            )
-        )
-        assert SlackMetricAlertMessageBuilder(alert_rule, incident).build() == {
-            "fallback": title,
-            "title": title,
-            "title_link": absolute_uri(
+        link = (
+            absolute_uri(
                 reverse(
                     "sentry-metric-alert-details",
                     kwargs={
@@ -268,18 +260,24 @@ class BuildMetricAlertAttachmentTest(TestCase):
                     },
                 )
             )
-            + f"?alert={incident.identifier}",
-            "text": "0 events in the last 10 minutes\nFilter: level:error",
-            "fields": [],
-            "mrkdwn_in": ["text"],
-            "footer_icon": logo_url,
-            "footer": incident_footer_ts,
+            + f"?alert={incident.identifier}"
+        )
+        assert SlackMetricAlertMessageBuilder(alert_rule, incident).build() == {
             "color": LEVEL_TO_COLOR["_incident_resolved"],
-            "actions": [],
+            "blocks": [
+                {
+                    "text": {
+                        "text": f"<{link}|*{title}*>  \n"
+                        "0 events in the last 10 minutes\n"
+                        "Filter: level:error",
+                        "type": "mrkdwn",
+                    },
+                    "type": "section",
+                },
+            ],
         }
 
     def test_metric_alert_with_active_incident(self):
-        logo_url = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
         alert_rule = self.create_alert_rule()
         incident = self.create_incident(alert_rule=alert_rule, status=IncidentStatus.CRITICAL.value)
         trigger = self.create_alert_rule_trigger(alert_rule, CRITICAL_TRIGGER_LABEL, 100)
@@ -287,34 +285,31 @@ class BuildMetricAlertAttachmentTest(TestCase):
             alert_rule_trigger=trigger, triggered_for_incident=incident
         )
         title = f"Critical: {alert_rule.name}"
-        incident_footer_ts = (
-            "<!date^{:.0f}^Metric Alert - Last Triggered {} at {} | Metric Alert>".format(
-                to_timestamp(incident.date_started), "{date_pretty}", "{time}"
+        link = absolute_uri(
+            reverse(
+                "sentry-metric-alert-details",
+                kwargs={
+                    "organization_slug": alert_rule.organization.slug,
+                    "alert_rule_id": alert_rule.id,
+                },
             )
         )
         assert SlackMetricAlertMessageBuilder(alert_rule).build() == {
-            "fallback": title,
-            "title": title,
-            "title_link": absolute_uri(
-                reverse(
-                    "sentry-metric-alert-details",
-                    kwargs={
-                        "organization_slug": alert_rule.organization.slug,
-                        "alert_rule_id": alert_rule.id,
-                    },
-                )
-            ),
-            "text": "0 events in the last 10 minutes\nFilter: level:error",
-            "fields": [],
-            "mrkdwn_in": ["text"],
-            "footer_icon": logo_url,
-            "footer": incident_footer_ts,
             "color": LEVEL_TO_COLOR["fatal"],
-            "actions": [],
+            "blocks": [
+                {
+                    "text": {
+                        "text": f"<{link}|*{title}*>  \n"
+                        "0 events in the last 10 minutes\n"
+                        "Filter: level:error",
+                        "type": "mrkdwn",
+                    },
+                    "type": "section",
+                },
+            ],
         }
 
     def test_metric_value(self):
-        logo_url = absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
         alert_rule = self.create_alert_rule()
         incident = self.create_incident(alert_rule=alert_rule, status=IncidentStatus.CLOSED.value)
 
@@ -325,31 +320,54 @@ class BuildMetricAlertAttachmentTest(TestCase):
         self.create_alert_rule_trigger_action(
             alert_rule_trigger=trigger, triggered_for_incident=incident
         )
-        incident_footer_ts = (
-            "<!date^{:.0f}^Sentry Incident - Started {} at {} | Sentry Incident>".format(
-                to_timestamp(incident.date_started), "{date_pretty}", "{time}"
+        link = absolute_uri(
+            reverse(
+                "sentry-metric-alert-details",
+                kwargs={
+                    "organization_slug": alert_rule.organization.slug,
+                    "alert_rule_id": alert_rule.id,
+                },
             )
         )
         assert SlackMetricAlertMessageBuilder(
             alert_rule, incident, IncidentStatus.CRITICAL, metric_value=metric_value
         ).build() == {
-            "fallback": title,
-            "title": title,
-            "title_link": absolute_uri(
-                reverse(
-                    "sentry-metric-alert-details",
-                    kwargs={
-                        "organization_slug": alert_rule.organization.slug,
-                        "alert_rule_id": alert_rule.id,
+            "color": LEVEL_TO_COLOR["fatal"],
+            "blocks": [
+                {
+                    "text": {
+                        "text": f"<{link}?alert={incident.identifier}|*{title}*>  \n"
+                        f"{metric_value} events in the last 10 minutes\n"
+                        "Filter: level:error",
+                        "type": "mrkdwn",
                     },
-                )
+                    "type": "section",
+                },
+            ],
+        }
+
+    def test_metric_alert_chart(self):
+        alert_rule = self.create_alert_rule()
+        title = f"Resolved: {alert_rule.name}"
+        link = absolute_uri(
+            reverse(
+                "sentry-metric-alert-details",
+                kwargs={
+                    "organization_slug": alert_rule.organization.slug,
+                    "alert_rule_id": alert_rule.id,
+                },
             )
-            + f"?alert={incident.identifier}",
-            "text": f"{metric_value} events in the last 10 minutes\nFilter: level:error",
-            "fields": [],
-            "mrkdwn_in": ["text"],
-            "footer_icon": logo_url,
-            "footer": incident_footer_ts,
-            "color": LEVEL_TO_COLOR["fatal"],
-            "actions": [],
+        )
+        assert SlackMetricAlertMessageBuilder(alert_rule, chart_url="chart_url").build() == {
+            "color": LEVEL_TO_COLOR["_incident_resolved"],
+            "blocks": [
+                {
+                    "text": {
+                        "text": f"<{link}|*{title}*>  \n",
+                        "type": "mrkdwn",
+                    },
+                    "type": "section",
+                },
+                {"alt_text": "Metric Alert Chart", "image_url": "chart_url", "type": "image"},
+            ],
         }

Some files were not shown because too many files changed in this diff