from datetime import timedelta
from unittest import mock

import pytest
from django.urls import reverse

from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.silo import region_silo_test

pytestmark = pytest.mark.sentry_metrics

@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }

    def do_request(self, data, url=None, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        with self.feature(features):
            return self.client.get(self.url if url is None else url, data=data, format="json")

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]

            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0

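    # Note on the expected values in the next test: failure_rate() is the share of transactions
    # whose transaction.status counts as failed (statuses such as "ok", "cancelled", and "unknown"
    # are excluded). The first three hourly buckets below store one "ok" and one "internal_error"
    # transaction each, so the assertions expect 0.5 there and 0 for the remaining hours.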
    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
            },
        )

        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

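    # The next test exercises the preventMetricAggregates flag: with it set, a query that filters
    # on an aggregate (a HAVING-style condition such as p95():<5s) is not served from metrics, and
    # the assertions below expect the endpoint to fall back (isMetricsData is False).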
    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # This would be a MEP-able query, but metricsEnhanced is explicitly disabled.
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_sum_transaction_duration_with_comparison(self):
        # Store data for the previous day so the comparison has values to work with.
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # Store data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # When the full suite runs, shared state can leave data in the second (zerofilled) time
        # bucket. To avoid that flaky failure, only check that the first bucket holds the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]

        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
            },
        )
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5

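
# The subclass below re-runs every test above with the metrics layer feature flag enabled, and
# adds coverage for querying custom-metric MRIs directly (counter, distribution, set, and gauge).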
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type:ignore

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data

        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type:ignore

        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type:ignore

        p90 = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type:ignore

    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # We store each value a second time, since we want to check the de-duplication of sets.
            for i in range(0, 2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type:ignore

    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # When multiple gauges are merged, give them distinct timestamps so that the
                    # `last` aggregation is deterministic and always picks the same value.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data

        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type:ignore

        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type:ignore

        last = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type:ignore

        sum = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type:ignore

        count = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type:ignore

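
# The class below covers the on-demand metrics path: values are stored against an
# OnDemandMetricSpec (field + group-bys + query) and queried back with useOnDemandMetrics=true.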
@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemand(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }

    def do_request(self, data, url=None, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        with self.feature(features):
            return self.client.get(self.url if url is None else url, data=data, format="json")

    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, groupbys=groupbys, query=query)
        spec_two = OnDemandMetricSpec(field=field_two, groupbys=groupbys, query=query)

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
|