# test_organization_events_stats_mep.py

from datetime import timedelta
from unittest import mock

import pytest
from django.urls import reverse

from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.silo import region_silo_test

pytestmark = pytest.mark.sentry_metrics


@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }

    def do_request(self, data, url=None, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        with self.feature(features):
            return self.client.get(self.url if url is None else url, data=data, format="json")

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
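    # Throughput functions return a rate: each bucket's event count divided by
    # the bucket width in minutes (for epm/tpm) or seconds (for eps/tps).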
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )
        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
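            # A 1h bucket spans 60 minutes, so the expected epm() value is
            # count / (3600.0 / 60.0).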
            for expected_count, row in zip(event_counts, rows):
                assert row[1][0]["count"] == expected_count / (3600.0 / 60.0)

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )
        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]
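            # A single 24h bucket spans 1440 minutes, so the whole day's
            # events are averaged over 86400.0 / 60.0 minutes.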
            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )
        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for expected_count, row in zip(event_counts, rows):
                assert row[1][0]["count"] == expected_count / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )
        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
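            # A 1m bucket spans 60 seconds, so the expected eps() value is
            # count / 60.0.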
            for expected_count, row in zip(event_counts, rows):
                assert row[1][0]["count"] == expected_count / 60.0

    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )
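        # Hours 0-2 store one "ok" and one "internal_error" event each, giving
        # a failure rate of 0.5; hours 3-5 store only "ok" events.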
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
            },
        )
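        # All four axes can be served by the metrics dataset, so they should
        # be batched into a single timeseries query rather than one per axis.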
        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]
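        # get_mep reports whether the query was actually served by the metrics
        # dataset; queries only answerable from raw events must fall back.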
        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
            },
        )
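        # An aggregate (HAVING) condition like p95():<5s combined with
        # preventMetricAggregates forces the query off the metrics dataset.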
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # Should be a MEP-able query
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
            },
        )
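        # metricsEnhanced=0 explicitly opts out, so even a MEP-able query is
        # served from the events dataset.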
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
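        # The second bucket sums the two events stored there: 456 + 789 = 1245.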
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
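        # internal_metric ties the stored values to the pebibyte MRI declared
        # in METRIC_STRINGS, which is where the "size" field type and unit in
        # the response meta come from.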
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
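        # The second bucket holds 456 and 789: the sum is 1245 and the p50
        # interpolates to 622.5.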
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]
        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
            },
        )
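        # dataset=metrics has no events fallback, so a filter the metrics
        # dataset cannot serve is rejected with a 400.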
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
            },
        )
        assert response.status_code == 200
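        # Each series in a multi-axis response carries the combined meta for
        # all requested fields, so all three metas should be identical.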
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
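        # orderby is -p75, so baz (300) ranks first; "order" is the series'
        # rank in that sort, hence 2 - position given the spec order above.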
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5
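

# Re-runs every test above with the metrics layer feature enabled, so both
# query paths are exercised against the same assertions.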
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True