test_organization_events_stats_mep.py

from datetime import timedelta
from unittest import mock

import pytest
from django.urls import reverse

from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.silo import region_silo_test

pytestmark = pytest.mark.sentry_metrics


@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }

    def do_request(self, data, url=None, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        with self.feature(features):
            return self.client.get(self.url if url is None else url, data=data, format="json")

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
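    # epm()/tpm() normalize each bucket's event count to events per minute, so a 1h
    # bucket divides by 3600 / 60 = 60 (e.g. 6 events -> 0.1), while eps()/tps()
    # divide by the bucket length in seconds.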
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]

            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0
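
    # failure_rate() is the fraction of events in a bucket whose transaction.status is
    # a failure: hours 0-2 store one "ok" and one "internal_error" event each, giving
    # 1 / 2 = 0.5, while the remaining hours store only "ok" events, giving 0.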
    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
            },
        )

        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
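
    # "metricsEnhanced" transparently falls back to the discover dataset whenever a
    # query cannot be served from metrics; the test below probes which filters keep
    # isMetricsData True and which force the fallback.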
    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # This should be a MEP-able query
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}
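
    # comparisonDelta is expressed in seconds: 86400 compares each bucket against the
    # same window shifted back one day, so the previous day's 1 + 2 = 3 shows up as
    # comparisonCount alongside today's 123 + 456 = 579.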
    def test_sum_transaction_duration_with_comparison(self):
        # Store data for the previous day so the comparison has values.
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # Store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # For some reason, when all tests run together, shared state puts data in the
        # second time bucket (which zerofilling would otherwise fill with zeros). To
        # avoid that flaky failure, we only check that the first bucket contains the
        # actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}
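
    # The internal_metric values below are metric resource identifiers (MRIs) of the
    # form <type>:<namespace>/<name>@<unit>; "d:" marks a distribution, and the unit
    # suffix is what the "units" meta assertions check.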
    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]
        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
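
    # Unlike "metricsEnhanced", the bare "metrics" dataset has no discover fallback:
    # a filter it cannot serve (here a raw transaction.duration condition) is
    # rejected with a 400 rather than rerouted.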
    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
            },
        )
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]
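
    # topEvents returns one series per group keyed by its value; "order" is the rank
    # within the orderby, so with -p75 the slowest transaction (baz, 300) gets order 0.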
    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5
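

# Re-runs the entire suite above with the metrics layer feature enabled, so the
# metrics-layer query path is exercised against the same assertions.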
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True