test_organization_events_stats_mep.py 25 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634
  1. from datetime import timedelta
  2. from unittest import mock
  3. import pytest
  4. from django.urls import reverse
  5. from sentry import options
  6. from sentry.testutils import MetricsEnhancedPerformanceTestCase
  7. from sentry.testutils.helpers.datetime import before_now, iso_format
  8. from sentry.testutils.silo import region_silo_test
  9. pytestmark = pytest.mark.sentry_metrics
@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    """Tests for the organization events-stats endpoint when served by
    metrics-enhanced performance (MEP) data, mirroring the plain discover tests.
    """

    endpoint = "sentry-api-0-organization-events-stats"
    # Strings pre-registered with the metrics indexer so tag values / MRIs resolve.
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]
  19. def setUp(self):
  20. super().setUp()
  21. self.login_as(user=self.user)
  22. self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
  23. self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
  24. self.url = reverse(
  25. "sentry-api-0-organization-events-stats",
  26. kwargs={"organization_slug": self.project.organization.slug},
  27. )
  28. self.features = {
  29. "organizations:performance-use-metrics": True,
  30. }
  31. def do_request(self, data, url=None, features=None):
  32. if features is None:
  33. features = {"organizations:discover-basic": True}
  34. features.update(self.features)
  35. with self.feature(features):
  36. return self.client.get(self.url if url is None else url, data=data, format="json")
  37. # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
  38. def test_throughput_epm_hour_rollup(self):
  39. # Each of these denotes how many events to create in each hour
  40. event_counts = [6, 0, 6, 3, 0, 3]
  41. for hour, count in enumerate(event_counts):
  42. for minute in range(count):
  43. self.store_transaction_metric(
  44. 1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
  45. )
  46. for axis in ["epm()", "tpm()"]:
  47. response = self.do_request(
  48. data={
  49. "start": iso_format(self.day_ago),
  50. "end": iso_format(self.day_ago + timedelta(hours=6)),
  51. "interval": "1h",
  52. "yAxis": axis,
  53. "project": self.project.id,
  54. "dataset": "metricsEnhanced",
  55. },
  56. )
  57. assert response.status_code == 200, response.content
  58. data = response.data["data"]
  59. assert len(data) == 6
  60. assert response.data["isMetricsData"]
  61. rows = data[0:6]
  62. for test in zip(event_counts, rows):
  63. assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
  64. def test_throughput_epm_day_rollup(self):
  65. # Each of these denotes how many events to create in each minute
  66. event_counts = [6, 0, 6, 3, 0, 3]
  67. for hour, count in enumerate(event_counts):
  68. for minute in range(count):
  69. self.store_transaction_metric(
  70. 1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
  71. )
  72. for axis in ["epm()", "tpm()"]:
  73. response = self.do_request(
  74. data={
  75. "start": iso_format(self.day_ago),
  76. "end": iso_format(self.day_ago + timedelta(hours=24)),
  77. "interval": "24h",
  78. "yAxis": axis,
  79. "project": self.project.id,
  80. "dataset": "metricsEnhanced",
  81. },
  82. )
  83. assert response.status_code == 200, response.content
  84. data = response.data["data"]
  85. assert len(data) == 2
  86. assert response.data["isMetricsData"]
  87. assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)
  88. def test_throughput_epm_hour_rollup_offset_of_hour(self):
  89. # Each of these denotes how many events to create in each hour
  90. event_counts = [6, 0, 6, 3, 0, 3]
  91. for hour, count in enumerate(event_counts):
  92. for minute in range(count):
  93. self.store_transaction_metric(
  94. 1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
  95. )
  96. for axis in ["tpm()", "epm()"]:
  97. response = self.do_request(
  98. data={
  99. "start": iso_format(self.day_ago + timedelta(minutes=30)),
  100. "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
  101. "interval": "1h",
  102. "yAxis": axis,
  103. "project": self.project.id,
  104. "dataset": "metricsEnhanced",
  105. },
  106. )
  107. assert response.status_code == 200, response.content
  108. data = response.data["data"]
  109. assert len(data) == 6
  110. assert response.data["isMetricsData"]
  111. rows = data[0:6]
  112. for test in zip(event_counts, rows):
  113. assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
  114. def test_throughput_eps_minute_rollup(self):
  115. # Each of these denotes how many events to create in each minute
  116. event_counts = [6, 0, 6, 3, 0, 3]
  117. for minute, count in enumerate(event_counts):
  118. for second in range(count):
  119. self.store_transaction_metric(
  120. 1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
  121. )
  122. for axis in ["eps()", "tps()"]:
  123. response = self.do_request(
  124. data={
  125. "start": iso_format(self.day_ago),
  126. "end": iso_format(self.day_ago + timedelta(minutes=6)),
  127. "interval": "1m",
  128. "yAxis": axis,
  129. "project": self.project.id,
  130. "dataset": "metricsEnhanced",
  131. },
  132. )
  133. assert response.status_code == 200, response.content
  134. data = response.data["data"]
  135. assert len(data) == 6
  136. assert response.data["isMetricsData"]
  137. rows = data[0:6]
  138. for test in zip(event_counts, rows):
  139. assert test[1][1][0]["count"] == test[0] / 60.0
  140. def test_failure_rate(self):
  141. for hour in range(6):
  142. timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
  143. self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
  144. if hour < 3:
  145. self.store_transaction_metric(
  146. 1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
  147. )
  148. response = self.do_request(
  149. data={
  150. "start": iso_format(self.day_ago),
  151. "end": iso_format(self.day_ago + timedelta(hours=6)),
  152. "interval": "1h",
  153. "yAxis": ["failure_rate()"],
  154. "project": self.project.id,
  155. "dataset": "metricsEnhanced",
  156. },
  157. )
  158. assert response.status_code == 200, response.content
  159. data = response.data["data"]
  160. assert len(data) == 6
  161. assert response.data["isMetricsData"]
  162. assert [attrs for time, attrs in response.data["data"]] == [
  163. [{"count": 0.5}],
  164. [{"count": 0.5}],
  165. [{"count": 0.5}],
  166. [{"count": 0}],
  167. [{"count": 0}],
  168. [{"count": 0}],
  169. ]
  170. def test_percentiles_multi_axis(self):
  171. for hour in range(6):
  172. timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
  173. self.store_transaction_metric(111, timestamp=timestamp)
  174. self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)
  175. response = self.do_request(
  176. data={
  177. "start": iso_format(self.day_ago),
  178. "end": iso_format(self.day_ago + timedelta(hours=6)),
  179. "interval": "1h",
  180. "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
  181. "project": self.project.id,
  182. "dataset": "metricsEnhanced",
  183. },
  184. )
  185. assert response.status_code == 200, response.content
  186. lcp = response.data["p75(measurements.lcp)"]
  187. duration = response.data["p75(transaction.duration)"]
  188. assert len(duration["data"]) == 6
  189. assert duration["isMetricsData"]
  190. assert len(lcp["data"]) == 6
  191. assert lcp["isMetricsData"]
  192. for item in duration["data"]:
  193. assert item[1][0]["count"] == 111
  194. for item in lcp["data"]:
  195. assert item[1][0]["count"] == 222
  196. @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
  197. def test_multiple_yaxis_only_one_query(self, mock_query):
  198. self.do_request(
  199. data={
  200. "project": self.project.id,
  201. "start": iso_format(self.day_ago),
  202. "end": iso_format(self.day_ago + timedelta(hours=2)),
  203. "interval": "1h",
  204. "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
  205. "dataset": "metricsEnhanced",
  206. },
  207. )
  208. assert mock_query.call_count == 1
  209. def test_aggregate_function_user_count(self):
  210. self.store_transaction_metric(
  211. 1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
  212. )
  213. self.store_transaction_metric(
  214. 1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
  215. )
  216. response = self.do_request(
  217. data={
  218. "start": iso_format(self.day_ago),
  219. "end": iso_format(self.day_ago + timedelta(hours=2)),
  220. "interval": "1h",
  221. "yAxis": "count_unique(user)",
  222. "dataset": "metricsEnhanced",
  223. },
  224. )
  225. assert response.status_code == 200, response.content
  226. assert response.data["isMetricsData"]
  227. assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
  228. meta = response.data["meta"]
  229. assert meta["isMetricsData"] == response.data["isMetricsData"]
  230. def test_non_mep_query_fallsback(self):
  231. def get_mep(query):
  232. response = self.do_request(
  233. data={
  234. "project": self.project.id,
  235. "start": iso_format(self.day_ago),
  236. "end": iso_format(self.day_ago + timedelta(hours=2)),
  237. "interval": "1h",
  238. "query": query,
  239. "yAxis": ["epm()"],
  240. "dataset": "metricsEnhanced",
  241. },
  242. )
  243. assert response.status_code == 200, response.content
  244. return response.data["isMetricsData"]
  245. assert get_mep(""), "empty query"
  246. assert get_mep("event.type:transaction"), "event type transaction"
  247. assert not get_mep("event.type:error"), "event type error"
  248. assert not get_mep("transaction.duration:<15min"), "outlier filter"
  249. assert get_mep("epm():>0.01"), "throughput filter"
  250. assert not get_mep(
  251. "event.type:transaction OR event.type:error"
  252. ), "boolean with non-mep filter"
  253. assert get_mep(
  254. "event.type:transaction OR transaction:foo_transaction"
  255. ), "boolean with mep filter"
  256. def test_having_condition_with_preventing_aggregates(self):
  257. response = self.do_request(
  258. data={
  259. "project": self.project.id,
  260. "start": iso_format(self.day_ago),
  261. "end": iso_format(self.day_ago + timedelta(hours=2)),
  262. "interval": "1h",
  263. "query": "p95():<5s",
  264. "yAxis": ["epm()"],
  265. "dataset": "metricsEnhanced",
  266. "preventMetricAggregates": "1",
  267. },
  268. )
  269. assert response.status_code == 200, response.content
  270. assert not response.data["isMetricsData"]
  271. meta = response.data["meta"]
  272. assert meta["isMetricsData"] == response.data["isMetricsData"]
  273. def test_explicit_not_mep(self):
  274. response = self.do_request(
  275. data={
  276. "project": self.project.id,
  277. "start": iso_format(self.day_ago),
  278. "end": iso_format(self.day_ago + timedelta(hours=2)),
  279. "interval": "1h",
  280. # Should be a mep able query
  281. "query": "",
  282. "yAxis": ["epm()"],
  283. "metricsEnhanced": "0",
  284. },
  285. )
  286. assert response.status_code == 200, response.content
  287. assert not response.data["isMetricsData"]
  288. meta = response.data["meta"]
  289. assert meta["isMetricsData"] == response.data["isMetricsData"]
  290. def test_sum_transaction_duration(self):
  291. self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
  292. self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
  293. self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
  294. response = self.do_request(
  295. data={
  296. "start": iso_format(self.day_ago),
  297. "end": iso_format(self.day_ago + timedelta(hours=2)),
  298. "interval": "1h",
  299. "yAxis": "sum(transaction.duration)",
  300. "dataset": "metricsEnhanced",
  301. },
  302. )
  303. assert response.status_code == 200, response.content
  304. assert response.data["isMetricsData"]
  305. assert [attrs for time, attrs in response.data["data"]] == [
  306. [{"count": 123}],
  307. [{"count": 1245}],
  308. ]
  309. meta = response.data["meta"]
  310. assert meta["isMetricsData"] == response.data["isMetricsData"]
  311. assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
  312. assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}
  313. def test_custom_measurement(self):
  314. self.store_transaction_metric(
  315. 123,
  316. metric="measurements.bytes_transfered",
  317. internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
  318. entity="metrics_distributions",
  319. tags={"transaction": "foo_transaction"},
  320. timestamp=self.day_ago + timedelta(minutes=30),
  321. )
  322. self.store_transaction_metric(
  323. 456,
  324. metric="measurements.bytes_transfered",
  325. internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
  326. entity="metrics_distributions",
  327. tags={"transaction": "foo_transaction"},
  328. timestamp=self.day_ago + timedelta(hours=1, minutes=30),
  329. )
  330. self.store_transaction_metric(
  331. 789,
  332. metric="measurements.bytes_transfered",
  333. internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
  334. entity="metrics_distributions",
  335. tags={"transaction": "foo_transaction"},
  336. timestamp=self.day_ago + timedelta(hours=1, minutes=30),
  337. )
  338. response = self.do_request(
  339. data={
  340. "start": iso_format(self.day_ago),
  341. "end": iso_format(self.day_ago + timedelta(hours=2)),
  342. "interval": "1h",
  343. "yAxis": "sum(measurements.datacenter_memory)",
  344. "dataset": "metricsEnhanced",
  345. },
  346. )
  347. assert response.status_code == 200, response.content
  348. assert response.data["isMetricsData"]
  349. assert [attrs for time, attrs in response.data["data"]] == [
  350. [{"count": 123}],
  351. [{"count": 1245}],
  352. ]
  353. meta = response.data["meta"]
  354. assert meta["isMetricsData"] == response.data["isMetricsData"]
  355. assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
  356. assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}
  357. def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
  358. self.store_transaction_metric(
  359. 123,
  360. timestamp=self.day_ago + timedelta(hours=1),
  361. internal_metric="d:transactions/measurements.custom@kibibyte",
  362. entity="metrics_distributions",
  363. )
  364. response = self.do_request(
  365. data={
  366. "start": iso_format(self.day_ago),
  367. "end": iso_format(self.day_ago + timedelta(hours=2)),
  368. "interval": "1h",
  369. "yAxis": "p99(measurements.custom)",
  370. "dataset": "metricsEnhanced",
  371. },
  372. )
  373. meta = response.data["meta"]
  374. assert response.status_code == 200, response.content
  375. assert response.data["isMetricsData"]
  376. assert meta["isMetricsData"]
  377. assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
  378. assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}
  379. def test_multi_yaxis_custom_measurement(self):
  380. self.store_transaction_metric(
  381. 123,
  382. metric="measurements.bytes_transfered",
  383. internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
  384. entity="metrics_distributions",
  385. tags={"transaction": "foo_transaction"},
  386. timestamp=self.day_ago + timedelta(minutes=30),
  387. )
  388. self.store_transaction_metric(
  389. 456,
  390. metric="measurements.bytes_transfered",
  391. internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
  392. entity="metrics_distributions",
  393. tags={"transaction": "foo_transaction"},
  394. timestamp=self.day_ago + timedelta(hours=1, minutes=30),
  395. )
  396. self.store_transaction_metric(
  397. 789,
  398. metric="measurements.bytes_transfered",
  399. internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
  400. entity="metrics_distributions",
  401. tags={"transaction": "foo_transaction"},
  402. timestamp=self.day_ago + timedelta(hours=1, minutes=30),
  403. )
  404. response = self.do_request(
  405. data={
  406. "start": iso_format(self.day_ago),
  407. "end": iso_format(self.day_ago + timedelta(hours=2)),
  408. "interval": "1h",
  409. "yAxis": [
  410. "sum(measurements.datacenter_memory)",
  411. "p50(measurements.datacenter_memory)",
  412. ],
  413. "dataset": "metricsEnhanced",
  414. },
  415. )
  416. assert response.status_code == 200, response.content
  417. sum_data = response.data["sum(measurements.datacenter_memory)"]
  418. p50_data = response.data["p50(measurements.datacenter_memory)"]
  419. assert sum_data["isMetricsData"]
  420. assert p50_data["isMetricsData"]
  421. assert [attrs for time, attrs in sum_data["data"]] == [
  422. [{"count": 123}],
  423. [{"count": 1245}],
  424. ]
  425. assert [attrs for time, attrs in p50_data["data"]] == [
  426. [{"count": 123}],
  427. [{"count": 622.5}],
  428. ]
  429. sum_meta = sum_data["meta"]
  430. assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
  431. assert sum_meta["fields"] == {
  432. "time": "date",
  433. "sum_measurements_datacenter_memory": "size",
  434. "p50_measurements_datacenter_memory": "size",
  435. }
  436. assert sum_meta["units"] == {
  437. "time": None,
  438. "sum_measurements_datacenter_memory": "pebibyte",
  439. "p50_measurements_datacenter_memory": "pebibyte",
  440. }
  441. p50_meta = p50_data["meta"]
  442. assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
  443. assert p50_meta["fields"] == {
  444. "time": "date",
  445. "sum_measurements_datacenter_memory": "size",
  446. "p50_measurements_datacenter_memory": "size",
  447. }
  448. assert p50_meta["units"] == {
  449. "time": None,
  450. "sum_measurements_datacenter_memory": "pebibyte",
  451. "p50_measurements_datacenter_memory": "pebibyte",
  452. }
  453. def test_dataset_metrics_does_not_fallback(self):
  454. self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
  455. self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
  456. self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
  457. response = self.do_request(
  458. data={
  459. "start": iso_format(self.day_ago),
  460. "end": iso_format(self.day_ago + timedelta(hours=2)),
  461. "interval": "1h",
  462. "query": "transaction.duration:<5s",
  463. "yAxis": "sum(transaction.duration)",
  464. "dataset": "metrics",
  465. },
  466. )
  467. assert response.status_code == 400, response.content
  468. def test_title_filter(self):
  469. self.store_transaction_metric(
  470. 123,
  471. tags={"transaction": "foo_transaction"},
  472. timestamp=self.day_ago + timedelta(minutes=30),
  473. )
  474. response = self.do_request(
  475. data={
  476. "start": iso_format(self.day_ago),
  477. "end": iso_format(self.day_ago + timedelta(hours=2)),
  478. "interval": "1h",
  479. "query": "title:foo_transaction",
  480. "yAxis": [
  481. "sum(transaction.duration)",
  482. ],
  483. "dataset": "metricsEnhanced",
  484. },
  485. )
  486. assert response.status_code == 200, response.content
  487. data = response.data["data"]
  488. assert [attrs for time, attrs in data] == [
  489. [{"count": 123}],
  490. [{"count": 0}],
  491. ]
  492. def test_search_query_if_environment_does_not_exist_on_indexer(self):
  493. if options.get("sentry-metrics.performance.tags-values-are-strings"):
  494. pytest.skip("test does not apply if tag values are in clickhouse")
  495. self.create_environment(self.project, name="prod")
  496. self.create_environment(self.project, name="dev")
  497. self.store_transaction_metric(
  498. 123,
  499. tags={"transaction": "foo_transaction"},
  500. timestamp=self.day_ago + timedelta(minutes=30),
  501. )
  502. response = self.do_request(
  503. data={
  504. "start": iso_format(self.day_ago),
  505. "end": iso_format(self.day_ago + timedelta(hours=2)),
  506. "interval": "1h",
  507. "yAxis": [
  508. "sum(transaction.duration)",
  509. ],
  510. "environment": ["prod", "dev"],
  511. "dataset": "metricsEnhanced",
  512. },
  513. )
  514. assert response.status_code == 200, response.content
  515. data = response.data["data"]
  516. assert [attrs for time, attrs in data] == [
  517. [{"count": 0}],
  518. [{"count": 0}],
  519. ]
  520. assert not response.data["isMetricsData"]
  521. def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
  522. self.store_transaction_metric(
  523. 123,
  524. timestamp=self.day_ago + timedelta(hours=1),
  525. internal_metric="d:transactions/measurements.custom@kibibyte",
  526. entity="metrics_distributions",
  527. )
  528. response = self.do_request(
  529. data={
  530. "start": iso_format(self.day_ago),
  531. "end": iso_format(self.day_ago + timedelta(hours=2)),
  532. "interval": "1h",
  533. "yAxis": "p99(measurements.custom)",
  534. "query": "",
  535. },
  536. )
  537. assert response.status_code == 200
  538. meta = response.data["meta"]
  539. assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
  540. assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}
  541. def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
  542. self.store_transaction_metric(
  543. 123,
  544. timestamp=self.day_ago + timedelta(hours=1),
  545. internal_metric="d:transactions/measurements.custom@kibibyte",
  546. entity="metrics_distributions",
  547. )
  548. self.store_transaction_metric(
  549. 123,
  550. timestamp=self.day_ago + timedelta(hours=1),
  551. internal_metric="d:transactions/measurements.another.custom@pebibyte",
  552. entity="metrics_distributions",
  553. )
  554. response = self.do_request(
  555. data={
  556. "start": iso_format(self.day_ago),
  557. "end": iso_format(self.day_ago + timedelta(hours=2)),
  558. "interval": "1h",
  559. "yAxis": [
  560. "p95(measurements.custom)",
  561. "p99(measurements.custom)",
  562. "p99(measurements.another.custom)",
  563. ],
  564. "query": "",
  565. },
  566. )
  567. assert response.status_code == 200
  568. meta = response.data["p95(measurements.custom)"]["meta"]
  569. assert meta["fields"] == {
  570. "time": "date",
  571. "p95_measurements_custom": "size",
  572. "p99_measurements_custom": "size",
  573. "p99_measurements_another_custom": "size",
  574. }
  575. assert meta["units"] == {
  576. "time": None,
  577. "p95_measurements_custom": "kibibyte",
  578. "p99_measurements_custom": "kibibyte",
  579. "p99_measurements_another_custom": "pebibyte",
  580. }
  581. assert meta == response.data["p99(measurements.custom)"]["meta"]
  582. assert meta == response.data["p99(measurements.another.custom)"]["meta"]
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    """Re-runs the entire parent suite with the metrics-layer feature enabled."""

    def setUp(self):
        super().setUp()
        # Routes all queries through the new metrics abstraction layer.
        self.features["organizations:use-metrics-layer"] = True