# test_organization_events_stats_mep.py

from datetime import timedelta
from unittest import mock

import pytest
from django.urls import reverse

from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.silo import region_silo_test

pytestmark = pytest.mark.sentry_metrics


@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }

    def do_request(self, data, url=None, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        with self.feature(features):
            return self.client.get(self.url if url is None else url, data=data, format="json")

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
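    # The assertions below normalize each bucket's event count to a rate: epm()/tpm() divide by
    # the number of minutes in the interval (e.g. 3600.0 / 60.0 for 1h buckets) and eps()/tps()
    # divide by the number of seconds in the interval.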
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )
        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )
        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]
            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )
        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )
        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0

    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
            },
        )
        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # Should be a mep able query
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_sum_transaction_duration_with_comparison(self):
        # We store the data for the previous day (in order to have values for the comparison).
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # We store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # For some reason, if all tests run, there is some shared state that makes this test have
        # data in the second time bucket, which is filled automatically by the zerofilling. In
        # order to avoid this flaky failure, we will only check that the first bucket contains the
        # actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]
        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
            },
        )
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5
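

# The class below re-runs every test in the base class with the metrics layer feature flag
# enabled, and adds coverage for custom-metric MRIs (counters, distributions, sets, and gauges).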
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type:ignore

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type:ignore
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type:ignore
        p90 = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type:ignore

    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # We store each value a second time, since we want to check the de-duplication of sets.
            for i in range(0, 2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type:ignore

    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # When multiple gauges are merged, in order to make the `last` merge work
                    # deterministically it's better to have the gauges with different timestamps
                    # so that the last value is always the same.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type:ignore
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type:ignore
        last = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type:ignore
        sum = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type:ignore
        count = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type:ignore
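

# The class below queries on-demand extracted metrics built from OnDemandMetricSpec
# (stored via store_on_demand_metric) instead of the standard transaction metrics used above.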
@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemand(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }

    def do_request(self, data, url=None, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        with self.feature(features):
            return self.client.get(self.url if url is None else url, data=data, format="json")

    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, groupbys=groupbys, query=query)
        spec_two = OnDemandMetricSpec(field=field_two, groupbys=groupbys, query=query)
        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )
        assert response.status_code == 200, response.content
        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]