test_organization_events_stats_mep.py

from __future__ import annotations

from datetime import timedelta
from typing import Any
from unittest import mock

import pytest
from django.urls import reverse

from sentry.models.environment import Environment
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.silo import region_silo_test

pytestmark = pytest.mark.sentry_metrics


@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }
        self.additional_params = dict()
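
    # Helper that merges the class-level feature flags into any per-test
    # overrides before issuing the request.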
    def do_request(self, data, url=None, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        with self.feature(features):
            return self.client.get(self.url if url is None else url, data=data, format="json")

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
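
    # The 24h interval rolls all stored events into the first bucket; epm()
    # then divides the bucket count by 1440 minutes (86400 / 60 seconds).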
    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]

            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0
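
    # Hours 0-2 store one "ok" and one "internal_error" transaction each
    # (failure_rate 0.5); hours 3-5 store only "ok" (failure_rate 0).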
    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]
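
    # Each yAxis should be computed from its own metric: durations are a
    # constant 111 and LCP a constant 222, so the p75 of each series is constant.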
    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )

        assert mock_query.call_count == 1
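
    # count_unique(user) is backed by a set, so the same user stored in two
    # different hours yields a count of 1 in each bucket.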
    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
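
    # Queries that metrics cannot answer (error events, duration outliers)
    # should fall back to the transactions dataset; isMetricsData reports
    # which path served the request.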
    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"
    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # Should be a MEP-able query, but metricsEnhanced=0 opts out explicitly.
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}
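
    # comparisonDelta=86400 compares each bucket against the bucket exactly
    # one day earlier, surfacing comparisonCount alongside count.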
    def test_sum_transaction_duration_with_comparison(self):
        # We store the data for the previous day (in order to have values for the comparison).
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # We store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # When the full suite runs, shared state can leave data in the second,
        # zerofilled time bucket. To avoid that flaky failure we only check
        # that the first bucket contains the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        meta = response.data["meta"]

        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]

        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
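
    # Unlike metricsEnhanced, dataset=metrics has no fallback: a query it
    # cannot serve is a 400 rather than a silent dataset switch.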
    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 400, response.content
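
    # A title filter resolves against the transaction name on the metrics dataset.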
    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
                **self.additional_params,
            },
        )

        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
                **self.additional_params,
            },
        )

        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )

        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5

    def test_top_events_with_project(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )

        assert response.status_code == 200, response.content
        data = response.data[self.project.slug]
        assert data["order"] == 0


@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True
        self.additional_params = {"forceMetricsLayer": "true"}
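
    # The tests below address metrics directly by MRI, e.g.
    # sum(c:transactions/usage@none), with the metrics layer forced on.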
    def test_counter_standard_metric(self):
        mri = "c:transactions/usage@none"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(minutes=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1m",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data

        min_data = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min_data, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore

        max_data = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max_data, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore

        p90_data = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90_data, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type: ignore

    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # We store each value a second time, since we want to check the de-duplication of sets.
            for i in range(0, 2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type: ignore

    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # Give merged gauges distinct timestamps so that the `last`
                    # aggregation is deterministic.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data

        min_data = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min_data, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore

        max_data = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max_data, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore

        last_data = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last_data, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore

        sum_data = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum_data, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type: ignore

        count_data = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count_data, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type: ignore
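

# Widget queries backed by on-demand extracted metrics (OnDemandMetricSpec)
# rather than the standard transaction metrics.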
@region_silo_test
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandWidgets(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        Environment.get_or_create(self.project, "production")
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {"organizations:on-demand-metrics-extraction-widgets": True}

    def do_request(self, data, url=None, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        with self.feature(features):
            return self.client.get(self.url if url is None else url, data=data, format="json")

    def test_top_events_wrong_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
                "onDemandType": "not_real",
            },
        )

        assert response.status_code == 400, response.content

    def test_top_events_works_without_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content

    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_top_events_with_transaction_on_demand_and_no_environment(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_timeseries_on_demand_with_multiple_percentiles(self):
        field = "p75(measurements.fcp)"
        field_two = "p75(measurements.lcp)"
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY)
        spec_two = OnDemandMetricSpec(
            field=field_two, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        assert (
            spec._query_str_for_hash
            == "event.measurements.fcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        assert (
            spec_two._query_str_for_hash
            == "event.measurements.lcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )

        for count in range(0, 4):
            self.store_on_demand_metric(
                count * 100,
                spec=spec,
                timestamp=self.day_ago + timedelta(hours=1),
            )
            self.store_on_demand_metric(
                count * 200.0,
                spec=spec_two,
                timestamp=self.day_ago + timedelta(hours=1),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": [field],
                "query": query,
                "yAxis": yAxis,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.fcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 225.0}],
        ]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.lcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 450.0}],
        ]
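
    # Each group stores one event per hour; group_one is tagged
    # satisfaction=tolerable, which scores 0.5 toward apdex(300).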
    def test_apdex_issue(self):
        field = "apdex(300)"
        groupbys = ["group_tag"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_one",
                    "environment": "production",
                    "satisfaction": "tolerable",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_two",
                    "environment": "production",
                    "satisfaction": "satisfactory",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "group_tag"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["group_one"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["group_one"]["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
        ]
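
    # Glob filters on http.url / http.referer should compile into RuleCondition
    # globs in the extracted metric spec; the snapshot below pins that shape.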
    def test_glob_http_referer_on_demand(self):
        agg = "count()"
        network_id_tag = "networkId"
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        spec = OnDemandMetricSpec(
            field=agg,
            groupbys=[network_id_tag],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        assert spec.to_metric_spec(self.project) == {
            "category": "transaction",
            "mri": "c:transactions/on_demand@none",
            "field": None,
            "tags": [
                {"key": "query_hash", "value": "ac241f56"},
                {"key": "networkId", "field": "event.tags.networkId"},
                {"key": "environment", "field": "event.environment"},
            ],
            "condition": {
                "op": "and",
                "inner": [
                    {
                        "op": "glob",
                        "name": "event.request.url",
                        "value": ["https://sentry.io/*/foo/bar/*"],
                    },
                    {
                        "op": "glob",
                        "name": "event.request.headers.Referer",
                        "value": ["https://sentry.io/*/bar/*"],
                    },
                ],
            },
        }

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": [network_id_tag, agg],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": f"-{agg}",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": agg,
            },
        )

        assert response.status_code == 200, response.content
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def _test_is_metrics_extracted_data(
        self, params: dict[str, Any], expected_on_demand_query: bool, dataset: str
    ) -> dict[str, Any]:
        features = {"organizations:on-demand-metrics-extraction": True}
        spec = OnDemandMetricSpec(
            field="count()",
            query="transaction.duration:>1s",
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        self.store_on_demand_metric(1, spec=spec)
        response = self.do_request(params, features=features)

        assert response.status_code == 200, response.content
        meta = response.data["meta"]

        # This is the main thing we want to test for
        assert meta.get("isMetricsExtractedData", False) is expected_on_demand_query
        assert meta["dataset"] == dataset

        return meta

    def test_is_metrics_extracted_data_is_included(self):
        self._test_is_metrics_extracted_data(
            {
                "dataset": "metricsEnhanced",
                "query": "transaction.duration:>=91",
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
            expected_on_demand_query=True,
            dataset="metricsEnhanced",
        )
    def test_group_by_transaction(self):
        field = "count()"
        groupbys = ["transaction"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                (hour + 1) * 5,
                spec=spec,
                additional_tags={
                    "transaction": "/performance",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "transaction"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["/performance"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["/performance"]["data"]] == [
            [{"count": 5.0}],
            [{"count": 10.0}],
        ]
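
    # Seeds data for the orderby tests below: per hour, networkId 1234 gets
    # one count and a p95 sample of 100, while 5678 gets two counts and a p95
    # sample of 200. Over 5 hours that totals count=5 for 1234 and count=10
    # for 5678.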
    def _setup_orderby_tests(self, query):
        count_spec = OnDemandMetricSpec(
            field="count()",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        p95_spec = OnDemandMetricSpec(
            field="p95(transaction.duration)",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=count_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                100,
                spec=p95_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                200,
                spec=p95_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            # Store twice as many 5678 so orderby puts it later
            self.store_on_demand_metric(
                2,
                spec=count_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
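
    # Descending count() order: 5678 (count=10) should be the top event.
    # response.data holds 3 entries: the top 2 groups plus, presumably, the
    # "Other" series that top-events queries append.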
    def test_order_by_aggregate_top_events_desc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["5678"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 10
        data2 = response.data["1234"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 5

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
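
    # Same seeded data, ascending count() order: 1234 (count=5) should now
    # come first.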
    def test_order_by_aggregate_top_events_asc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 5
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 10

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
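
    # Orders by count() but graphs p95(transaction.duration), so the series
    # values are the seeded p95 samples (100 and 200) rather than the counts.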
    def test_order_by_aggregate_top_events_graph_different_aggregate(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 100
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 200

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
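
    # Ordering top events by a plain tag (networkId) is not supported for
    # on-demand metrics, so the request should be rejected with a 400.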
    def test_cannot_order_by_tag(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-networkId",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 400, response.content
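
    # Passing two aggregates in orderby is likewise rejected with a 400.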
    def test_order_by_two_aggregates(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()", "p95(transaction.duration)"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": ["count()", "p95(transaction.duration)"],
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 400, response.content