test_organization_events_stats_mep.py

from __future__ import annotations

from datetime import timedelta
from typing import Any
from unittest import mock

import pytest
from django.urls import reverse
from rest_framework.response import Response

from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
from sentry.models.environment import Environment
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.helpers.on_demand import create_widget
from sentry.utils.samples import load_data

pytestmark = pytest.mark.sentry_metrics


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }
        self.additional_params = dict()

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
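
    # Note: epm() here is events per minute over each one-hour bucket, i.e.
    # count / (3600.0 / 60.0) = count / 60. For example, the first bucket
    # stores 6 events, so the expected value is 6 / 60 = 0.1.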

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]

            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)
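
    # Note: with a 24h bucket the rate is computed over 86400 / 60 = 1440
    # minutes, so the expected value is sum(event_counts) / 1440 = 18 / 1440.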

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0
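
    # Note: eps() is events per second over each one-minute bucket, so the
    # expected value is count / 60.0 (e.g. 6 events -> 0.1 events per second).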

    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]
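
    # Note: failure_rate() is failed / total transactions per bucket. The first
    # three hours each store one "ok" and one "internal_error" transaction
    # (1 / 2 = 0.5); the remaining hours only store "ok" (0 / 1 = 0).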

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )

        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # Should be a MEP-able query
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_sum_transaction_duration_with_comparison(self):
        # We store the data for the previous day (in order to have values for the comparison).
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # We store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # When the full suite runs, shared state can leave data in the second (zerofilled)
        # time bucket. To avoid that flaky failure, we only check that the first bucket
        # contains the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]
        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
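
    # Note: p50 over the two values stored in the second bucket (456 and 789)
    # is their midpoint, (456 + 789) / 2 = 622.5, matching the series above.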

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5

    def test_top_events_with_project(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data[self.project.slug]
        assert data["order"] == 0


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True
        self.additional_params = {"forceMetricsLayer": "true"}

    def test_counter_standard_metric(self):
        mri = "c:transactions/usage@none"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(minutes=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1m",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
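
    # Note: MRIs (metric resource identifiers) such as "c:transactions/usage@none"
    # encode the metric type ("c" for counter), namespace, name, and unit
    # ("none" here), and can be queried directly inside aggregates like sum().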

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min_series = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min_series, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max_series = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max_series, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        p90_series = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90_series, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
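
    # Note: each bucket holds value, 2 * value, and 3 * value, so with linear
    # interpolation p90 of e.g. (10, 20, 30) lands at index 0.9 * 2 = 1.8,
    # i.e. 20 + 0.8 * 10 = 28, matching the expected p90 series.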

    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # We store each value a second time, since we want to check the de-duplication of sets.
            for _ in range(0, 2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # When multiple gauges are merged, give them different timestamps so
                    # the `last` aggregation is deterministic.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min_series = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min_series, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max_series = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max_series, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        last_series = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last_series, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        sum_series = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum_series, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        count_series = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count_series, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
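
    # Note: two gauges (value and 3 * value) are merged per bucket, so min is
    # value, max and last are 3 * value (the later timestamp wins for `last`),
    # and sum is value + 3 * value = 4 * value (e.g. 10 + 30 = 40).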


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandWidgets(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        Environment.get_or_create(self.project, "production")
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:on-demand-metrics-extraction-widgets": True,
            "organizations:on-demand-metrics-extraction": True,
        }

    def _make_on_demand_request(
        self, params: dict[str, Any], extra_features: dict[str, bool] | None = None
    ) -> Response:
        """Ensures that the required parameters for an on-demand request are included."""
        # Expected parameters for this helper function
        params["dataset"] = "metricsEnhanced"
        params["useOnDemandMetrics"] = "true"
        params["onDemandType"] = "dynamic_query"
        _features = {**self.features, **(extra_features or {})}
        return self.do_request(params, features=_features)
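
    # A minimal usage sketch (hypothetical call, not an existing test): the helper
    # fills in the dataset and on-demand flags, so a caller only passes the
    # query-specific parameters, e.g.
    #   self._make_on_demand_request(
    #       {"query": "transaction.duration:>=100", "yAxis": "count()"}
    #   )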

    def test_top_events_wrong_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
                "onDemandType": "not_real",
            },
        )

        assert response.status_code == 400, response.content

    def test_top_events_works_without_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content

    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
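
    # Note: top-event groups are keyed by the comma-joined group-by values
    # ("foo,red", "bar,blue"), and the second bucket's counts follow from the
    # values stored at hour=1: 1 * 62 * 24 = 1488 and 1 * 60 * 24 = 1440.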

    def test_top_events_with_transaction_on_demand_and_no_environment(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_transaction_only(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": yAxis + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE

        assert response.status_code == 200, response.content
        # The widget is split as transaction-like, so the two on-demand groups are returned.
        assert len(response.data.keys()) == 2
        assert bool(response.data["foo,red"])
        assert bool(response.data["bar,blue"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_error(
        self,
    ):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "very bad 2",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value2", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two] + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS

        assert response.status_code == 200, response.content
        # The query only matches error events, so the widget is split as error
        # events and the two stored errors come back as groups ("customtag2" is
        # unset, hence the trailing comma in the keys).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["error_value2,"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_discover(self):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        transaction = load_data("transaction")
        transaction["timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["start_timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["tags"] = {"customtag1": "transaction_value", "query.dataset": "foo"}

        self.store_event(
            data=transaction,
            project_id=self.project.id,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two, "customtag1", "customtag2"],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.DISCOVER

        assert response.status_code == 200, response.content
        # Fell back to discover data, which matches both the stored error and the
        # stored transaction (one group each; "customtag2" is unset).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["transaction_value,"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_saved(self):
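        """A widget whose split is already saved is queried as-is and never re-saved."""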
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            # Transaction-like widgets use on-demand metrics.
            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        with mock.patch.object(widget, "save") as mock_widget_save:
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "orderby": ["-count()"],
                    "query": query,
                    "yAxis": yAxis,
                    "field": [
                        "count()",
                        "count_web_vitals(measurements.lcp, good)",
                        "customtag1",
                        "customtag2",
                    ],
                    "topEvents": 5,
                    "dataset": "metricsEnhanced",
                    "useOnDemandMetrics": "true",
                    "onDemandType": "dynamic_query",
                    "dashboardWidgetId": widget.id,
                },
            )
            # The split is already saved, so the endpoint must not write the widget again.
            mock_widget_save.assert_not_called()

        assert response.status_code == 200, response.content

        # Each tag group is seeded through only one of the two specs, so the other
        # aggregate reads 0 for that group.
        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_timeseries_on_demand_with_multiple_percentiles(self):
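        """Two on-demand percentile series (p75 of fcp and lcp) can be requested in one timeseries query."""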
        field = "p75(measurements.fcp)"
        field_two = "p75(measurements.lcp)"
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY)
        spec_two = OnDemandMetricSpec(
            field=field_two, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        assert (
            spec._query_str_for_hash
            == "event.measurements.fcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        assert (
            spec_two._query_str_for_hash
            == "event.measurements.lcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )

        for count in range(0, 4):
            self.store_on_demand_metric(
                count * 100,
                spec=spec,
                timestamp=self.day_ago + timedelta(hours=1),
            )
            self.store_on_demand_metric(
                count * 200.0,
                spec=spec_two,
                timestamp=self.day_ago + timedelta(hours=1),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": [field],
                "query": query,
                "yAxis": yAxis,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        # p75 of [0, 100, 200, 300] is 225; p75 of [0, 200, 400, 600] is 450.
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.fcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 225.0}],
        ]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.lcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 450.0}],
        ]

    def test_apdex_issue(self):
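        """apdex(300) is computed per group from the extracted `satisfaction` tag."""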
        field = "apdex(300)"
        groupbys = ["group_tag"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_one",
                    "environment": "production",
                    "satisfaction": "tolerable",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_two",
                    "environment": "production",
                    "satisfaction": "satisfactory",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "group_tag"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["group_one"]["meta"]["isMetricsExtractedData"] is True
        # A single "tolerable" transaction per bucket yields an apdex of 0.5.
        assert [attrs for time, attrs in response.data["group_one"]["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
        ]

    def test_glob_http_referer_on_demand(self):
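        """Glob conditions on http.url and http.referer are preserved in the generated metric spec."""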
        agg = "count()"
        network_id_tag = "networkId"
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        spec = OnDemandMetricSpec(
            field=agg,
            groupbys=[network_id_tag],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        assert spec.to_metric_spec(self.project) == {
            "category": "transaction",
            "mri": "c:transactions/on_demand@none",
            "field": None,
            "tags": [
                {"key": "query_hash", "value": "ac241f56"},
                {"key": "networkId", "field": "event.tags.networkId"},
                {"key": "environment", "field": "event.environment"},
            ],
            "condition": {
                "op": "and",
                "inner": [
                    {
                        "op": "glob",
                        "name": "event.request.url",
                        "value": ["https://sentry.io/*/foo/bar/*"],
                    },
                    {
                        "op": "glob",
                        "name": "event.request.headers.Referer",
                        "value": ["https://sentry.io/*/bar/*"],
                    },
                ],
            },
        }

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": [network_id_tag, agg],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": f"-{agg}",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": agg,
            },
        )

        assert response.status_code == 200, response.content
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def _test_is_metrics_extracted_data(
        self, params: dict[str, Any], expected_on_demand_query: bool, dataset: str
    ) -> dict[str, Any]:
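        """Store one on-demand metric, run the request, and check the response meta; returns the meta for further checks."""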
        spec = OnDemandMetricSpec(
            field="count()",
            query="transaction.duration:>1s",
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        self.store_on_demand_metric(1, spec=spec)
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        meta = response.data["meta"]
        # This is the main thing we want to test for
        assert meta.get("isMetricsExtractedData", False) is expected_on_demand_query
        assert meta["dataset"] == dataset

        return meta

    def test_is_metrics_extracted_data_is_included(self):
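        """An on-demand eligible query reports isMetricsExtractedData=True on the metricsEnhanced dataset."""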
        self._test_is_metrics_extracted_data(
            {
                "dataset": "metricsEnhanced",
                "query": "transaction.duration:>=91",
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
            expected_on_demand_query=True,
            dataset="metricsEnhanced",
        )

    def test_on_demand_epm_no_query(self):
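        """epm() with an empty query is served from stored metrics rather than on-demand extraction."""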
        params = {
            "dataset": "metricsEnhanced",
            "environment": "production",
            "onDemandType": "dynamic_query",
            "project": self.project.id,
            "query": "",
            "statsPeriod": "1h",
            "useOnDemandMetrics": "true",
            "yAxis": ["epm()"],
        }
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        assert response.data["meta"] == {
            "fields": {"time": "date", "epm_900": "rate"},
            "units": {"time": None, "epm_900": None},
            "isMetricsData": True,
            "isMetricsExtractedData": False,
            "tips": {},
            "datasetReason": "unchanged",
            "dataset": "metricsEnhanced",
        }

    def test_group_by_transaction(self):
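        """`transaction` can be used as an on-demand group-by for top events."""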
        field = "count()"
        groupbys = ["transaction"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        # Store 5 counts in the first hour and 10 in the second.
        for hour in range(0, 2):
            self.store_on_demand_metric(
                (hour + 1) * 5,
                spec=spec,
                additional_tags={
                    "transaction": "/performance",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "transaction"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["/performance"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["/performance"]["data"]] == [
            [{"count": 5.0}],
            [{"count": 10.0}],
        ]

    def _setup_orderby_tests(self, query):
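        """Seed count() and p95() metrics for two networkId groups; "5678" gets twice the count of "1234"."""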
        count_spec = OnDemandMetricSpec(
            field="count()",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        p95_spec = OnDemandMetricSpec(
            field="p95(transaction.duration)",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=count_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                100,
                spec=p95_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                200,
                spec=p95_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            # Store twice as many counts for "5678" so a count() orderby separates the groups.
            self.store_on_demand_metric(
                2,
                spec=count_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

    def test_order_by_aggregate_top_events_desc(self):
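        """Descending count() orderby puts the higher-count group ("5678") first."""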
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        # The two top groups plus the "Other" series.
        assert len(response.data) == 3

        data1 = response.data["5678"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 10
        data2 = response.data["1234"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 5

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_asc(self):
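        """Ascending count() orderby puts the lower-count group ("1234") first."""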
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3

        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 5
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 10

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_graph_different_aggregate(self):
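        """The orderby aggregate (count()) can differ from the charted yAxis (p95)."""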
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3

        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 100
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 200

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_cannot_order_by_tag(self):
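        """Ordering top events by a plain tag is rejected with a 400."""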
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-networkId",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 400, response.content

    def test_order_by_two_aggregates(self):
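        """Ordering by more than one aggregate at a time is rejected with a 400."""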
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()", "p95(transaction.duration)"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": ["count()", "p95(transaction.duration)"],
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 400, response.content

    def test_top_events_with_tag(self):
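        """A hyphenated tag name works as an on-demand top-events group-by."""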
        query = "transaction.duration:>=100"
        yAxis = ["count()"]
        field = "count()"
        groupbys = ["some-field"]
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        self.store_on_demand_metric(
            1,
            spec=spec,
            additional_tags={
                "some-field": "bar",
                "environment": "production",
            },
            timestamp=self.day_ago,
        )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "some-field",
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content