test_organization_events_stats_mep.py

from __future__ import annotations

from datetime import timedelta
from typing import Any
from unittest import mock

import pytest
from django.urls import reverse
from rest_framework.response import Response

from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
from sentry.models.environment import Environment
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.helpers.on_demand import create_widget
from sentry.utils.samples import load_data

pytestmark = pytest.mark.sentry_metrics


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }
        self.additional_params = dict()
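
    # Note on the expected values below: epm()/eps() normalize each bucket's
    # event count to a per-minute / per-second rate, so a bucket spanning N
    # seconds divides its count by N / 60.0 for epm() and by N for eps().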

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    @pytest.mark.querybuilder
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]
            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0

    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # Should be a MEP-able query
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_sum_transaction_duration_with_comparison(self):
        # We store the data for the previous day (in order to have values for the comparison).
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # We store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # For some reason, if all tests run, there is some shared state that makes this test have data in the second
        # time bucket, which is filled automatically by the zerofilling. In order to avoid this flaky failure, we will
        # only check that the first bucket contains the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]

        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5

    def test_top_events_with_project(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data[self.project.slug]
        assert data["order"] == 0

    def test_split_decision_for_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "blah", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"], "error.type:blah", self.project, discover_widget_split=None
        )

        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:blah",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )

        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) is DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS

    def test_split_decision_for_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        _, widget, __ = create_widget(
            ["count()", "transaction.name"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction.name"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )

        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) is DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE

    def test_split_decision_for_top_events_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "test_error", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"],
            "error.type:test_error",
            self.project,
            discover_widget_split=None,
        )

        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:test_error",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )

        assert response.status_code == 200, response.content
        # Only a singular result for the test_error event
        assert len(response.data) == 1
        # Results are grouped by the error type
        assert response.data.get("test_error").get("meta").get(
            "discoverSplitDecision"
        ) is DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS

    def test_split_decision_for_top_events_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
            tags={"transaction": "foo_transaction"},
        )
        _, widget, __ = create_widget(
            ["count()", "transaction"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )

        assert response.status_code == 200, response.content
        # Only a singular result for the transaction
        assert len(response.data) == 1
        # Results are grouped by the transaction
        assert response.data.get("foo_transaction").get("meta").get(
            "discoverSplitDecision"
        ) is DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE

    def test_split_decision_for_ambiguous_widget_without_data(self):
        _, widget, __ = create_widget(
            ["count()", "transaction.name", "error.type"],
            "",
            self.project,
            discover_widget_split=None,
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction.name", "error.type"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )

        assert response.status_code == 200, response.content
        assert response.data.get("meta").get("discoverSplitDecision") is None

        widget.refresh_from_db()
        assert widget.discover_widget_split is None


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
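    # Re-runs every test from the parent class with the metrics layer enabled
    # (see setUp), then adds metrics-layer-only cases that query counter,
    # distribution, set, and gauge MRIs directly.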
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True
        self.additional_params = {"forceMetricsLayer": "true"}

    def test_counter_standard_metric(self):
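        # MRI format is "<type>:<namespace>/<name>@<unit>"; the leading letter
        # gives the metric type (c = counter, d = distribution, s = set,
        # g = gauge), matching the entity each of these tests stores into.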
  852. mri = "c:transactions/usage@none"
  853. for index, value in enumerate((10, 20, 30, 40, 50, 60)):
  854. self.store_transaction_metric(
  855. value,
  856. metric=mri,
  857. internal_metric=mri,
  858. entity="metrics_counters",
  859. timestamp=self.day_ago + timedelta(minutes=index),
  860. use_case_id=UseCaseID.CUSTOM,
  861. )
  862. response = self.do_request(
  863. data={
  864. "start": iso_format(self.day_ago),
  865. "end": iso_format(self.day_ago + timedelta(hours=6)),
  866. "interval": "1m",
  867. "yAxis": [f"sum({mri})"],
  868. "project": self.project.id,
  869. "dataset": "metricsEnhanced",
  870. **self.additional_params,
  871. },
  872. )
  873. assert response.status_code == 200, response.content
  874. data = response.data["data"]
  875. for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
  876. assert value[0]["count"] == expected_value # type: ignore[index]
  877. def test_counter_custom_metric(self):
  878. mri = "c:custom/sentry.process_profile.track_outcome@second"
  879. for index, value in enumerate((10, 20, 30, 40, 50, 60)):
  880. self.store_transaction_metric(
  881. value,
  882. metric=mri,
  883. internal_metric=mri,
  884. entity="metrics_counters",
  885. timestamp=self.day_ago + timedelta(hours=index),
  886. use_case_id=UseCaseID.CUSTOM,
  887. )
  888. response = self.do_request(
  889. data={
  890. "start": iso_format(self.day_ago),
  891. "end": iso_format(self.day_ago + timedelta(hours=6)),
  892. "interval": "1h",
  893. "yAxis": [f"sum({mri})"],
  894. "project": self.project.id,
  895. "dataset": "metricsEnhanced",
  896. **self.additional_params,
  897. },
  898. )
  899. assert response.status_code == 200, response.content
  900. data = response.data["data"]
  901. for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
  902. assert value[0]["count"] == expected_value # type: ignore[index]
  903. def test_distribution_custom_metric(self):
  904. mri = "d:custom/sentry.process_profile.track_outcome@second"
  905. for index, value in enumerate((10, 20, 30, 40, 50, 60)):
  906. for multiplier in (1, 2, 3):
  907. self.store_transaction_metric(
  908. value * multiplier,
  909. metric=mri,
  910. internal_metric=mri,
  911. entity="metrics_distributions",
  912. timestamp=self.day_ago + timedelta(hours=index),
  913. use_case_id=UseCaseID.CUSTOM,
  914. )
  915. response = self.do_request(
  916. data={
  917. "start": iso_format(self.day_ago),
  918. "end": iso_format(self.day_ago + timedelta(hours=6)),
  919. "interval": "1h",
  920. "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
  921. "project": self.project.id,
  922. "dataset": "metricsEnhanced",
  923. **self.additional_params,
  924. },
  925. )
  926. assert response.status_code == 200, response.content
  927. data = response.data
  928. min = data[f"min({mri})"]["data"]
  929. for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
  930. assert value[0]["count"] == expected_value # type: ignore[index]
  931. max = data[f"max({mri})"]["data"]
  932. for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
  933. assert value[0]["count"] == expected_value # type: ignore[index]
  934. p90 = data[f"p90({mri})"]["data"]
  935. for (_, value), expected_value in zip(p90, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
  936. assert value[0]["count"] == expected_value # type: ignore[index]
  937. def test_set_custom_metric(self):
  938. mri = "s:custom/sentry.process_profile.track_outcome@second"
  939. for index, value in enumerate((10, 20, 30, 40, 50, 60)):
  940. # We store each value a second time, since we want to check the de-duplication of sets.
  941. for i in range(0, 2):
  942. self.store_transaction_metric(
  943. value,
  944. metric=mri,
  945. internal_metric=mri,
  946. entity="metrics_sets",
  947. timestamp=self.day_ago + timedelta(hours=index),
  948. use_case_id=UseCaseID.CUSTOM,
  949. )
  950. response = self.do_request(
  951. data={
  952. "start": iso_format(self.day_ago),
  953. "end": iso_format(self.day_ago + timedelta(hours=6)),
  954. "interval": "1h",
  955. "yAxis": [f"count_unique({mri})"],
  956. "project": self.project.id,
  957. "dataset": "metricsEnhanced",
  958. **self.additional_params,
  959. },
  960. )
  961. assert response.status_code == 200, response.content
  962. data = response.data["data"]
  963. for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
  964. assert value[0]["count"] == expected_value # type: ignore[index]
  965. def test_gauge_custom_metric(self):
  966. mri = "g:custom/sentry.process_profile.track_outcome@second"
  967. for index, value in enumerate((10, 20, 30, 40, 50, 60)):
  968. for multiplier in (1, 3):
  969. self.store_transaction_metric(
  970. value * multiplier,
  971. metric=mri,
  972. internal_metric=mri,
  973. entity="metrics_gauges",
  974. # When multiple gauges are merged, in order to make the `last` merge work deterministically it's
  975. # better to have the gauges with different timestamps so that the last value is always the same.
  976. timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
  977. use_case_id=UseCaseID.CUSTOM,
  978. )
  979. response = self.do_request(
  980. data={
  981. "start": iso_format(self.day_ago),
  982. "end": iso_format(self.day_ago + timedelta(hours=6)),
  983. "interval": "1h",
  984. "yAxis": [
  985. f"min({mri})",
  986. f"max({mri})",
  987. f"last({mri})",
  988. f"sum({mri})",
  989. f"count({mri})",
  990. ],
  991. "project": self.project.id,
  992. "dataset": "metricsEnhanced",
  993. **self.additional_params,
  994. },
  995. )
  996. assert response.status_code == 200, response.content
  997. data = response.data
  998. min = data[f"min({mri})"]["data"]
  999. for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
  1000. assert value[0]["count"] == expected_value # type: ignore[index]
  1001. max = data[f"max({mri})"]["data"]
  1002. for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
  1003. assert value[0]["count"] == expected_value # type: ignore[index]
  1004. last = data[f"last({mri})"]["data"]
  1005. for (_, value), expected_value in zip(last, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
  1006. assert value[0]["count"] == expected_value # type: ignore[index]
  1007. sum = data[f"sum({mri})"]["data"]
  1008. for (_, value), expected_value in zip(sum, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
  1009. assert value[0]["count"] == expected_value # type: ignore[index]
  1010. count = data[f"count({mri})"]["data"]
  1011. for (_, value), expected_value in zip(count, [40, 80, 120, 160, 200, 240]):
  1012. assert value[0]["count"] == expected_value # type: ignore[index]
  1013. class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandWidgets(
  1014. MetricsEnhancedPerformanceTestCase
  1015. ):
  1016. endpoint = "sentry-api-0-organization-events-stats"
  1017. def setUp(self):
  1018. super().setUp()
  1019. self.login_as(user=self.user)
  1020. self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
  1021. self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
  1022. Environment.get_or_create(self.project, "production")
  1023. self.url = reverse(
  1024. "sentry-api-0-organization-events-stats",
  1025. kwargs={"organization_id_or_slug": self.project.organization.slug},
  1026. )
  1027. self.features = {
  1028. "organizations:on-demand-metrics-extraction-widgets": True,
  1029. "organizations:on-demand-metrics-extraction": True,
  1030. }
  1031. def _make_on_demand_request(
  1032. self, params: dict[str, Any], extra_features: dict[str, bool] | None = None
  1033. ) -> Response:
  1034. """Ensures that the required parameters for an on-demand request are included."""
  1035. # Expected parameters for this helper function
  1036. params["dataset"] = "metricsEnhanced"
  1037. params["useOnDemandMetrics"] = "true"
  1038. params["onDemandType"] = "dynamic_query"
  1039. _features = {**self.features, **(extra_features or {})}
  1040. return self.do_request(params, features=_features)
  1041. def test_top_events_wrong_on_demand_type(self):
  1042. query = "transaction.duration:>=100"
  1043. yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
  1044. response = self.do_request(
  1045. data={
  1046. "project": self.project.id,
  1047. "start": iso_format(self.day_ago),
  1048. "end": iso_format(self.day_ago + timedelta(hours=2)),
  1049. "interval": "1h",
  1050. "orderby": ["-count()"],
  1051. "environment": "production",
  1052. "query": query,
  1053. "yAxis": yAxis,
  1054. "field": [
  1055. "count()",
  1056. ],
  1057. "topEvents": 5,
  1058. "dataset": "metrics",
  1059. "useOnDemandMetrics": "true",
  1060. "onDemandType": "not_real",
  1061. },
  1062. )
  1063. assert response.status_code == 400, response.content
  1064. def test_top_events_works_without_on_demand_type(self):
  1065. query = "transaction.duration:>=100"
  1066. yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
  1067. response = self.do_request(
  1068. data={
  1069. "project": self.project.id,
  1070. "start": iso_format(self.day_ago),
  1071. "end": iso_format(self.day_ago + timedelta(hours=2)),
  1072. "interval": "1h",
  1073. "orderby": ["-count()"],
  1074. "environment": "production",
  1075. "query": query,
  1076. "yAxis": yAxis,
  1077. "field": [
  1078. "count()",
  1079. ],
  1080. "topEvents": 5,
  1081. "dataset": "metrics",
  1082. "useOnDemandMetrics": "true",
  1083. },
  1084. )
  1085. assert response.status_code == 200, response.content
  1086. def test_top_events_with_transaction_on_demand(self):
  1087. field = "count()"
  1088. field_two = "count_web_vitals(measurements.lcp, good)"
  1089. groupbys = ["customtag1", "customtag2"]
  1090. query = "transaction.duration:>=100"
  1091. spec = OnDemandMetricSpec(
  1092. field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
  1093. )
  1094. spec_two = OnDemandMetricSpec(
  1095. field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
  1096. )
  1097. for hour in range(0, 5):
  1098. self.store_on_demand_metric(
  1099. hour * 62 * 24,
  1100. spec=spec,
  1101. additional_tags={
  1102. "customtag1": "foo",
  1103. "customtag2": "red",
  1104. "environment": "production",
  1105. },
  1106. timestamp=self.day_ago + timedelta(hours=hour),
  1107. )
  1108. self.store_on_demand_metric(
  1109. hour * 60 * 24,
  1110. spec=spec_two,
  1111. additional_tags={
  1112. "customtag1": "bar",
  1113. "customtag2": "blue",
  1114. "environment": "production",
  1115. },
  1116. timestamp=self.day_ago + timedelta(hours=hour),
  1117. )
  1118. yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
  1119. response = self.do_request(
  1120. data={
  1121. "project": self.project.id,
  1122. "start": iso_format(self.day_ago),
  1123. "end": iso_format(self.day_ago + timedelta(hours=2)),
  1124. "interval": "1h",
  1125. "orderby": ["-count()"],
  1126. "environment": "production",
  1127. "query": query,
  1128. "yAxis": yAxis,
  1129. "field": [
  1130. "count()",
  1131. "count_web_vitals(measurements.lcp, good)",
  1132. "customtag1",
  1133. "customtag2",
  1134. ],
  1135. "topEvents": 5,
  1136. "dataset": "metricsEnhanced",
  1137. "useOnDemandMetrics": "true",
  1138. "onDemandType": "dynamic_query",
  1139. },
  1140. )
  1141. assert response.status_code == 200, response.content
  1142. groups = [
  1143. ("foo,red", "count()", 0.0, 1488.0),
  1144. ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
  1145. ("bar,blue", "count()", 0.0, 0.0),
  1146. ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
  1147. ]
  1148. assert len(response.data.keys()) == 2
  1149. for group_count in groups:
  1150. group, agg, row1, row2 = group_count
  1151. row_data = response.data[group][agg]["data"][:2]
  1152. assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
  1153. assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
  1154. assert response.data[group]["isMetricsExtractedData"]
  1155. def test_top_events_with_transaction_on_demand_and_no_environment(self):
  1156. field = "count()"
  1157. field_two = "count_web_vitals(measurements.lcp, good)"
  1158. groupbys = ["customtag1", "customtag2"]
  1159. query = "transaction.duration:>=100"
  1160. spec = OnDemandMetricSpec(
  1161. field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
  1162. )
  1163. spec_two = OnDemandMetricSpec(
  1164. field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
  1165. )
  1166. for hour in range(0, 5):
  1167. self.store_on_demand_metric(
  1168. hour * 62 * 24,
  1169. spec=spec,
  1170. additional_tags={
  1171. "customtag1": "foo",
  1172. "customtag2": "red",
  1173. "environment": "production",
  1174. },
  1175. timestamp=self.day_ago + timedelta(hours=hour),
  1176. )
  1177. self.store_on_demand_metric(
  1178. hour * 60 * 24,
  1179. spec=spec_two,
  1180. additional_tags={
  1181. "customtag1": "bar",
  1182. "customtag2": "blue",
  1183. "environment": "production",
  1184. },
  1185. timestamp=self.day_ago + timedelta(hours=hour),
  1186. )
  1187. yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
  1188. response = self.do_request(
  1189. data={
  1190. "project": self.project.id,
  1191. "start": iso_format(self.day_ago),
  1192. "end": iso_format(self.day_ago + timedelta(hours=2)),
  1193. "interval": "1h",
  1194. "orderby": ["-count()"],
  1195. "query": query,
  1196. "yAxis": yAxis,
  1197. "field": [
  1198. "count()",
  1199. "count_web_vitals(measurements.lcp, good)",
  1200. "customtag1",
  1201. "customtag2",
  1202. ],
  1203. "topEvents": 5,
  1204. "dataset": "metricsEnhanced",
  1205. "useOnDemandMetrics": "true",
  1206. "onDemandType": "dynamic_query",
  1207. },
  1208. )
  1209. assert response.status_code == 200, response.content
  1210. groups = [
  1211. ("foo,red", "count()", 0.0, 1488.0),
  1212. ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
  1213. ("bar,blue", "count()", 0.0, 0.0),
  1214. ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
  1215. ]
  1216. assert len(response.data.keys()) == 2
  1217. for group_count in groups:
  1218. group, agg, row1, row2 = group_count
  1219. row_data = response.data[group][agg]["data"][:2]
  1220. assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
  1221. assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
  1222. assert response.data[group]["isMetricsExtractedData"]
  1223. def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_transaction_only(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": yAxis + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE

        assert response.status_code == 200, response.content
        # The transaction-only query keeps the widget on on-demand metrics, so
        # both stored groups come back with data.
        assert len(response.data.keys()) == 2
        assert bool(response.data["foo,red"])
        assert bool(response.data["bar,blue"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_error(
        self,
    ):
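        """A widget with no saved split whose query only matches error events
        should be saved as ERROR_EVENTS and read from the errors dataset."""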
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "very bad 2",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value2", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two] + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS

        assert response.status_code == 200, response.content
        # Fell back to the errors dataset; each stored error appears as its own
        # "customtag1," group (customtag2 is unset, hence the trailing comma).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["error_value2,"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_discover(self):
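        """A widget with no saved split whose query matches both an error and a
        transaction should be saved as DISCOVER and read from discover."""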
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        transaction = load_data("transaction")
        transaction["timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["start_timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["tags"] = {"customtag1": "transaction_value", "query.dataset": "foo"}
        self.store_event(
            data=transaction,
            project_id=self.project.id,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two, "customtag1", "customtag2"],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.DISCOVER

        assert response.status_code == 200, response.content
        # Fell back to discover data, which matches both the stored error and the
        # stored transaction (customtag2 is unset, hence the trailing comma).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["transaction_value,"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_saved(self):
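        """A widget whose split is already saved as TRANSACTION_LIKE should use
        on-demand metrics without being written to again."""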
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            # Transaction-like widgets use on-demand metrics.
            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        with mock.patch.object(widget, "save") as mock_widget_save:
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "orderby": ["-count()"],
                    "query": query,
                    "yAxis": yAxis,
                    "field": [
                        "count()",
                        "count_web_vitals(measurements.lcp, good)",
                        "customtag1",
                        "customtag2",
                    ],
                    "topEvents": 5,
                    "dataset": "metricsEnhanced",
                    "useOnDemandMetrics": "true",
                    "onDemandType": "dynamic_query",
                    "dashboardWidgetId": widget.id,
                },
            )
            # The split is already saved, so the widget must not be written again.
            mock_widget_save.assert_not_called()

        assert response.status_code == 200, response.content
        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_timeseries_on_demand_with_multiple_percentiles(self):
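        """Two percentile aggregates over different measurements should each get
        their own extraction hash and be queryable in one timeseries request."""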
        field = "p75(measurements.fcp)"
        field_two = "p75(measurements.lcp)"
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY)
        spec_two = OnDemandMetricSpec(
            field=field_two, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        assert (
            spec._query_str_for_hash
            == "event.measurements.fcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        assert (
            spec_two._query_str_for_hash
            == "event.measurements.lcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )

        for count in range(0, 4):
            self.store_on_demand_metric(
                count * 100,
                spec=spec,
                timestamp=self.day_ago + timedelta(hours=1),
            )
            self.store_on_demand_metric(
                count * 200.0,
                spec=spec_two,
                timestamp=self.day_ago + timedelta(hours=1),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": [field],
                "query": query,
                "yAxis": yAxis,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsData"]
        assert [attrs for _, attrs in response.data["p75(measurements.fcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 225.0}],
        ]

        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for _, attrs in response.data["p75(measurements.lcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 450.0}],
        ]

    def test_apdex_issue(self):
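        """apdex(300) grouped by a tag should be computed from the extracted
        satisfaction buckets stored with each on-demand metric."""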
        field = "apdex(300)"
        groupbys = ["group_tag"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_one",
                    "environment": "production",
                    "satisfaction": "tolerable",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_two",
                    "environment": "production",
                    "satisfaction": "satisfactory",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "group_tag"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["group_one"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for _, attrs in response.data["group_one"]["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
        ]

    def test_glob_http_referer_on_demand(self):
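        """Glob patterns on http.url and http.referer should be compiled into
        glob conditions in the metric spec and be queryable on demand."""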
        agg = "count()"
        network_id_tag = "networkId"
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        spec = OnDemandMetricSpec(
            field=agg,
            groupbys=[network_id_tag],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        assert spec.to_metric_spec(self.project) == {
            "category": "transaction",
            "mri": "c:transactions/on_demand@none",
            "field": None,
            "tags": [
                {"key": "query_hash", "value": "ac241f56"},
                {"key": "networkId", "field": "event.tags.networkId"},
                {"key": "environment", "field": "event.environment"},
            ],
            "condition": {
                "op": "and",
                "inner": [
                    {
                        "op": "glob",
                        "name": "event.request.url",
                        "value": ["https://sentry.io/*/foo/bar/*"],
                    },
                    {
                        "op": "glob",
                        "name": "event.request.headers.Referer",
                        "value": ["https://sentry.io/*/bar/*"],
                    },
                ],
            },
        }

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": [network_id_tag, agg],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": f"-{agg}",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": agg,
            },
        )

        assert response.status_code == 200, response.content
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def _test_is_metrics_extracted_data(
        self, params: dict[str, Any], expected_on_demand_query: bool, dataset: str
    ) -> dict[str, Any]:
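        """Shared helper: run ``params`` and check whether the result was served
        from on-demand extracted data and from ``dataset``; returns the meta."""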
        spec = OnDemandMetricSpec(
            field="count()",
            query="transaction.duration:>1s",
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        self.store_on_demand_metric(1, spec=spec)
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        meta = response.data["meta"]
        # This is the main thing we want to test for.
        assert meta.get("isMetricsExtractedData", False) is expected_on_demand_query
        assert meta["dataset"] == dataset

        return meta

    def test_is_metrics_extracted_data_is_included(self):
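        """isMetricsExtractedData should be set for an on-demand-eligible query."""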
        self._test_is_metrics_extracted_data(
            {
                "dataset": "metricsEnhanced",
                "query": "transaction.duration:>=91",
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
            expected_on_demand_query=True,
            dataset="metricsEnhanced",
        )

    def test_on_demand_epm_no_query(self):
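        """epm() with an empty query should stay on regular metrics rather than
        being routed to on-demand extraction."""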
        params = {
            "dataset": "metricsEnhanced",
            "environment": "production",
            "onDemandType": "dynamic_query",
            "project": self.project.id,
            "query": "",
            "statsPeriod": "1h",
            "useOnDemandMetrics": "true",
            "yAxis": ["epm()"],
        }
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        assert response.data["meta"] == {
            "fields": {"time": "date", "epm_900": "rate"},
            "units": {"time": None, "epm_900": None},
            "isMetricsData": True,
            "isMetricsExtractedData": False,
            "tips": {},
            "datasetReason": "unchanged",
            "dataset": "metricsEnhanced",
        }

    def test_group_by_transaction(self):
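        """Grouping an on-demand query by the transaction tag should return the
        per-transaction series."""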
        field = "count()"
        groupbys = ["transaction"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                (hour + 1) * 5,
                spec=spec,
                additional_tags={
                    "transaction": "/performance",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "transaction"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["/performance"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for _, attrs in response.data["/performance"]["data"]] == [
            [{"count": 5.0}],
            [{"count": 10.0}],
        ]

    def _setup_orderby_tests(self, query):
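        """Store count() and p95(transaction.duration) series for two networkId
        groups so the orderby tests below have distinguishable rankings."""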
        count_spec = OnDemandMetricSpec(
            field="count()",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        p95_spec = OnDemandMetricSpec(
            field="p95(transaction.duration)",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=count_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                100,
                spec=p95_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                200,
                spec=p95_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            # Store twice as many counts for 5678 so orderby ranks it after 1234
            # when sorting ascending.
            self.store_on_demand_metric(
                2,
                spec=count_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

    def test_order_by_aggregate_top_events_desc(self):
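        """Descending orderby on count() should rank the 5678 group first."""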
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3

        data1 = response.data["5678"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 10
        data2 = response.data["1234"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 5

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_asc(self):
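        """Ascending orderby on count() should rank the 1234 group first."""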
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3

        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 5
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 10

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_graph_different_aggregate(self):
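        """Ordering by count() while charting p95(transaction.duration) should
        keep the count()-based ranking of the groups."""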
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3

        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 100
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 200

        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_cannot_order_by_tag(self):
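        """Ordering top events by a plain tag is rejected with a 400."""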
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-networkId",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 400, response.content

    def test_order_by_two_aggregates(self):
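        """Ordering by two aggregates at once is rejected with a 400."""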
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)

        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()", "p95(transaction.duration)"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": ["count()", "p95(transaction.duration)"],
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 400, response.content

    def test_top_events_with_tag(self):
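        """Top events grouped by a custom tag should work on the metrics dataset
        with on-demand extraction enabled."""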
        query = "transaction.duration:>=100"
        yAxis = ["count()"]
        field = "count()"
        groupbys = ["some-field"]
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        self.store_on_demand_metric(
            1,
            spec=spec,
            additional_tags={
                "some-field": "bar",
                "environment": "production",
            },
            timestamp=self.day_ago,
        )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "some-field",
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content