test_organization_events_stats_mep.py

from __future__ import annotations

from datetime import timedelta
from typing import Any
from unittest import mock

import pytest
from django.urls import reverse
from rest_framework.response import Response

from sentry.discover.models import DatasetSourcesTypes
from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
from sentry.models.environment import Environment
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.helpers.on_demand import create_widget
from sentry.utils.samples import load_data

pytestmark = pytest.mark.sentry_metrics


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }
        self.additional_params = dict()

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    @pytest.mark.querybuilder
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(hours=6),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for expected_count, (_, attrs) in zip(event_counts, rows):
                assert attrs[0]["count"] == expected_count / (3600.0 / 60.0)
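
    # Note on the assertion above: epm() normalizes each bucket to events per
    # minute, so a 1h bucket holding 6 events resolves to 6 / (3600 / 60) = 0.1.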

    @pytest.mark.querybuilder
    def test_throughput_spm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_span_metric(
                    1,
                    timestamp=self.day_ago + timedelta(hours=hour, minutes=minute),
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": "spm()",
                "project": self.project.id,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["meta"]["dataset"] == "metrics"

        rows = data[0:6]
        for expected_count, (_, attrs) in zip(event_counts, rows):
            assert attrs[0]["count"] == expected_count / (3600.0 / 60.0)

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(hours=24),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]
            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)
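
    # Note on the assertion above: all 18 events land in one 24h bucket, and
    # epm() divides by the bucket length in minutes, so the expected value is
    # 18 / (86400 / 60) = 18 / 1440 = 0.0125 events per minute.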

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": self.day_ago + timedelta(minutes=30),
                    "end": self.day_ago + timedelta(hours=6, minutes=30),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for expected_count, (_, attrs) in zip(event_counts, rows):
                assert attrs[0]["count"] == expected_count / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(minutes=6),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for expected_count, (_, attrs) in zip(event_counts, rows):
                assert attrs[0]["count"] == expected_count / 60.0
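
    # Note on the assertion above: eps() divides the per-bucket count by the
    # bucket length in seconds, so a 1m bucket with 6 events yields 6 / 60 = 0.1.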

    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]
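
    # Note on the expected values: "internal_error" counts as a failed
    # transaction while "ok" does not, so the first three hours see 1 failure
    # out of 2 transactions (rate 0.5) and the last three see 0 out of 1.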

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(hours=2),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"
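
    # As these cases illustrate, a query is only served from metrics when every
    # filter is representable in the metrics dataset; error events and
    # raw-duration outlier filters are not, so those queries fall back to the
    # transactions dataset and isMetricsData flips to False.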

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                # Would be a MEP-able query, but metricsEnhanced is explicitly disabled below
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_sum_transaction_duration_with_comparison(self):
        # Store data for the previous day so the comparison has values to match.
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # Store data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(days=1),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # When the full suite runs, shared state can leak data into the second
        # time bucket, which is otherwise filled by zerofilling. To avoid that
        # flaky failure, only check that the first bucket contains the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}
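
    # Note on the expected values: comparisonDelta=86400 shifts the query window
    # back by one day, so count is today's 123 + 456 = 579 and comparisonCount
    # is yesterday's 1 + 2 = 3.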

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}
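
    # Note on the internal_metric strings above: an MRI is structured as
    # <type>:<namespace>/<name>@<unit>, so
    # "d:transactions/measurements.datacenter_memory@pebibyte" is a
    # distribution ("d") in the transactions namespace with a pebibyte unit,
    # which is why the meta reports a "size" field type and "pebibyte" unit.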

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]

        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
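
    # Note on the expected series above: the second bucket holds the values
    # 456 and 789, so sum() reports 456 + 789 = 1245 while p50() reports their
    # midpoint, (456 + 789) / 2 = 622.5.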

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5
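
    # Note on the order assertion above: results are sorted by
    # -p75(transaction.duration), so baz (300) gets order 0, bar (200) order 1,
    # and foo (100) order 2, i.e. order == 2 - position in transaction_spec.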

    def test_top_events_with_project(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data[self.project.slug]
        assert data["order"] == 0

    def test_split_decision_for_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "blah", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"], "error.type:blah", self.project, discover_widget_split=None
        )

        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:blah",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )
        assert response.status_code == 200, response.content
        # Compare with == rather than `is`: get_type_name returns a string, and
        # string identity is an implementation detail.
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        _, widget, __ = create_widget(
            ["count()", "transaction.name"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction.name"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )
        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_top_events_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "test_error", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"],
            "error.type:test_error",
            self.project,
            discover_widget_split=None,
        )

        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:test_error",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )
        assert response.status_code == 200, response.content
        # Only a single result for the test_error event
        assert len(response.data) == 1
        # Results are grouped by the error type
        assert response.data.get("test_error").get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_top_events_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
            tags={"transaction": "foo_transaction"},
        )
        _, widget, __ = create_widget(
            ["count()", "transaction"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )
        assert response.status_code == 200, response.content
        # Only a single result for the transaction
        assert len(response.data) == 1
        # Results are grouped by the transaction
        assert response.data.get("foo_transaction").get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_ambiguous_widget_without_data(self):
        _, widget, __ = create_widget(
            ["count()", "transaction.name", "error.type"],
            "",
            self.project,
            discover_widget_split=None,
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction.name", "error.type"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            },
            features={"organizations:performance-discover-dataset-selector": True},
        )
        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.FORCED.value

    def test_inp_percentile(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(
                111,
                metric="measurements.inp",
                timestamp=timestamp,
                use_case_id=UseCaseID.TRANSACTIONS,
            )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": ["p75(measurements.inp)"],
                "project": self.project.id,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        assert len(data["data"]) == 6
        assert data["isMetricsData"]
        assert data["meta"]["fields"]["p75_measurements_inp"] == "duration"
        for item in data["data"]:
            assert item[1][0]["count"] == 111

    def test_metrics_enhanced_defaults_to_transactions_with_feature_flag(self):
        # Store an error
        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "poof",
                "user": {"email": self.user.email},
                "timestamp": before_now(days=1, minutes=1).isoformat(),
                "tags": {"notMetrics": "this makes it not metrics"},
            },
            project_id=self.project.id,
        )
        # Store a transaction
        transaction_data = load_data("transaction")
        self.store_event(
            {
                **transaction_data,
                "tags": {"notMetrics": "this makes it not metrics"},
                "start_timestamp": before_now(days=1, minutes=1).isoformat(),
                "timestamp": before_now(days=1).isoformat(),
            },
            project_id=self.project.id,
        )
        features = {
            "organizations:performance-discover-dataset-selector": True,
            "organizations:discover-basic": True,
            "organizations:global-views": True,
        }
        query = {
            "field": ["count()"],
            "query": 'notMetrics:"this makes it not metrics"',
            "statsPeriod": "1d",
            "interval": "1d",
            "dataset": "metricsEnhanced",
        }

        response = self.do_request(query, features=features)

        assert response.status_code == 200, response.content
        assert len(response.data["data"]) == 2
        # First bucket, where the transaction should be
        assert response.data["data"][0][1][0]["count"] == 1


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True
        self.additional_params = {"forceMetricsLayer": "true"}

    def test_counter_standard_metric(self):
        mri = "c:transactions/usage@none"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(minutes=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1m",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min_data = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min_data, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max_data = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max_data, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        p90_data = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90_data, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
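
    # Note on the p90 expectations above: each hourly bucket holds [v, 2v, 3v],
    # and a linearly interpolated p90 of three sorted samples lands at rank
    # 0.9 * (3 - 1) = 1.8, i.e. 2v + 0.8 * (3v - 2v) = 2.8v, which is 28.0 for
    # v = 10, 56.0 for v = 20, and so on.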

    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # Store each value a second time to check the de-duplication of sets.
            for _ in range(2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # When multiple gauges are merged, give them different
                    # timestamps so the `last` merge resolves deterministically
                    # to the same value every run.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min_data = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min_data, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max_data = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max_data, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        last_data = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last_data, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        sum_data = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum_data, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        count_data = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count_data, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    @pytest.mark.querybuilder
    def test_throughput_spm_hour_rollup(self):
        # Overrides the base class test; spm() queries the spans dataset, so
        # this version (presumably deliberately) omits additional_params and
        # its forceMetricsLayer flag.
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_span_metric(
                    1,
                    timestamp=self.day_ago + timedelta(hours=hour, minutes=minute),
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": "spm()",
                "project": self.project.id,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["meta"]["dataset"] == "metrics"

        rows = data[0:6]
        for expected_count, (_, attrs) in zip(event_counts, rows):
            assert attrs[0]["count"] == expected_count / (3600.0 / 60.0)


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandWidgets(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        Environment.get_or_create(self.project, "production")
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:on-demand-metrics-extraction-widgets": True,
            "organizations:on-demand-metrics-extraction": True,
        }

    def _make_on_demand_request(
        self, params: dict[str, Any], extra_features: dict[str, bool] | None = None
    ) -> Response:
        """Ensures that the required parameters for an on-demand request are included."""
        # Expected parameters for this helper function
        params["dataset"] = "metricsEnhanced"
        params["useOnDemandMetrics"] = "true"
        params["onDemandType"] = "dynamic_query"
        _features = {**self.features, **(extra_features or {})}
        return self.do_request(params, features=_features)

    def test_top_events_wrong_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
                "onDemandType": "not_real",
            },
        )
        assert response.status_code == 400, response.content

    def test_top_events_works_without_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )
        assert response.status_code == 200, response.content

    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
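        # Each top-events series is keyed by the joined group values (e.g.
        # "foo,red"), and each aggregate's rows have the shape
        # (timestamp, [{"count": value}]), which is what the unpacking above
        # relies on.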

    def test_top_events_with_transaction_on_demand_and_no_environment(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_transaction_only(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": yAxis + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
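        # When dashboardWidgetId is passed and the widget's split is still None,
        # the endpoint infers the dataset and persists it on the widget; only
        # transaction-like (on-demand) data matches here, so TRANSACTION_LIKE is
        # what gets saved.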
        assert response.status_code == 200, response.content
        # Both on-demand groups are returned; nothing fell back to the (empty)
        # discover data for this test.
        assert len(response.data.keys()) == 2
        assert bool(response.data["foo,red"])
        assert bool(response.data["bar,blue"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_error(
        self,
    ):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "very bad 2",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value2", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two] + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS

        assert response.status_code == 200, response.content
        # The widget was classified as errors, so the two stored error events come
        # back as the top groups (customtag2 is unset, hence the trailing comma in
        # each group key).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["error_value2,"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_discover(self):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        transaction = load_data("transaction")
        transaction["timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["start_timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["tags"] = {"customtag1": "transaction_value", "query.dataset": "foo"}
        self.store_event(
            data=transaction,
            project_id=self.project.id,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two, "customtag1", "customtag2"],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.DISCOVER

        assert response.status_code == 200, response.content
        # Fell back to discover data, which for this test contains one error and
        # one transaction event (customtag2 is unset, hence the trailing comma in
        # each group key).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["transaction_value,"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_saved(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,  # Transaction-like widgets use on-demand
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        with mock.patch.object(widget, "save") as mock_widget_save:
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(hours=2),
                    "interval": "1h",
                    "orderby": ["-count()"],
                    "query": query,
                    "yAxis": yAxis,
                    "field": [
                        "count()",
                        "count_web_vitals(measurements.lcp, good)",
                        "customtag1",
                        "customtag2",
                    ],
                    "topEvents": 5,
                    "dataset": "metricsEnhanced",
                    "useOnDemandMetrics": "true",
                    "onDemandType": "dynamic_query",
                    "dashboardWidgetId": widget.id,
                },
            )
            # The split is already saved on the widget, so the endpoint must not
            # write to it again.
            mock_widget_save.assert_not_called()

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_timeseries_on_demand_with_multiple_percentiles(self):
        field = "p75(measurements.fcp)"
        field_two = "p75(measurements.lcp)"
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY)
        spec_two = OnDemandMetricSpec(
            field=field_two, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        assert (
            spec._query_str_for_hash
            == "event.measurements.fcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        assert (
            spec_two._query_str_for_hash
            == "event.measurements.lcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
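        # This string is what gets hashed into the spec's query_hash tag, so two
        # specs with the same search query but different percentile fields still
        # extract as distinct on-demand metrics.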

        for count in range(0, 4):
            self.store_on_demand_metric(
                count * 100,
                spec=spec,
                timestamp=self.day_ago + timedelta(hours=1),
            )
            self.store_on_demand_metric(
                count * 200.0,
                spec=spec_two,
                timestamp=self.day_ago + timedelta(hours=1),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": [field],
                "query": query,
                "yAxis": yAxis,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.fcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 225.0}],
        ]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.lcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 450.0}],
        ]
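        # Expected values, for reference: each series stores 0, x, 2x and 3x in
        # the hour-1 bucket (x = 100 for fcp, 200 for lcp); a linearly
        # interpolated p75 of [0, x, 2x, 3x] is 2.25x, i.e. 225.0 and 450.0.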

    def test_apdex_issue(self):
        field = "apdex(300)"
        groupbys = ["group_tag"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_one",
                    "environment": "production",
                    "satisfaction": "tolerable",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_two",
                    "environment": "production",
                    "satisfaction": "satisfactory",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "group_tag"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["group_one"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["group_one"]["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
        ]
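        # Apdex = (satisfied + tolerable / 2) / total. group_one records exactly
        # one "tolerable" measurement per bucket, so (0 + 1 / 2) / 1 == 0.5.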

    def test_glob_http_referer_on_demand(self):
        agg = "count()"
        network_id_tag = "networkId"
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        spec = OnDemandMetricSpec(
            field=agg,
            groupbys=[network_id_tag],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        assert spec.to_metric_spec(self.project) == {
            "category": "transaction",
            "mri": "c:transactions/on_demand@none",
            "field": None,
            "tags": [
                {"key": "query_hash", "value": "ac241f56"},
                {"key": "networkId", "field": "event.tags.networkId"},
                {"key": "environment", "field": "event.environment"},
            ],
            "condition": {
                "op": "and",
                "inner": [
                    {
                        "op": "glob",
                        "name": "event.request.url",
                        "value": ["https://sentry.io/*/foo/bar/*"],
                    },
                    {
                        "op": "glob",
                        "name": "event.request.headers.Referer",
                        "value": ["https://sentry.io/*/bar/*"],
                    },
                ],
            },
        }
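        # The compiled spec is a counter metric gated by glob conditions derived
        # from the search query; the query_hash tag is how the endpoint later
        # finds this exact extracted series.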

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": [network_id_tag, agg],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": f"-{agg}",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": agg,
            },
        )

        assert response.status_code == 200, response.content
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def _test_is_metrics_extracted_data(
        self, params: dict[str, Any], expected_on_demand_query: bool, dataset: str
    ) -> dict[str, Any]:
        spec = OnDemandMetricSpec(
            field="count()",
            query="transaction.duration:>1s",
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        self.store_on_demand_metric(1, spec=spec)
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        meta = response.data["meta"]

        # This is the main thing we want to test for
        assert meta.get("isMetricsExtractedData", False) is expected_on_demand_query
        assert meta["dataset"] == dataset

        return meta

    def test_is_metrics_extracted_data_is_included(self):
        self._test_is_metrics_extracted_data(
            {
                "dataset": "metricsEnhanced",
                "query": "transaction.duration:>=91",
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
            expected_on_demand_query=True,
            dataset="metricsEnhanced",
        )

    def test_on_demand_epm_no_query(self):
        params = {
            "dataset": "metricsEnhanced",
            "environment": "production",
            "onDemandType": "dynamic_query",
            "project": self.project.id,
            "query": "",
            "statsPeriod": "1h",
            "useOnDemandMetrics": "true",
            "yAxis": ["epm()"],
        }
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        assert response.data["meta"] == {
            "fields": {"time": "date", "epm_900": "rate"},
            "units": {"time": None, "epm_900": None},
            "isMetricsData": True,
            "isMetricsExtractedData": False,
            "tips": {},
            "datasetReason": "unchanged",
            "dataset": "metricsEnhanced",
        }

    def test_group_by_transaction(self):
        field = "count()"
        groupbys = ["transaction"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                (hour + 1) * 5,
                spec=spec,
                additional_tags={
                    "transaction": "/performance",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "transaction"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["/performance"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["/performance"]["data"]] == [
            [{"count": 5.0}],
            [{"count": 10.0}],
        ]

    def _setup_orderby_tests(self, query):
        count_spec = OnDemandMetricSpec(
            field="count()",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        p95_spec = OnDemandMetricSpec(
            field="p95(transaction.duration)",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=count_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                100,
                spec=p95_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                200,
                spec=p95_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            # Store twice as many 5678 so orderby puts it later
            self.store_on_demand_metric(
                2,
                spec=count_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
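        # Totals after the loop: networkId 1234 has count() == 5 (1 per hour over
        # 5 hours) and p95 == 100; networkId 5678 has count() == 10 (2 per hour)
        # and p95 == 200. These are the figures the orderby tests below assert
        # against.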

    def test_order_by_aggregate_top_events_desc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": "-count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["5678"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 10
        data2 = response.data["1234"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 5
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_asc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 5
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 10
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_graph_different_aggregate(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 100
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 200
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_cannot_order_by_tag(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": "-networkId",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 400, response.content
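        # Ordering top events by a plain tag (rather than an aggregate) is not
        # supported for on-demand metrics queries, hence the 400.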

    def test_order_by_two_aggregates(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()", "p95(transaction.duration)"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": ["count()", "p95(transaction.duration)"],
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 400, response.content

    def test_top_events_with_tag(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()"]
        field = "count()"
        groupbys = ["some-field"]
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        self.store_on_demand_metric(
            1,
            spec=spec,
            additional_tags={
                "some-field": "bar",
                "environment": "production",
            },
            timestamp=self.day_ago,
        )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "some-field",
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content