test_organization_events_stats_mep.py

from __future__ import annotations

from datetime import timedelta
from typing import Any
from unittest import mock

import pytest
from django.urls import reverse

from sentry.discover.models import DatasetSourcesTypes
from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
from sentry.models.environment import Environment
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now
from sentry.testutils.helpers.on_demand import create_widget
from sentry.utils.samples import load_data

pytestmark = pytest.mark.sentry_metrics


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }
        self.additional_params = dict()

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    @pytest.mark.querybuilder
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(hours=6),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
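            # epm() reports events per minute: the bucket count divided by the
            # bucket length in minutes (3600.0 / 60.0 == 60 for a 1h interval),
            # so e.g. 6 events in an hour yield 0.1.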
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    @pytest.mark.querybuilder
    def test_throughput_spm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_span_metric(
                    1,
                    timestamp=self.day_ago + timedelta(hours=hour, minutes=minute),
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": "spm()",
                "project": self.project.id,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["meta"]["dataset"] == "metrics"

        rows = data[0:6]
        for test in zip(event_counts, rows):
            assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(hours=24),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]
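            # A 24h bucket spans 86400.0 / 60.0 == 1440 minutes, and every stored
            # event lands in the first bucket: sum(event_counts) == 18, and
            # 18 / 1440 == 0.0125.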
            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": self.day_ago + timedelta(minutes=30),
                    "end": self.day_ago + timedelta(hours=6, minutes=30),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(minutes=6),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
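            # eps() reports events per second: a 1m bucket spans 60 seconds,
            # so 6 events in a minute yield 0.1.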
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0

    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )
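
        # The first three hours have one ok and one internal_error event each
        # (failure_rate 0.5); the remaining hours only have ok events (failure_rate 0).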
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(hours=2),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]
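
        # Queries that metrics can answer stay on the metrics dataset; anything
        # they cannot (error events, duration outlier filters) falls back to
        # indexed transactions, and a boolean query falls back if any branch is
        # non-MEP.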
        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                # Should be a MEP-able query
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
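        # 456 and 789 both land in the second hour bucket, so the series is [123, 1245].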
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_sum_transaction_duration_with_comparison(self):
        # We store the data for the previous day (in order to have values for the comparison).
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # We store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(days=1),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # For some reason, if all tests run, there is some shared state that makes this test
        # have data in the second time bucket, which is filled automatically by the zerofilling.
        # In order to avoid this flaky failure, we will only check that the first bucket
        # contains the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
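        # internal_metric is an MRI of the form <type>:<namespace>/<name>@<unit>:
        # here a distribution (d:) in the transactions namespace, stored in pebibytes.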
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
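        # The second bucket holds 456 and 789: their sum is 1245 and their p50
        # is the midpoint, 622.5.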
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]
        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            # Results are ordered by descending p75, so the slowest transaction comes first.
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5

    def test_top_events_with_project(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data[f"{self.project.slug}"]
        assert data["order"] == 0

    def test_split_decision_for_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "blah", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"], "error.type:blah", self.project, discover_widget_split=None
        )
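
        # Passing dashboardWidgetId lets the endpoint infer which dataset the
        # widget belongs to and persist that split decision on the widget.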
        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:blah",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )
        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        _, widget, __ = create_widget(
            ["count()", "transaction.name"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None
        response = self.do_request(
            {
                "field": ["count()", "transaction.name"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )
        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_top_events_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "test_error", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"],
            "error.type:test_error",
            self.project,
            discover_widget_split=None,
        )
        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:test_error",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )
        assert response.status_code == 200, response.content
        # Only a singular result for the test_error event
        assert len(response.data) == 1
        # Results are grouped by the error type
        assert response.data.get("test_error").get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_top_events_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
            tags={"transaction": "foo_transaction"},
        )
        _, widget, __ = create_widget(
            ["count()", "transaction"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None
        response = self.do_request(
            {
                "field": ["count()", "transaction"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )
        assert response.status_code == 200, response.content
        # Only a singular result for the transaction
        assert len(response.data) == 1
        # Results are grouped by the transaction
        assert response.data.get("foo_transaction").get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_ambiguous_widget_without_data(self):
        _, widget, __ = create_widget(
            ["count()", "transaction.name", "error.type"],
            "",
            self.project,
            discover_widget_split=None,
        )
        assert widget.discover_widget_split is None
        response = self.do_request(
            {
                "field": ["count()", "transaction.name", "error.type"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            },
            features={"organizations:performance-discover-dataset-selector": True},
        )
        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)
        widget.refresh_from_db()
        # With no data to disambiguate, the split defaults to errors and is marked FORCED.
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.FORCED.value

    def test_inp_percentile(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(
                111,
                metric="measurements.inp",
                timestamp=timestamp,
                use_case_id=UseCaseID.TRANSACTIONS,
            )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": ["p75(measurements.inp)"],
                "project": self.project.id,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        assert len(data["data"]) == 6
        assert data["isMetricsData"]
        assert data["meta"]["fields"]["p75_measurements_inp"] == "duration"
        for item in data["data"]:
            assert item[1][0]["count"] == 111

    def test_metrics_enhanced_defaults_to_transactions_with_feature_flag(self):
        # Store an error
        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "poof",
                "user": {"email": self.user.email},
                "timestamp": before_now(days=1, minutes=1).isoformat(),
                "tags": {"notMetrics": "this makes it not metrics"},
            },
            project_id=self.project.id,
        )
        # Store a transaction
        transaction_data = load_data("transaction")
        self.store_event(
            {
                **transaction_data,
                "tags": {"notMetrics": "this makes it not metrics"},
                "start_timestamp": before_now(days=1, minutes=1).isoformat(),
                "timestamp": before_now(days=1).isoformat(),
            },
            project_id=self.project.id,
        )
        features = {
            "organizations:performance-discover-dataset-selector": True,
            "organizations:discover-basic": True,
            "organizations:global-views": True,
        }
        query = {
            "field": ["count()"],
            "query": 'notMetrics:"this makes it not metrics"',
            "statsPeriod": "1d",
            "interval": "1d",
            "dataset": "metricsEnhanced",
        }
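
        # The custom tag cannot be served by metrics, so the query should fall
        # back to the transactions dataset; only the transaction (not the error)
        # lands in the first bucket.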
        response = self.do_request(query, features=features)
        assert response.status_code == 200, response.content
        assert len(response.data["data"]) == 2
        # First bucket, where the transaction should be
        assert response.data["data"][0][1][0]["count"] == 1


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True
        self.additional_params = {"forceMetricsLayer": "true"}

    def test_counter_standard_metric(self):
        mri = "c:transactions/usage@none"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(minutes=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1m",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        p90 = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # We store each value a second time, since we want to check the de-duplication of sets.
            for i in range(0, 2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # When multiple gauges are merged, in order to make the `last` merge work
                    # deterministically it's better to have the gauges with different timestamps
                    # so that the last value is always the same.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        last = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        sum = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        count = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    @pytest.mark.querybuilder
    def test_throughput_spm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_span_metric(
                    1,
                    timestamp=self.day_ago + timedelta(hours=hour, minutes=minute),
                )

        response = self.do_request(
            data={
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=6),
                "interval": "1h",
                "yAxis": "spm()",
                "project": self.project.id,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["meta"]["dataset"] == "metrics"

        rows = data[0:6]
        for test in zip(event_counts, rows):
            assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandWidgets(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        Environment.get_or_create(self.project, "production")
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:on-demand-metrics-extraction-widgets": True,
            "organizations:on-demand-metrics-extraction": True,
        }

    def test_top_events_wrong_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
                "onDemandType": "not_real",
            },
        )
        assert response.status_code == 400, response.content

    def test_top_events_works_without_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )
        assert response.status_code == 200, response.content

    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
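
        # An OnDemandMetricSpec describes a metric extracted on demand for a query
        # the stored metrics cannot serve; values stored against the spec carry the
        # custom tags, so top-events grouping works on them.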
        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )
        assert response.status_code == 200, response.content
  1262. groups = [
  1263. ("foo,red", "count()", 0.0, 1488.0),
  1264. ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
  1265. ("bar,blue", "count()", 0.0, 0.0),
  1266. ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
  1267. ]
  1268. assert len(response.data.keys()) == 2
  1269. for group_count in groups:
  1270. group, agg, row1, row2 = group_count
  1271. row_data = response.data[group][agg]["data"][:2]
  1272. assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
  1273. assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
  1274. assert response.data[group]["isMetricsExtractedData"]
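
    # Same scenario as above, but the request omits the environment filter; the
    # groups and counts are unchanged.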
    def test_top_events_with_transaction_on_demand_and_no_environment(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
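
    # A widget with no saved split and a transaction-only query gets its
    # discover_widget_split saved as TRANSACTION_LIKE.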
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_transaction_only(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": yAxis + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE

        assert response.status_code == 200, response.content
        # The query is transaction-only, so the on-demand groups are returned directly.
        assert len(response.data.keys()) == 2
        assert bool(response.data["foo,red"])
        assert bool(response.data["bar,blue"])
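
    # An errors-only query saves the unsaved widget split as ERROR_EVENTS and
    # falls back to error event data.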
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_error(
        self,
    ):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "start_timestamp": (self.day_ago + timedelta(hours=1)).isoformat(),
                "timestamp": (self.day_ago + timedelta(hours=1)).isoformat(),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "very bad 2",
                "type": "error",
                "start_timestamp": (self.day_ago + timedelta(hours=1)).isoformat(),
                "timestamp": (self.day_ago + timedelta(hours=1)).isoformat(),
                "tags": {"customtag1": "error_value2", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two] + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS

        assert response.status_code == 200, response.content
        # Fell back to error event data; customtag2 is unset, so each group key ends
        # with an empty segment.
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["error_value2,"])
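
    # With both an error event and a transaction matching the query, the unsaved
    # widget split is resolved to DISCOVER.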
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_discover(self):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "timestamp": (self.day_ago + timedelta(hours=1)).isoformat(),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        transaction = load_data("transaction")
        transaction["timestamp"] = (self.day_ago + timedelta(hours=1)).isoformat()
        transaction["start_timestamp"] = (self.day_ago + timedelta(hours=1)).isoformat()
        transaction["tags"] = {"customtag1": "transaction_value", "query.dataset": "foo"}

        self.store_event(
            data=transaction,
            project_id=self.project.id,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two, "customtag1", "customtag2"],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.DISCOVER

        assert response.status_code == 200, response.content
        # Fell back to discover data; customtag2 is unset, so each group key ends
        # with an empty segment.
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["transaction_value,"])
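
    # A widget whose split is already saved must not be written again: widget.save()
    # is patched and asserted not to be called.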
    def test_top_events_with_transaction_on_demand_passing_widget_id_saved(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,  # Transaction-like uses on-demand
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        with mock.patch.object(widget, "save") as mock_widget_save:
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": self.day_ago,
                    "end": self.day_ago + timedelta(hours=2),
                    "interval": "1h",
                    "orderby": ["-count()"],
                    "query": query,
                    "yAxis": yAxis,
                    "field": [
                        "count()",
                        "count_web_vitals(measurements.lcp, good)",
                        "customtag1",
                        "customtag2",
                    ],
                    "topEvents": 5,
                    "dataset": "metricsEnhanced",
                    "useOnDemandMetrics": "true",
                    "onDemandType": "dynamic_query",
                    "dashboardWidgetId": widget.id,
                },
            )
            mock_widget_save.assert_not_called()

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
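
    # Two distinct percentile fields produce two separate on-demand specs; stored
    # samples 0, 100, 200, 300 and 0, 200, 400, 600 give p75 values of 225 and 450.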
    def test_timeseries_on_demand_with_multiple_percentiles(self):
        field = "p75(measurements.fcp)"
        field_two = "p75(measurements.lcp)"
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY)
        spec_two = OnDemandMetricSpec(
            field=field_two, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        assert (
            spec._query_str_for_hash
            == "event.measurements.fcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        assert (
            spec_two._query_str_for_hash
            == "event.measurements.lcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )

        for count in range(0, 4):
            self.store_on_demand_metric(
                count * 100,
                spec=spec,
                timestamp=self.day_ago + timedelta(hours=1),
            )
            self.store_on_demand_metric(
                count * 200.0,
                spec=spec_two,
                timestamp=self.day_ago + timedelta(hours=1),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": [field],
                "query": query,
                "yAxis": yAxis,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.fcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 225.0}],
        ]

        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.lcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 450.0}],
        ]
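
    # apdex(300) grouped by a tag: group_one only records "tolerable" transactions,
    # so its apdex is 0.5 in every bucket.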
    def test_apdex_issue(self):
        field = "apdex(300)"
        groupbys = ["group_tag"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_one",
                    "environment": "production",
                    "satisfaction": "tolerable",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_two",
                    "environment": "production",
                    "satisfaction": "satisfactory",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "group_tag"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["group_one"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["group_one"]["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
        ]
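
    # The generated metric spec should turn the http.url and http.referer wildcards
    # into "glob" conditions.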
    def test_glob_http_referer_on_demand(self):
        agg = "count()"
        network_id_tag = "networkId"
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        spec = OnDemandMetricSpec(
            field=agg,
            groupbys=[network_id_tag],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        assert spec.to_metric_spec(self.project) == {
            "category": "transaction",
            "mri": "c:transactions/on_demand@none",
            "field": None,
            "tags": [
                {"key": "query_hash", "value": "ac241f56"},
                {"key": "networkId", "field": "event.tags.networkId"},
                {"key": "environment", "field": "event.environment"},
            ],
            "condition": {
                "op": "and",
                "inner": [
                    {
                        "op": "glob",
                        "name": "event.request.url",
                        "value": ["https://sentry.io/*/foo/bar/*"],
                    },
                    {
                        "op": "glob",
                        "name": "event.request.headers.Referer",
                        "value": ["https://sentry.io/*/bar/*"],
                    },
                ],
            },
        }

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": [network_id_tag, agg],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": f"-{agg}",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": agg,
            },
        )

        assert response.status_code == 200, response.content
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def _test_is_metrics_extracted_data(
        self, params: dict[str, Any], expected_on_demand_query: bool, dataset: str
    ) -> dict[str, Any]:
        spec = OnDemandMetricSpec(
            field="count()",
            query="transaction.duration:>1s",
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        self.store_on_demand_metric(1, spec=spec)
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        meta = response.data["meta"]

        # This is the main thing we want to test for
        assert meta.get("isMetricsExtractedData", False) is expected_on_demand_query
        assert meta["dataset"] == dataset

        return meta

    def test_is_metrics_extracted_data_is_included(self):
        self._test_is_metrics_extracted_data(
            {
                "dataset": "metricsEnhanced",
                "query": "transaction.duration:>=91",
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
            expected_on_demand_query=True,
            dataset="metricsEnhanced",
        )

    def test_on_demand_epm_no_query(self):
        params = {
            "dataset": "metricsEnhanced",
            "environment": "production",
            "onDemandType": "dynamic_query",
            "project": self.project.id,
            "query": "",
            "statsPeriod": "1h",
            "useOnDemandMetrics": "true",
            "yAxis": ["epm()"],
        }
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        assert response.data["meta"] == {
            "fields": {"time": "date", "epm": "rate"},
            "units": {"time": None, "epm": None},
            "isMetricsData": True,
            "isMetricsExtractedData": False,
            "tips": {},
            "datasetReason": "unchanged",
            "dataset": "metricsEnhanced",
        }

    def test_group_by_transaction(self):
        field = "count()"
        groupbys = ["transaction"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                (hour + 1) * 5,
                spec=spec,
                additional_tags={
                    "transaction": "/performance",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "transaction"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["/performance"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["/performance"]["data"]] == [
            [{"count": 5.0}],
            [{"count": 10.0}],
        ]
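
    # Shared fixture for the orderby tests below: networkId=1234 gets one count and
    # a p95 sample of 100 per hour, while networkId=5678 gets two counts and a p95
    # sample of 200, so ordering by count descending puts 5678 first.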
    def _setup_orderby_tests(self, query):
        count_spec = OnDemandMetricSpec(
            field="count()",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        p95_spec = OnDemandMetricSpec(
            field="p95(transaction.duration)",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=count_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                100,
                spec=p95_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                200,
                spec=p95_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            # Store twice as many counts for 5678 so orderby ranks it differently
            self.store_on_demand_metric(
                2,
                spec=count_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

    def test_order_by_aggregate_top_events_desc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": "-count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["5678"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 10
        data2 = response.data["1234"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 5
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_asc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 5
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 10
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_graph_different_aggregate(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 100
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 200
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_cannot_order_by_tag(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": "-networkId",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 400, response.content

    def test_order_by_two_aggregates(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()", "p95(transaction.duration)"],
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=5),
                "onDemandType": "dynamic_query",
                "orderby": ["count()", "p95(transaction.duration)"],
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 400, response.content
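
    # A custom tag used as a top-events group-by on the plain metrics dataset
    # still succeeds.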
    def test_top_events_with_tag(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()"]
        field = "count()"
        groupbys = ["some-field"]
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        self.store_on_demand_metric(
            1,
            spec=spec,
            additional_tags={
                "some-field": "bar",
                "environment": "production",
            },
            timestamp=self.day_ago,
        )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": self.day_ago,
                "end": self.day_ago + timedelta(hours=2),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "some-field",
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content