# test_organization_events_stats_mep.py

from __future__ import annotations

from datetime import timedelta
from typing import Any
from unittest import mock

import pytest
from django.urls import reverse
from rest_framework.response import Response

from sentry.discover.models import DatasetSourcesTypes
from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
from sentry.models.environment import Environment
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.helpers.on_demand import create_widget
from sentry.utils.samples import load_data

pytestmark = pytest.mark.sentry_metrics


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"
    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }
        self.additional_params = dict()

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    @pytest.mark.querybuilder
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
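
    # Editorial note on the rollup math above: epm() reports events per minute,
    # so a 1h bucket holding n events yields n / (3600.0 / 60.0) == n / 60.
    # For event_counts [6, 0, 6, 3, 0, 3] the expected series is
    # [0.1, 0.0, 0.1, 0.05, 0.0, 0.05].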
    @pytest.mark.querybuilder
    def test_throughput_spm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_span_metric(
                    1,
                    timestamp=self.day_ago + timedelta(hours=hour, minutes=minute),
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": "spm()",
                "project": self.project.id,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["meta"]["dataset"] == "metrics"
        rows = data[0:6]
        for test in zip(event_counts, rows):
            assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]
            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0

    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]
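
    # Editorial note: failure_rate() is failed / total transactions per bucket.
    # The first three hours store one "ok" plus one "internal_error" metric
    # (1 / 2 == 0.5); the last three store only "ok" (0 / 1 == 0), matching the
    # asserted series above.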
    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"
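
    # Editorial note: isMetricsData is treated above as the signal for whether a
    # query was served from metrics. Filters that only apply to error events
    # (event.type:error) or that need raw-event precision (the
    # transaction.duration:<15min outlier filter) presumably cannot be answered
    # from metrics, so the endpoint falls back and reports isMetricsData=False.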
    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # Should be a MEP-able query
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_sum_transaction_duration_with_comparison(self):
        # We store the data for the previous day (in order to have values for the comparison).
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # We store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # When the full suite runs, shared state can leave data in the second time bucket,
        # which would otherwise be filled automatically by the zerofilling. To avoid that
        # flaky failure, we only check that the first bucket contains the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}
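
    # Editorial note: comparisonDelta is given in seconds, so 86400 compares
    # each bucket against the same window one day earlier. The first bucket
    # above pairs today's sum (123 + 456 == 579.0) with yesterday's
    # (1 + 2 == 3.0) as {"comparisonCount": 3.0, "count": 579.0}.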
    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}
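
    # Editorial note: internal_metric above is an MRI (Metric Resource
    # Identifier) of the form <type>:<namespace>/<name>@<unit>;
    # "d:transactions/measurements.datacenter_memory@pebibyte" is a distribution
    # in the transactions namespace with a pebibyte unit, which is why the meta
    # assertions expect a "size" field with unit "pebibyte".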
    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]
        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 400, response.content
    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]
    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5

    def test_top_events_with_project(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data[f"{self.project.slug}"]
        assert data["order"] == 0

    def test_split_decision_for_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "blah", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"], "error.type:blah", self.project, discover_widget_split=None
        )
        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:blah",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )
        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) is DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value
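
    # Editorial note: widgets saved without an explicit dataset get one assigned
    # by the endpoint, which records both the split and how it was decided. With
    # real error data present, the decision above is ERROR_EVENTS with
    # dataset_source INFERRED; the ambiguous-widget test further below instead
    # expects FORCED, presumably the marker for a default chosen without
    # supporting data.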
    def test_split_decision_for_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        _, widget, __ = create_widget(
            ["count()", "transaction.name"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None
        response = self.do_request(
            {
                "field": ["count()", "transaction.name"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )
        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) is DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_top_events_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "test_error", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"],
            "error.type:test_error",
            self.project,
            discover_widget_split=None,
        )
        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:test_error",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )
        assert response.status_code == 200, response.content
        # Only a singular result for the test_error event
        assert len(response.data) == 1
        # Results are grouped by the error type
        assert response.data.get("test_error").get("meta").get(
            "discoverSplitDecision"
        ) is DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_top_events_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
            tags={"transaction": "foo_transaction"},
        )
        _, widget, __ = create_widget(
            ["count()", "transaction"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None
        response = self.do_request(
            {
                "field": ["count()", "transaction"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )
        assert response.status_code == 200, response.content
        # Only a singular result for the transaction
        assert len(response.data) == 1
        # Results are grouped by the transaction
        assert response.data.get("foo_transaction").get("meta").get(
            "discoverSplitDecision"
        ) is DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value
    def test_split_decision_for_ambiguous_widget_without_data(self):
        _, widget, __ = create_widget(
            ["count()", "transaction.name", "error.type"],
            "",
            self.project,
            discover_widget_split=None,
        )
        assert widget.discover_widget_split is None
        response = self.do_request(
            {
                "field": ["count()", "transaction.name", "error.type"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            },
            features={"organizations:performance-discover-dataset-selector": True},
        )
        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)
        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.FORCED.value

    def test_inp_percentile(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(
                111,
                metric="measurements.inp",
                timestamp=timestamp,
                use_case_id=UseCaseID.TRANSACTIONS,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.inp)"],
                "project": self.project.id,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        assert len(data["data"]) == 6
        assert data["isMetricsData"]
        assert data["meta"]["fields"]["p75_measurements_inp"] == "duration"
        for item in data["data"]:
            assert item[1][0]["count"] == 111

class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True
        self.additional_params = {"forceMetricsLayer": "true"}
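
    # Editorial note: this subclass re-runs every inherited test with the
    # metrics layer enabled, via the "organizations:use-metrics-layer" feature
    # and the forceMetricsLayer=true request parameter set in setUp above, and
    # adds MRI-based cases specific to that code path.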
    def test_counter_standard_metric(self):
        mri = "c:transactions/usage@none"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(minutes=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1m",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        p90 = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
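
    # Editorial note: each bucket of the distribution test above stores v, 2v,
    # and 3v, so min == v and max == 3v. The asserted p90 of 2.8v is consistent
    # with linear interpolation over the sorted values: rank 0.9 * (3 - 1) = 1.8
    # gives 2v + 0.8 * (3v - 2v) = 2.8v, e.g. 28.0 for v == 10.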
    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # We store each value a second time, since we want to check the de-duplication of sets.
            for i in range(0, 2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # When multiple gauges are merged, in order to make the `last` merge work
                    # deterministically it's better to have the gauges with different timestamps
                    # so that the last value is always the same.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        last = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        sum = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        count = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
    @pytest.mark.querybuilder
    def test_throughput_spm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_span_metric(
                    1,
                    timestamp=self.day_ago + timedelta(hours=hour, minutes=minute),
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": "spm()",
                "project": self.project.id,
                "dataset": "metrics",
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["meta"]["dataset"] == "metrics"
        rows = data[0:6]
        for test in zip(event_counts, rows):
            assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)


class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandWidgets(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        Environment.get_or_create(self.project, "production")
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:on-demand-metrics-extraction-widgets": True,
            "organizations:on-demand-metrics-extraction": True,
        }

    def _make_on_demand_request(
        self, params: dict[str, Any], extra_features: dict[str, bool] | None = None
    ) -> Response:
        """Ensures that the required parameters for an on-demand request are included."""
        # Expected parameters for this helper function
        params["dataset"] = "metricsEnhanced"
        params["useOnDemandMetrics"] = "true"
        params["onDemandType"] = "dynamic_query"
        _features = {**self.features, **(extra_features or {})}
        return self.do_request(params, features=_features)
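
    # Editorial note: a minimal usage sketch for the helper above (hypothetical
    # parameters, not a test from the original suite):
    #
    #     response = self._make_on_demand_request(
    #         {"yAxis": "count()", "interval": "1h", "query": "transaction.duration:>=100"}
    #     )
    #     assert response.status_code == 200
    #
    # The helper pins dataset="metricsEnhanced", useOnDemandMetrics, and
    # onDemandType, so callers only supply the query-specific parameters.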
  1132. def test_top_events_wrong_on_demand_type(self):
  1133. query = "transaction.duration:>=100"
  1134. yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
  1135. response = self.do_request(
  1136. data={
  1137. "project": self.project.id,
  1138. "start": iso_format(self.day_ago),
  1139. "end": iso_format(self.day_ago + timedelta(hours=2)),
  1140. "interval": "1h",
  1141. "orderby": ["-count()"],
  1142. "environment": "production",
  1143. "query": query,
  1144. "yAxis": yAxis,
  1145. "field": [
  1146. "count()",
  1147. ],
  1148. "topEvents": 5,
  1149. "dataset": "metrics",
  1150. "useOnDemandMetrics": "true",
  1151. "onDemandType": "not_real",
  1152. },
  1153. )
  1154. assert response.status_code == 400, response.content
  1155. def test_top_events_works_without_on_demand_type(self):
  1156. query = "transaction.duration:>=100"
  1157. yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
  1158. response = self.do_request(
  1159. data={
  1160. "project": self.project.id,
  1161. "start": iso_format(self.day_ago),
  1162. "end": iso_format(self.day_ago + timedelta(hours=2)),
  1163. "interval": "1h",
  1164. "orderby": ["-count()"],
  1165. "environment": "production",
  1166. "query": query,
  1167. "yAxis": yAxis,
  1168. "field": [
  1169. "count()",
  1170. ],
  1171. "topEvents": 5,
  1172. "dataset": "metrics",
  1173. "useOnDemandMetrics": "true",
  1174. },
  1175. )
  1176. assert response.status_code == 200, response.content
    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
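
    # Same fixture as the previous test, but the request omits the environment
    # filter; the stored metrics still carry the environment tag.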
    def test_top_events_with_transaction_on_demand_and_no_environment(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
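
    # The next three tests pass a dashboardWidgetId for a widget whose
    # discover_widget_split is unset; the endpoint infers the split from the
    # available data and persists it on the widget.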
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_transaction_only(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": yAxis + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE

        assert response.status_code == 200, response.content
        # The split was saved as TRANSACTION_LIKE, so the on-demand metrics
        # groups are returned rather than a discover fallback.
        assert len(response.data.keys()) == 2
        assert bool(response.data["foo,red"])
        assert bool(response.data["bar,blue"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_error(
        self,
    ):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "very bad 2",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value2", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two] + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS

        assert response.status_code == 200, response.content
        # The split was saved as ERROR_EVENTS and the two stored error events
        # are returned; customtag2 is never set, hence the empty second segment
        # in the group keys.
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["error_value2,"])
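
    # When the query matches both an error and a transaction event, the split
    # cannot be narrowed down and is saved as DISCOVER.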
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_discover(self):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        transaction = load_data("transaction")
        transaction["timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["start_timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["tags"] = {"customtag1": "transaction_value", "query.dataset": "foo"}
        self.store_event(
            data=transaction,
            project_id=self.project.id,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two, "customtag1", "customtag2"],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.DISCOVER

        assert response.status_code == 200, response.content
        # Fell back to discover data, so the stored error and transaction
        # events are returned instead of the on-demand metrics.
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["transaction_value,"])
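
    # When the widget already has a saved split, the endpoint uses it as-is
    # and must not write to the widget again.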
    def test_top_events_with_transaction_on_demand_passing_widget_id_saved(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            # Transaction-like widgets use on-demand extraction.
            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        with mock.patch.object(widget, "save") as mock_widget_save:
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "orderby": ["-count()"],
                    "query": query,
                    "yAxis": yAxis,
                    "field": [
                        "count()",
                        "count_web_vitals(measurements.lcp, good)",
                        "customtag1",
                        "customtag2",
                    ],
                    "topEvents": 5,
                    "dataset": "metricsEnhanced",
                    "useOnDemandMetrics": "true",
                    "onDemandType": "dynamic_query",
                    "dashboardWidgetId": widget.id,
                },
            )
            # The split is already saved, so the widget must not be saved again.
            mock_widget_save.assert_not_called()

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
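
    # p75 over the stored distributions: fcp gets values 0, 100, 200, 300 and
    # lcp gets 0, 200, 400, 600, so the expected second-bucket values of 225.0
    # and 450.0 are consistent with linear percentile interpolation.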
    def test_timeseries_on_demand_with_multiple_percentiles(self):
        field = "p75(measurements.fcp)"
        field_two = "p75(measurements.lcp)"
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY)
        spec_two = OnDemandMetricSpec(
            field=field_two, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        assert (
            spec._query_str_for_hash
            == "event.measurements.fcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        assert (
            spec_two._query_str_for_hash
            == "event.measurements.lcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )

        for count in range(0, 4):
            self.store_on_demand_metric(
                count * 100,
                spec=spec,
                timestamp=self.day_ago + timedelta(hours=1),
            )
            self.store_on_demand_metric(
                count * 200.0,
                spec=spec_two,
                timestamp=self.day_ago + timedelta(hours=1),
            )

        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": [field],
                "query": query,
                "yAxis": yAxis,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.fcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 225.0}],
        ]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.lcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 450.0}],
        ]
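
    # Apdex(300) is driven by the stored "satisfaction" tag: group_one is all
    # "tolerable" (each transaction contributes 0.5) while group_two is all
    # "satisfactory", so group_one's apdex comes out to 0.5 in every bucket.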
    def test_apdex_issue(self):
        field = "apdex(300)"
        groupbys = ["group_tag"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_one",
                    "environment": "production",
                    "satisfaction": "tolerable",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_two",
                    "environment": "production",
                    "satisfaction": "satisfactory",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "group_tag"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["group_one"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["group_one"]["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
        ]
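
    # The to_metric_spec assertion below pins down how http.url and
    # http.referer glob filters translate into Relay extraction conditions on
    # event.request.url and event.request.headers.Referer.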
    def test_glob_http_referer_on_demand(self):
        agg = "count()"
        network_id_tag = "networkId"
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        spec = OnDemandMetricSpec(
            field=agg,
            groupbys=[network_id_tag],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        assert spec.to_metric_spec(self.project) == {
            "category": "transaction",
            "mri": "c:transactions/on_demand@none",
            "field": None,
            "tags": [
                {"key": "query_hash", "value": "ac241f56"},
                {"key": "networkId", "field": "event.tags.networkId"},
                {"key": "environment", "field": "event.environment"},
            ],
            "condition": {
                "op": "and",
                "inner": [
                    {
                        "op": "glob",
                        "name": "event.request.url",
                        "value": ["https://sentry.io/*/foo/bar/*"],
                    },
                    {
                        "op": "glob",
                        "name": "event.request.headers.Referer",
                        "value": ["https://sentry.io/*/bar/*"],
                    },
                ],
            },
        }

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": [network_id_tag, agg],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": f"-{agg}",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": agg,
            },
        )

        assert response.status_code == 200, response.content
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
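
    # Shared helper: stores a single on-demand metric, runs the given request,
    # and asserts whether the response meta marks the result as extracted data.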
    def _test_is_metrics_extracted_data(
        self, params: dict[str, Any], expected_on_demand_query: bool, dataset: str
    ) -> dict[str, Any]:
        spec = OnDemandMetricSpec(
            field="count()",
            query="transaction.duration:>1s",
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        self.store_on_demand_metric(1, spec=spec)
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        meta = response.data["meta"]

        # This is the main thing we want to test for
        assert meta.get("isMetricsExtractedData", False) is expected_on_demand_query
        assert meta["dataset"] == dataset

        return meta

    def test_is_metrics_extracted_data_is_included(self):
        self._test_is_metrics_extracted_data(
            {
                "dataset": "metricsEnhanced",
                "query": "transaction.duration:>=91",
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
            expected_on_demand_query=True,
            dataset="metricsEnhanced",
        )

    def test_on_demand_epm_no_query(self):
        params = {
            "dataset": "metricsEnhanced",
            "environment": "production",
            "onDemandType": "dynamic_query",
            "project": self.project.id,
            "query": "",
            "statsPeriod": "1h",
            "useOnDemandMetrics": "true",
            "yAxis": ["epm()"],
        }
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        assert response.data["meta"] == {
            "fields": {"time": "date", "epm_900": "rate"},
            "units": {"time": None, "epm_900": None},
            "isMetricsData": True,
            "isMetricsExtractedData": False,
            "tips": {},
            "datasetReason": "unchanged",
            "dataset": "metricsEnhanced",
        }
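
    # "transaction" is a supported group-by for on-demand extraction; the
    # stored values (5 at hour 0, 10 at hour 1) come back under the
    # "/performance" series.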
    def test_group_by_transaction(self):
        field = "count()"
        groupbys = ["transaction"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                (hour + 1) * 5,
                spec=spec,
                additional_tags={
                    "transaction": "/performance",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "transaction"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["/performance"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["/performance"]["data"]] == [
            [{"count": 5.0}],
            [{"count": 10.0}],
        ]

    def _setup_orderby_tests(self, query):
        count_spec = OnDemandMetricSpec(
            field="count()",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        p95_spec = OnDemandMetricSpec(
            field="p95(transaction.duration)",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=count_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                100,
                spec=p95_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                200,
                spec=p95_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            # Store twice as many counts for networkId 5678 so that an
            # ascending orderby puts it after 1234.
            self.store_on_demand_metric(
                2,
                spec=count_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
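
    # With the fixture above, after 5 hours networkId "1234" totals count 5
    # with p95 100, and "5678" totals count 10 with p95 200; the orderby
    # tests below rely on these totals.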
    def test_order_by_aggregate_top_events_desc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["5678"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 10
        data2 = response.data["1234"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 5
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_asc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 5
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 10
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }

    def test_order_by_aggregate_top_events_graph_different_aggregate(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 100
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 200
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
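
    # Ordering by a plain tag, or by more than one aggregate, is not supported
    # here; both of the following requests are rejected with a 400.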
    def test_cannot_order_by_tag(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-networkId",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 400, response.content

    def test_order_by_two_aggregates(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()", "p95(transaction.duration)"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": ["count()", "p95(transaction.duration)"],
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 400, response.content

    def test_top_events_with_tag(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()"]
        field = "count()"
        groupbys = ["some-field"]
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        self.store_on_demand_metric(
            1,
            spec=spec,
            additional_tags={
                "some-field": "bar",
                "environment": "production",
            },
            timestamp=self.day_ago,
        )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "some-field",
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content