test_organization_events_stats_mep.py

from __future__ import annotations

from datetime import timedelta
from typing import Any
from unittest import mock

import pytest
from django.urls import reverse
from rest_framework.response import Response

from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
from sentry.models.environment import Environment
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.helpers.on_demand import create_widget
from sentry.utils.samples import load_data

pytestmark = pytest.mark.sentry_metrics

class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }
        self.additional_params = dict()

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    @pytest.mark.querybuilder
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
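
    # Note: epm()/tpm() normalize each bucket's event count to a per-minute rate, so
    # a 1h bucket divides by (3600.0 / 60.0) == 60 minutes; the rollup tests below
    # follow the same count / (bucket_seconds / 60) arithmetic for expected values.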

    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]
            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]

            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0

    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]
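
    # failure_rate() is failed / total per bucket: the first three hours store one
    # "ok" and one "internal_error" metric each (rate 0.5), the last three store
    # only "ok" (rate 0).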

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert mock_query.call_count == 1
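
    # All four yAxis values above resolve against the same dataset, so the endpoint
    # is expected to batch them into a single timeseries_query call rather than
    # issuing one query per axis.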

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # Even though this is a MEP-able query, metricsEnhanced=0 opts out.
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]

        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_sum_transaction_duration_with_comparison(self):
        # We store the data for the previous day (in order to have values for the comparison).
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # We store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # When the whole suite runs, shared state can leave data in the second time
        # bucket, which zerofilling then surfaces. To avoid that flaky failure, we
        # only check that the first bucket contains the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]

        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 400, response.content
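
    # With dataset=metrics there is no discover fallback: a filter the metrics
    # dataset cannot serve (here transaction.duration:<5s) is rejected with a 400
    # instead of silently switching datasets.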

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]

    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5

    def test_top_events_with_project(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data[self.project.slug]
        assert data["order"] == 0

class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True
        self.additional_params = {"forceMetricsLayer": "true"}
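
    # Subclassing the test case above re-runs every inherited test with the
    # metrics-layer feature flag enabled and forceMetricsLayer=true on each request.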

    def test_counter_standard_metric(self):
        mri = "c:transactions/usage@none"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(minutes=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1m",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        p90 = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
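
    # Each hourly bucket holds [v, 2v, 3v], so min is v and max is 3v; the expected
    # p90 of 2.8v matches linear interpolation (e.g. p90 of [10, 20, 30] is 28.0).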

    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # We store each value a second time, since we want to check the de-duplication of sets.
            for i in range(0, 2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # When multiple gauges are merged, in order to make the `last` merge work
                    # deterministically it's better to have the gauges with different timestamps
                    # so that the last value is always the same.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        last = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        sum = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        count = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandWidgets(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        Environment.get_or_create(self.project, "production")
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:on-demand-metrics-extraction-widgets": True,
            "organizations:on-demand-metrics-extraction": True,
        }

    def _make_on_demand_request(
        self, params: dict[str, Any], extra_features: dict[str, bool] | None = None
    ) -> Response:
        """Ensures that the required parameters for an on-demand request are included."""
        # Expected parameters for this helper function
        params["dataset"] = "metricsEnhanced"
        params["useOnDemandMetrics"] = "true"
        params["onDemandType"] = "dynamic_query"
        _features = {**self.features, **(extra_features or {})}
        return self.do_request(params, features=_features)
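
    # The tests below build their request dicts by hand rather than going through
    # this helper, so they can vary the dataset and onDemandType values.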

    def test_top_events_wrong_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
                "onDemandType": "not_real",
            },
        )
        assert response.status_code == 400, response.content

    def test_top_events_works_without_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )
        assert response.status_code == 200, response.content

    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )
        assert response.status_code == 200, response.content
        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
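        # Row expectations per group: hour h stores h * 62 * 24 for "foo,red" and
        # h * 60 * 24 for "bar,blue", so the two requested buckets (hours 0 and 1)
        # come out to 0.0 and 1488.0 / 1440.0 respectively.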
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_top_events_with_transaction_on_demand_and_no_environment(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )
        assert response.status_code == 200, response.content
        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]

            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_transaction_only(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        for hour in range(0, 2):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        yAxis = [field, field_two]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": yAxis + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE

        assert response.status_code == 200, response.content
        # The transaction-only query stays on the on-demand extracted data, so both
        # stored groups are present.
        assert len(response.data.keys()) == 2
        assert bool(response.data["foo,red"])
        assert bool(response.data["bar,blue"])

    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_error(
        self,
    ):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"

        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "very bad 2",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value2", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )

        yAxis = ["count()"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two] + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS

        assert response.status_code == 200, response.content
        # Fell back to discover data: the groups come from the stored error events,
        # whose customtag2 half of the group key is empty.
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["error_value2,"])
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_discover(self):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )

        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        transaction = load_data("transaction")
        transaction["timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["start_timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["tags"] = {"customtag1": "transaction_value", "query.dataset": "foo"}
        self.store_event(
            data=transaction,
            project_id=self.project.id,
        )
        for hour in range(5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        yAxis = ["count()"]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two, "customtag1", "customtag2"],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )

        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.DISCOVER

        assert response.status_code == 200, response.content
        # Mixed error and transaction data fell back to discover; both groups are
        # keyed by customtag1 with customtag2 unset (hence the trailing comma).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["transaction_value,"])
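
    # A widget whose split is already saved as TRANSACTION_LIKE must not be
    # re-saved; the results are served from extracted on-demand metrics.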
    def test_top_events_with_transaction_on_demand_passing_widget_id_saved(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            # TRANSACTION_LIKE widgets already use on-demand metrics.
            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,
        )

        for hour in range(5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]

        with mock.patch.object(widget, "save") as mock_widget_save:
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "orderby": ["-count()"],
                    "query": query,
                    "yAxis": yAxis,
                    "field": [
                        "count()",
                        "count_web_vitals(measurements.lcp, good)",
                        "customtag1",
                        "customtag2",
                    ],
                    "topEvents": 5,
                    "dataset": "metricsEnhanced",
                    "useOnDemandMetrics": "true",
                    "onDemandType": "dynamic_query",
                    "dashboardWidgetId": widget.id,
                },
            )
            # The split is already saved, so the widget must not be saved again.
            mock_widget_save.assert_not_called()

        assert response.status_code == 200, response.content

        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
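
    # Two p75 specs over different measurements hash to distinct on-demand
    # queries (asserted below) and can be charted together in one request.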
    def test_timeseries_on_demand_with_multiple_percentiles(self):
        field = "p75(measurements.fcp)"
        field_two = "p75(measurements.lcp)"
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY)
        spec_two = OnDemandMetricSpec(
            field=field_two, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )

        assert (
            spec._query_str_for_hash
            == "event.measurements.fcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        assert (
            spec_two._query_str_for_hash
            == "event.measurements.lcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )

        for count in range(4):
            self.store_on_demand_metric(
                count * 100,
                spec=spec,
                timestamp=self.day_ago + timedelta(hours=1),
            )
            self.store_on_demand_metric(
                count * 200.0,
                spec=spec_two,
                timestamp=self.day_ago + timedelta(hours=1),
            )
        yAxis = [field, field_two]

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": [field],
                "query": query,
                "yAxis": yAxis,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.fcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 225.0}],
        ]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.lcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 450.0}],
        ]
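
    # apdex(300) is computed from the satisfaction tag stored on the extracted
    # metric: one tolerable data point per bucket yields an apdex of 0.5.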
    def test_apdex_issue(self):
        field = "apdex(300)"
        groupbys = ["group_tag"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_one",
                    "environment": "production",
                    "satisfaction": "tolerable",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_two",
                    "environment": "production",
                    "satisfaction": "satisfactory",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "group_tag"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["group_one"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["group_one"]["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
        ]
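
    # Glob patterns in http.url and http.referer must survive translation into
    # the on-demand metric spec condition; the expected spec is asserted inline.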
    def test_glob_http_referer_on_demand(self):
        agg = "count()"
        network_id_tag = "networkId"
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        spec = OnDemandMetricSpec(
            field=agg,
            groupbys=[network_id_tag],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        assert spec.to_metric_spec(self.project) == {
            "category": "transaction",
            "mri": "c:transactions/on_demand@none",
            "field": None,
            "tags": [
                {"key": "query_hash", "value": "ac241f56"},
                {"key": "networkId", "field": "event.tags.networkId"},
                {"key": "environment", "field": "event.environment"},
            ],
            "condition": {
                "op": "and",
                "inner": [
                    {
                        "op": "glob",
                        "name": "event.request.url",
                        "value": ["https://sentry.io/*/foo/bar/*"],
                    },
                    {
                        "op": "glob",
                        "name": "event.request.headers.Referer",
                        "value": ["https://sentry.io/*/bar/*"],
                    },
                ],
            },
        }

        for hour in range(5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": [network_id_tag, agg],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": f"-{agg}",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": agg,
            },
        )

        assert response.status_code == 200, response.content
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
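
    # Shared helper: issues the request and checks whether the response meta
    # flags the series as extracted on-demand data for the given dataset.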
    def _test_is_metrics_extracted_data(
        self, params: dict[str, Any], expected_on_demand_query: bool, dataset: str
    ) -> dict[str, Any]:
        spec = OnDemandMetricSpec(
            field="count()",
            query="transaction.duration:>1s",
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        self.store_on_demand_metric(1, spec=spec)
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        meta = response.data["meta"]

        # This is the main thing we want to test for.
        assert meta.get("isMetricsExtractedData", False) is expected_on_demand_query
        assert meta["dataset"] == dataset

        return meta
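
    # Happy path for the helper above: an on-demand-eligible query on
    # metricsEnhanced is flagged as extracted data.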
    def test_is_metrics_extracted_data_is_included(self):
        self._test_is_metrics_extracted_data(
            {
                "dataset": "metricsEnhanced",
                "query": "transaction.duration:>=91",
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
            expected_on_demand_query=True,
            dataset="metricsEnhanced",
        )
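
    # epm() with an empty query is served from stored metrics rather than
    # on-demand extraction, per the meta flags asserted below.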
    def test_on_demand_epm_no_query(self):
        params = {
            "dataset": "metricsEnhanced",
            "environment": "production",
            "onDemandType": "dynamic_query",
            "project": self.project.id,
            "query": "",
            "statsPeriod": "1h",
            "useOnDemandMetrics": "true",
            "yAxis": ["epm()"],
        }
        response = self.do_request(params)

        assert response.status_code == 200, response.content
        assert response.data["meta"] == {
            "fields": {"time": "date", "epm_900": "rate"},
            "units": {"time": None, "epm_900": None},
            "isMetricsData": True,
            "isMetricsExtractedData": False,
            "tips": {},
            "datasetReason": "unchanged",
            "dataset": "metricsEnhanced",
        }
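
    # Grouping by the transaction tag itself also works with on-demand
    # extraction.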
    def test_group_by_transaction(self):
        field = "count()"
        groupbys = ["transaction"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(2):
            self.store_on_demand_metric(
                (hour + 1) * 5,
                spec=spec,
                additional_tags={
                    "transaction": "/performance",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )

        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "transaction"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content
        assert response.data["/performance"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["/performance"]["data"]] == [
            [{"count": 5.0}],
            [{"count": 10.0}],
        ]
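
    # Shared fixture for the orderby tests below: two networkId groups, where
    # 5678 has double the count (10 vs. 5) and the higher p95 (200 vs. 100).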
    def _setup_orderby_tests(self, query):
        count_spec = OnDemandMetricSpec(
            field="count()",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        p95_spec = OnDemandMetricSpec(
            field="p95(transaction.duration)",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )

        for hour in range(5):
            self.store_on_demand_metric(
                1,
                spec=count_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                100,
                spec=p95_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                200,
                spec=p95_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            # Store twice the count for networkId 5678 so an aggregate orderby
            # ranks the two groups differently.
            self.store_on_demand_metric(
                2,
                spec=count_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
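
    # Descending count() ranks networkId 5678 (count 10) first.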
    def test_order_by_aggregate_top_events_desc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["5678"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 10
        data2 = response.data["1234"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 5
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
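
    # Ascending count() flips the ranking: networkId 1234 (count 5) comes first.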
    def test_order_by_aggregate_top_events_asc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 5
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 10
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
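
    # The orderby aggregate (count()) may differ from the charted yAxis (p95);
    # the ranking still follows count() while the series carries p95 values.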
    def test_order_by_aggregate_top_events_graph_different_aggregate(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 100
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 200
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
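
    # Ordering top events by a plain tag is rejected with a 400.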
    def test_cannot_order_by_tag(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-networkId",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )

        assert response.status_code == 400, response.content
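
    # Ordering by two aggregates at once is likewise rejected with a 400.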
    def test_order_by_two_aggregates(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()", "p95(transaction.duration)"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": ["count()", "p95(transaction.duration)"],
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )

        assert response.status_code == 400, response.content
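
    # Top events grouped by a custom tag on the plain metrics dataset.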
    def test_top_events_with_tag(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()"]
        field = "count()"
        groupbys = ["some-field"]
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        self.store_on_demand_metric(
            1,
            spec=spec,
            additional_tags={
                "some-field": "bar",
                "environment": "production",
            },
            timestamp=self.day_ago,
        )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "some-field",
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )

        assert response.status_code == 200, response.content