# test_organization_events_stats_mep.py

from __future__ import annotations

from datetime import timedelta
from typing import Any
from unittest import mock

import pytest
from django.urls import reverse
from rest_framework.response import Response

from sentry.discover.models import DatasetSourcesTypes
from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
from sentry.models.environment import Environment
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.extraction import MetricSpecType, OnDemandMetricSpec
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.testutils.helpers.on_demand import create_widget
from sentry.utils.samples import load_data

pytestmark = pytest.mark.sentry_metrics
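

# Exercises the metrics-enhanced performance (MEP) path of the events-stats
# endpoint. `day_ago` is pinned to 10:00 UTC yesterday so that hourly and
# daily rollup buckets line up deterministically across tests.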
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    METRIC_STRINGS = [
        "foo_transaction",
        "d:transactions/measurements.datacenter_memory@pebibyte",
    ]

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:performance-use-metrics": True,
        }
        self.additional_params = dict()

    # These throughput tests should roughly match the ones in OrganizationEventsStatsEndpointTest
    @pytest.mark.querybuilder
    def test_throughput_epm_hour_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=6)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)
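
    # epm()/tpm() normalize each bucket's event count to events per minute:
    # count / (bucket_seconds / 60). With a 1h interval, 6 events yield
    # 6 / 60 = 0.1; with a 24h interval the divisor is 1440.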
    def test_throughput_epm_day_rollup(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute)
                )

        for axis in ["epm()", "tpm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=24)),
                    "interval": "24h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 2
            assert response.data["isMetricsData"]
            assert data[0][1][0]["count"] == sum(event_counts) / (86400.0 / 60.0)

    def test_throughput_epm_hour_rollup_offset_of_hour(self):
        # Each of these denotes how many events to create in each hour
        event_counts = [6, 0, 6, 3, 0, 3]
        for hour, count in enumerate(event_counts):
            for minute in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(hours=hour, minutes=minute + 30)
                )

        for axis in ["tpm()", "epm()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago + timedelta(minutes=30)),
                    "end": iso_format(self.day_ago + timedelta(hours=6, minutes=30)),
                    "interval": "1h",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / (3600.0 / 60.0)

    def test_throughput_eps_minute_rollup(self):
        # Each of these denotes how many events to create in each minute
        event_counts = [6, 0, 6, 3, 0, 3]
        for minute, count in enumerate(event_counts):
            for second in range(count):
                self.store_transaction_metric(
                    1, timestamp=self.day_ago + timedelta(minutes=minute, seconds=second)
                )

        for axis in ["eps()", "tps()"]:
            response = self.do_request(
                data={
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(minutes=6)),
                    "interval": "1m",
                    "yAxis": axis,
                    "project": self.project.id,
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            assert len(data) == 6
            assert response.data["isMetricsData"]
            rows = data[0:6]
            for test in zip(event_counts, rows):
                assert test[1][1][0]["count"] == test[0] / 60.0
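
    # failure_rate() is failed transactions / all transactions per bucket;
    # "internal_error" counts as failed while "ok" does not, so the first
    # three hours below (1 ok + 1 internal_error each) assert to 0.5.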
    def test_failure_rate(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(1, tags={"transaction.status": "ok"}, timestamp=timestamp)
            if hour < 3:
                self.store_transaction_metric(
                    1, tags={"transaction.status": "internal_error"}, timestamp=timestamp
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["failure_rate()"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 6
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0.5}],
            [{"count": 0}],
            [{"count": 0}],
            [{"count": 0}],
        ]

    def test_percentiles_multi_axis(self):
        for hour in range(6):
            timestamp = self.day_ago + timedelta(hours=hour, minutes=30)
            self.store_transaction_metric(111, timestamp=timestamp)
            self.store_transaction_metric(222, metric="measurements.lcp", timestamp=timestamp)

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": ["p75(measurements.lcp)", "p75(transaction.duration)"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        lcp = response.data["p75(measurements.lcp)"]
        duration = response.data["p75(transaction.duration)"]
        assert len(duration["data"]) == 6
        assert duration["isMetricsData"]
        assert len(lcp["data"]) == 6
        assert lcp["isMetricsData"]
        for item in duration["data"]:
            assert item[1][0]["count"] == 111
        for item in lcp["data"]:
            assert item[1][0]["count"] == 222

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query", return_value={})
    def test_multiple_yaxis_only_one_query(self, mock_query):
        self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": ["epm()", "eps()", "tpm()", "p50(transaction.duration)"],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert mock_query.call_count == 1

    def test_aggregate_function_user_count(self):
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            1, metric="user", timestamp=self.day_ago + timedelta(hours=1, minutes=30)
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count_unique(user)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [[{"count": 1}], [{"count": 1}]]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
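
    # The "metricsEnhanced" dataset falls back to indexed transaction data
    # whenever a query cannot be served from metrics, e.g. filters on
    # event.type:error or on values that are not extracted. The response
    # advertises which path was taken via `isMetricsData`.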
    def test_non_mep_query_fallsback(self):
        def get_mep(query):
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "query": query,
                    "yAxis": ["epm()"],
                    "dataset": "metricsEnhanced",
                    **self.additional_params,
                },
            )
            assert response.status_code == 200, response.content
            return response.data["isMetricsData"]

        assert get_mep(""), "empty query"
        assert get_mep("event.type:transaction"), "event type transaction"
        assert not get_mep("event.type:error"), "event type error"
        assert not get_mep("transaction.duration:<15min"), "outlier filter"
        assert get_mep("epm():>0.01"), "throughput filter"
        assert not get_mep(
            "event.type:transaction OR event.type:error"
        ), "boolean with non-mep filter"
        assert get_mep(
            "event.type:transaction OR transaction:foo_transaction"
        ), "boolean with mep filter"

    def test_having_condition_with_preventing_aggregates(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "p95():<5s",
                "yAxis": ["epm()"],
                "dataset": "metricsEnhanced",
                "preventMetricAggregates": "1",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_explicit_not_mep(self):
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                # Would be a MEP-able query, but metricsEnhanced is explicitly disabled
                "query": "",
                "yAxis": ["epm()"],
                "metricsEnhanced": "0",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert not response.data["isMetricsData"]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]

    def test_sum_transaction_duration(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}
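
    # comparisonDelta is given in seconds; 86400 compares each bucket against
    # the same bucket one day earlier, returned as `comparisonCount`.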
    def test_sum_transaction_duration_with_comparison(self):
        # We store the data for the previous day (in order to have values for the comparison).
        self.store_transaction_metric(
            1, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        self.store_transaction_metric(
            2, timestamp=self.day_ago - timedelta(days=1) + timedelta(minutes=30)
        )
        # We store the data for today.
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(days=1)),
                "interval": "1d",
                "yAxis": "sum(transaction.duration)",
                "comparisonDelta": 86400,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        # For some reason, if all tests run, there is some shared state that makes this test
        # have data in the second time bucket, which is filled automatically by the zerofilling.
        # In order to avoid this flaky failure, we only check that the first bucket contains
        # the actual data.
        assert [attrs for time, attrs in response.data["data"]][0] == [
            {"comparisonCount": 3.0, "count": 579.0}
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_transaction_duration": "duration"}
        assert meta["units"] == {"time": None, "sum_transaction_duration": "millisecond"}

    def test_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "sum(measurements.datacenter_memory)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert [attrs for time, attrs in response.data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        meta = response.data["meta"]
        assert meta["isMetricsData"] == response.data["isMetricsData"]
        assert meta["fields"] == {"time": "date", "sum_measurements_datacenter_memory": "size"}
        assert meta["units"] == {"time": None, "sum_measurements_datacenter_memory": "pebibyte"}

    def test_does_not_fallback_if_custom_metric_is_out_of_request_time_range(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        meta = response.data["meta"]
        assert response.status_code == 200, response.content
        assert response.data["isMetricsData"]
        assert meta["isMetricsData"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_yaxis_custom_measurement(self):
        self.store_transaction_metric(
            123,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        self.store_transaction_metric(
            456,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        self.store_transaction_metric(
            789,
            metric="measurements.bytes_transfered",
            internal_metric="d:transactions/measurements.datacenter_memory@pebibyte",
            entity="metrics_distributions",
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "sum(measurements.datacenter_memory)",
                    "p50(measurements.datacenter_memory)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        sum_data = response.data["sum(measurements.datacenter_memory)"]
        p50_data = response.data["p50(measurements.datacenter_memory)"]
        assert sum_data["isMetricsData"]
        assert p50_data["isMetricsData"]
        assert [attrs for time, attrs in sum_data["data"]] == [
            [{"count": 123}],
            [{"count": 1245}],
        ]
        assert [attrs for time, attrs in p50_data["data"]] == [
            [{"count": 123}],
            [{"count": 622.5}],
        ]

        sum_meta = sum_data["meta"]
        assert sum_meta["isMetricsData"] == sum_data["isMetricsData"]
        assert sum_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert sum_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }

        p50_meta = p50_data["meta"]
        assert p50_meta["isMetricsData"] == p50_data["isMetricsData"]
        assert p50_meta["fields"] == {
            "time": "date",
            "sum_measurements_datacenter_memory": "size",
            "p50_measurements_datacenter_memory": "size",
        }
        assert p50_meta["units"] == {
            "time": None,
            "sum_measurements_datacenter_memory": "pebibyte",
            "p50_measurements_datacenter_memory": "pebibyte",
        }
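
    # The plain "metrics" dataset never falls back to indexed data: queries it
    # cannot serve from metrics (such as this duration filter) are rejected
    # outright with a 400.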
    def test_dataset_metrics_does_not_fallback(self):
        self.store_transaction_metric(123, timestamp=self.day_ago + timedelta(minutes=30))
        self.store_transaction_metric(456, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        self.store_transaction_metric(789, timestamp=self.day_ago + timedelta(hours=1, minutes=30))
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.duration:<5s",
                "yAxis": "sum(transaction.duration)",
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 400, response.content

    def test_title_filter(self):
        self.store_transaction_metric(
            123,
            tags={"transaction": "foo_transaction"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "title:foo_transaction",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_transaction_status_unknown_error(self):
        self.store_transaction_metric(
            123,
            tags={"transaction.status": "unknown"},
            timestamp=self.day_ago + timedelta(minutes=30),
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "query": "transaction.status:unknown_error",
                "yAxis": [
                    "sum(transaction.duration)",
                ],
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [
            [{"count": 123}],
            [{"count": 0}],
        ]

    def test_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "p99(measurements.custom)",
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["meta"]
        assert meta["fields"] == {"time": "date", "p99_measurements_custom": "size"}
        assert meta["units"] == {"time": None, "p99_measurements_custom": "kibibyte"}

    def test_multi_series_custom_performance_metric_meta_contains_field_and_unit_data(self):
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.custom@kibibyte",
            entity="metrics_distributions",
        )
        self.store_transaction_metric(
            123,
            timestamp=self.day_ago + timedelta(hours=1),
            internal_metric="d:transactions/measurements.another.custom@pebibyte",
            entity="metrics_distributions",
        )
        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": [
                    "p95(measurements.custom)",
                    "p99(measurements.custom)",
                    "p99(measurements.another.custom)",
                ],
                "query": "",
                **self.additional_params,
            },
        )
        assert response.status_code == 200
        meta = response.data["p95(measurements.custom)"]["meta"]
        assert meta["fields"] == {
            "time": "date",
            "p95_measurements_custom": "size",
            "p99_measurements_custom": "size",
            "p99_measurements_another_custom": "size",
        }
        assert meta["units"] == {
            "time": None,
            "p95_measurements_custom": "kibibyte",
            "p99_measurements_custom": "kibibyte",
            "p99_measurements_another_custom": "pebibyte",
        }
        assert meta == response.data["p99(measurements.custom)"]["meta"]
        assert meta == response.data["p99(measurements.another.custom)"]["meta"]
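
    # Top-events (topEvents=N) responses are keyed by group value rather than
    # returned as a single series; each group carries its own `order`.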
    def test_no_top_events_with_project_field(self):
        project = self.create_project()
        response = self.do_request(
            data={
                # make sure to query the project with 0 events
                "project": project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "yAxis": "count()",
                "orderby": ["-count()"],
                "field": ["count()", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        # When there are no top events, we do not return an empty dict.
        # Instead, we return a single zero-filled series for an empty graph.
        data = response.data["data"]
        assert [attrs for time, attrs in data] == [[{"count": 0}], [{"count": 0}]]

    def test_top_events_with_transaction(self):
        transaction_spec = [("foo", 100), ("bar", 200), ("baz", 300)]
        for offset in range(5):
            for transaction, duration in transaction_spec:
                self.store_transaction_metric(
                    duration,
                    tags={"transaction": f"{transaction}_transaction"},
                    timestamp=self.day_ago + timedelta(hours=offset, minutes=30),
                )

        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "transaction"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        for position, (transaction, duration) in enumerate(transaction_spec):
            data = response.data[f"{transaction}_transaction"]
            chart_data = data["data"]
            # orderby is descending, so the slowest transaction comes first
            assert data["order"] == 2 - position
            assert [attrs for time, attrs in chart_data] == [[{"count": duration}]] * 5

    def test_top_events_with_project(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "interval": "1h",
                "yAxis": "p75(transaction.duration)",
                "orderby": ["-p75(transaction.duration)"],
                "field": ["p75(transaction.duration)", "project"],
                "topEvents": 5,
                "dataset": "metrics",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data[f"{self.project.slug}"]
        assert data["order"] == 0
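
    # The split-decision tests cover how ambiguous "metricsEnhanced" dashboard
    # widgets get classified as error or transaction widgets: the decision is
    # reported in the response meta as `discoverSplitDecision` and persisted on
    # the widget, with dataset_source INFERRED when derived from data, fields,
    # or the query, and FORCED when no signal is available.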
    def test_split_decision_for_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "blah", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"], "error.type:blah", self.project, discover_widget_split=None
        )

        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:blah",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )

        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
        )
        _, widget, __ = create_widget(
            ["count()", "transaction.name"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction.name"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )

        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_top_events_errors_widget(self):
        error_data = load_data("python", timestamp=before_now(minutes=1))
        self.store_event(
            data={
                **error_data,
                "exception": {"values": [{"type": "test_error", "data": {"values": []}}]},
            },
            project_id=self.project.id,
        )
        _, widget, __ = create_widget(
            ["count()", "error.type"],
            "error.type:test_error",
            self.project,
            discover_widget_split=None,
        )

        response = self.do_request(
            {
                "field": ["count()", "error.type"],
                "query": "error.type:test_error",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )

        assert response.status_code == 200, response.content
        # Only a singular result for the test_error event
        assert len(response.data) == 1
        # Results are grouped by the error type
        assert response.data.get("test_error").get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_top_events_transactions_widget(self):
        self.store_transaction_metric(
            100,
            timestamp=self.day_ago + timedelta(hours=1, minutes=30),
            tags={"transaction": "foo_transaction"},
        )
        _, widget, __ = create_widget(
            ["count()", "transaction"], "", self.project, discover_widget_split=None
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
                "topEvents": 5,
            }
        )

        assert response.status_code == 200, response.content
        # Only a singular result for the transaction
        assert len(response.data) == 1
        # Results are grouped by the transaction
        assert response.data.get("foo_transaction").get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.TRANSACTION_LIKE)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert widget.dataset_source == DatasetSourcesTypes.INFERRED.value

    def test_split_decision_for_ambiguous_widget_without_data(self):
        _, widget, __ = create_widget(
            ["count()", "transaction.op", "error.type"],
            "",
            self.project,
            discover_widget_split=None,
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()", "transaction.op", "error.type"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            },
            features={"organizations:performance-discover-dataset-selector": True},
        )

        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert widget.dataset_source == DatasetSourcesTypes.FORCED.value

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query")
    def test_split_decision_can_be_inferred_from_fields(self, mock_mep_query):
        _, widget, __ = create_widget(
            ["count()"], "", self.project, discover_widget_split=None, columns=["error.type"]
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["error.type", "count()"],
                "query": "",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )

        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        # This is the "original" data call that gets bypassed by the inference
        mock_mep_query.assert_not_called()

    @mock.patch("sentry.snuba.metrics_enhanced_performance.timeseries_query")
    def test_split_decision_can_be_inferred_from_query(self, mock_mep_query):
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )
        assert widget.discover_widget_split is None

        response = self.do_request(
            {
                "field": ["count()"],
                "query": "error.type:blah",
                "dataset": "metricsEnhanced",
                "per_page": 50,
                "dashboardWidgetId": widget.id,
            }
        )

        assert response.status_code == 200, response.content
        assert response.data.get("meta").get(
            "discoverSplitDecision"
        ) == DashboardWidgetTypes.get_type_name(DashboardWidgetTypes.ERROR_EVENTS)

        widget.refresh_from_db()
        assert widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        # This is the "original" data call that gets bypassed by the inference
        mock_mep_query.assert_not_called()
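

# Re-runs the entire MEP suite above with the metrics layer enabled, so every
# inherited test is exercised against both query paths; the extra tests below
# cover the MRI-addressed metric types only supported on this path.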
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True
        self.additional_params = {"forceMetricsLayer": "true"}

    def test_counter_standard_metric(self):
        mri = "c:transactions/usage@none"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(minutes=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1m",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_counter_custom_metric(self):
        mri = "c:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            self.store_transaction_metric(
                value,
                metric=mri,
                internal_metric=mri,
                entity="metrics_counters",
                timestamp=self.day_ago + timedelta(hours=index),
                use_case_id=UseCaseID.CUSTOM,
            )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"sum({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [10, 20, 30, 40, 50, 60]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_distribution_custom_metric(self):
        mri = "d:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 2, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_distributions",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"min({mri})", f"max({mri})", f"p90({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        p90 = data[f"p90({mri})"]["data"]
        for (_, value), expected_value in zip(p90, [28.0, 56.0, 84.0, 112.0, 140.0, 168.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]

    def test_set_custom_metric(self):
        mri = "s:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            # We store each value a second time, since we want to check the de-duplication of sets.
            for i in range(0, 2):
                self.store_transaction_metric(
                    value,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_sets",
                    timestamp=self.day_ago + timedelta(hours=index),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [f"count_unique({mri})"],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        for (_, value), expected_value in zip(data, [1, 1, 1, 1, 1, 1]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
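
    # Gauges carry min/max/last/sum/count sub-aggregates, each queryable as its
    # own yAxis; storing value and value * 3 per bucket makes them diverge.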
    def test_gauge_custom_metric(self):
        mri = "g:custom/sentry.process_profile.track_outcome@second"
        for index, value in enumerate((10, 20, 30, 40, 50, 60)):
            for multiplier in (1, 3):
                self.store_transaction_metric(
                    value * multiplier,
                    metric=mri,
                    internal_metric=mri,
                    entity="metrics_gauges",
                    # When multiple gauges are merged, in order to make the `last` merge work
                    # deterministically it's better to have the gauges with different timestamps
                    # so that the last value is always the same.
                    timestamp=self.day_ago + timedelta(hours=index, minutes=multiplier),
                    use_case_id=UseCaseID.CUSTOM,
                )

        response = self.do_request(
            data={
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=6)),
                "interval": "1h",
                "yAxis": [
                    f"min({mri})",
                    f"max({mri})",
                    f"last({mri})",
                    f"sum({mri})",
                    f"count({mri})",
                ],
                "project": self.project.id,
                "dataset": "metricsEnhanced",
                **self.additional_params,
            },
        )
        assert response.status_code == 200, response.content
        data = response.data
        min = data[f"min({mri})"]["data"]
        for (_, value), expected_value in zip(min, [10.0, 20.0, 30.0, 40.0, 50.0, 60.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        max = data[f"max({mri})"]["data"]
        for (_, value), expected_value in zip(max, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        last = data[f"last({mri})"]["data"]
        for (_, value), expected_value in zip(last, [30.0, 60.0, 90.0, 120.0, 150.0, 180.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        sum = data[f"sum({mri})"]["data"]
        for (_, value), expected_value in zip(sum, [40.0, 80.0, 120.0, 160.0, 200.0, 240.0]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
        count = data[f"count({mri})"]["data"]
        for (_, value), expected_value in zip(count, [40, 80, 120, 160, 200, 240]):
            assert value[0]["count"] == expected_value  # type: ignore[index]
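

# Covers widgets backed by on-demand extracted metrics: specs are derived from
# the widget's query via OnDemandMetricSpec and matched by the endpoint when
# useOnDemandMetrics is set.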
class OrganizationEventsStatsMetricsEnhancedPerformanceEndpointTestWithOnDemandWidgets(
    MetricsEnhancedPerformanceTestCase
):
    endpoint = "sentry-api-0-organization-events-stats"

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
        self.DEFAULT_METRIC_TIMESTAMP = self.day_ago
        Environment.get_or_create(self.project, "production")
        self.url = reverse(
            "sentry-api-0-organization-events-stats",
            kwargs={"organization_id_or_slug": self.project.organization.slug},
        )
        self.features = {
            "organizations:on-demand-metrics-extraction-widgets": True,
            "organizations:on-demand-metrics-extraction": True,
        }

    def _make_on_demand_request(
        self, params: dict[str, Any], extra_features: dict[str, bool] | None = None
    ) -> Response:
        """Ensures that the required parameters for an on-demand request are included."""
        # Expected parameters for this helper function
        params["dataset"] = "metricsEnhanced"
        params["useOnDemandMetrics"] = "true"
        params["onDemandType"] = "dynamic_query"
        _features = {**self.features, **(extra_features or {})}
        return self.do_request(params, features=_features)

    def test_top_events_wrong_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
                "onDemandType": "not_real",
            },
        )
        assert response.status_code == 400, response.content

    def test_top_events_works_without_on_demand_type(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )
        assert response.status_code == 200, response.content
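
    # Each OnDemandMetricSpec encodes one aggregate plus the query and groupbys;
    # values stored against a spec carry the custom tags, so top-events
    # responses can be grouped by them (e.g. "foo,red" below).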
    def test_top_events_with_transaction_on_demand(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )
        assert response.status_code == 200, response.content
        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for _, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
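
    # Same scenario as above but the request omits the "environment" filter; the
    # extracted groups should be unchanged.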
    def test_top_events_with_transaction_on_demand_and_no_environment(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "count()",
                    "count_web_vitals(measurements.lcp, good)",
                    "customtag1",
                    "customtag2",
                ],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )
        assert response.status_code == 200, response.content
        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
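
    # The widget starts without a discover split; a transaction-only on-demand
    # query should persist TRANSACTION_LIKE onto it.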
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_transaction_only(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )
        for hour in range(0, 2):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        yAxis = [field, field_two]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": yAxis + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE
        assert response.status_code == 200, response.content
        # The transaction-only query resolves against the on-demand metrics, so
        # both stored groups are returned.
        assert len(response.data.keys()) == 2
        assert bool(response.data["foo,red"])
        assert bool(response.data["bar,blue"])
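
    # The query only matches the stored error events, so the unsaved widget
    # should be split as ERROR_EVENTS.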
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_error(
        self,
    ):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )
        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": "b" * 32,
                "message": "very bad 2",
                "type": "error",
                "start_timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value2", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        yAxis = ["count()"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two] + groupbys,
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS
        assert response.status_code == 200, response.content
        # Fell back to the error events stored above; each error forms its own
        # group (customtag2 is unset, hence the trailing comma).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["error_value2,"])
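
    # Both an error and a transaction match "query.dataset:foo", so the widget
    # split cannot be narrowed and should land on DISCOVER.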
    def test_top_events_with_transaction_on_demand_passing_widget_id_unsaved_discover(self):
        self.project = self.create_project(organization=self.organization)
        Environment.get_or_create(self.project, "production")
        field = "count()"
        field_two = "count()"
        groupbys = ["customtag1", "customtag2"]
        query = "query.dataset:foo"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=None,
        )
        self.store_event(
            data={
                "event_id": "a" * 32,
                "message": "very bad",
                "type": "error",
                "timestamp": iso_format(self.day_ago + timedelta(hours=1)),
                "tags": {"customtag1": "error_value", "query.dataset": "foo"},
            },
            project_id=self.project.id,
        )
        transaction = load_data("transaction")
        transaction["timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["start_timestamp"] = iso_format(self.day_ago + timedelta(hours=1))
        transaction["tags"] = {"customtag1": "transaction_value", "query.dataset": "foo"}
        self.store_event(
            data=transaction,
            project_id=self.project.id,
        )
        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        yAxis = ["count()"]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "query": query,
                "yAxis": yAxis,
                "field": [field, field_two, "customtag1", "customtag2"],
                "topEvents": 5,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
                "dashboardWidgetId": widget.id,
            },
        )
        saved_widget = DashboardWidget.objects.get(id=widget.id)
        assert saved_widget.discover_widget_split == DashboardWidgetTypes.DISCOVER
        assert response.status_code == 200, response.content
        # Fell back to discover data, which holds the stored error and
        # transaction events (customtag2 is unset, hence the trailing comma).
        assert len(response.data.keys()) == 2
        assert bool(response.data["error_value,"])
        assert bool(response.data["transaction_value,"])
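
    # A widget that already carries a split must not be written again; the save
    # is mocked below to catch redundant writes.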
    def test_top_events_with_transaction_on_demand_passing_widget_id_saved(self):
        field = "count()"
        field_two = "count_web_vitals(measurements.lcp, good)"
        groupbys = ["customtag1", "customtag2"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        spec_two = OnDemandMetricSpec(
            field=field_two, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        _, widget, __ = create_widget(
            ["count()"],
            "",
            self.project,
            discover_widget_split=DashboardWidgetTypes.TRANSACTION_LIKE,  # Transaction-like uses on-demand
        )
        for hour in range(0, 5):
            self.store_on_demand_metric(
                hour * 62 * 24,
                spec=spec,
                additional_tags={
                    "customtag1": "foo",
                    "customtag2": "red",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                hour * 60 * 24,
                spec=spec_two,
                additional_tags={
                    "customtag1": "bar",
                    "customtag2": "blue",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        yAxis = ["count()", "count_web_vitals(measurements.lcp, good)"]
        with mock.patch.object(widget, "save") as mock_widget_save:
            response = self.do_request(
                data={
                    "project": self.project.id,
                    "start": iso_format(self.day_ago),
                    "end": iso_format(self.day_ago + timedelta(hours=2)),
                    "interval": "1h",
                    "orderby": ["-count()"],
                    "query": query,
                    "yAxis": yAxis,
                    "field": [
                        "count()",
                        "count_web_vitals(measurements.lcp, good)",
                        "customtag1",
                        "customtag2",
                    ],
                    "topEvents": 5,
                    "dataset": "metricsEnhanced",
                    "useOnDemandMetrics": "true",
                    "onDemandType": "dynamic_query",
                    "dashboardWidgetId": widget.id,
                },
            )
            mock_widget_save.assert_not_called()
        assert response.status_code == 200, response.content
        groups = [
            ("foo,red", "count()", 0.0, 1488.0),
            ("foo,red", "count_web_vitals(measurements.lcp, good)", 0.0, 0.0),
            ("bar,blue", "count()", 0.0, 0.0),
            ("bar,blue", "count_web_vitals(measurements.lcp, good)", 0.0, 1440.0),
        ]
        assert len(response.data.keys()) == 2
        for group_count in groups:
            group, agg, row1, row2 = group_count
            row_data = response.data[group][agg]["data"][:2]
            assert [attrs for time, attrs in row_data] == [[{"count": row1}], [{"count": row2}]]
            assert response.data[group][agg]["meta"]["isMetricsExtractedData"]
            assert response.data[group]["isMetricsExtractedData"]
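
    # Two percentile aggregates extracted in one request; the _query_str_for_hash
    # assertions pin the per-spec strings that feed the query hashes.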
    def test_timeseries_on_demand_with_multiple_percentiles(self):
        field = "p75(measurements.fcp)"
        field_two = "p75(measurements.lcp)"
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(field=field, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY)
        spec_two = OnDemandMetricSpec(
            field=field_two, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        assert (
            spec._query_str_for_hash
            == "event.measurements.fcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        assert (
            spec_two._query_str_for_hash
            == "event.measurements.lcp.value;{'name': 'event.duration', 'op': 'gte', 'value': 100.0}"
        )
        for count in range(0, 4):
            self.store_on_demand_metric(
                count * 100,
                spec=spec,
                timestamp=self.day_ago + timedelta(hours=1),
            )
            self.store_on_demand_metric(
                count * 200.0,
                spec=spec_two,
                timestamp=self.day_ago + timedelta(hours=1),
            )
        yAxis = [field, field_two]
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": [field],
                "query": query,
                "yAxis": yAxis,
                "dataset": "metricsEnhanced",
                "useOnDemandMetrics": "true",
                "onDemandType": "dynamic_query",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["p75(measurements.fcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.fcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 225.0}],
        ]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsExtractedData"]
        assert response.data["p75(measurements.lcp)"]["meta"]["isMetricsData"]
        assert [attrs for time, attrs in response.data["p75(measurements.lcp)"]["data"]] == [
            [{"count": 0}],
            [{"count": 450.0}],
        ]
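
    # group_one only ever records "tolerable" transactions, which count for half
    # in apdex, hence the 0.5 buckets asserted below.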
    def test_apdex_issue(self):
        field = "apdex(300)"
        groupbys = ["group_tag"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_one",
                    "environment": "production",
                    "satisfaction": "tolerable",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={
                    "group_tag": "group_two",
                    "environment": "production",
                    "satisfaction": "satisfactory",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "group_tag"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["group_one"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["group_one"]["data"]] == [
            [{"count": 0.5}],
            [{"count": 0.5}],
        ]
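
    # Glob patterns in http.url / http.referer must survive translation into the
    # on-demand metric spec; the expected spec is asserted verbatim.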
    def test_glob_http_referer_on_demand(self):
        agg = "count()"
        network_id_tag = "networkId"
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        spec = OnDemandMetricSpec(
            field=agg,
            groupbys=[network_id_tag],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        assert spec.to_metric_spec(self.project) == {
            "category": "transaction",
            "mri": "c:transactions/on_demand@none",
            "field": None,
            "tags": [
                {"key": "query_hash", "value": "ac241f56"},
                {"key": "networkId", "field": "event.tags.networkId"},
                {"key": "environment", "field": "event.environment"},
            ],
            "condition": {
                "op": "and",
                "inner": [
                    {
                        "op": "glob",
                        "name": "event.request.url",
                        "value": ["https://sentry.io/*/foo/bar/*"],
                    },
                    {
                        "op": "glob",
                        "name": "event.request.headers.Referer",
                        "value": ["https://sentry.io/*/bar/*"],
                    },
                ],
            },
        }
        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                1,
                spec=spec,
                additional_tags={network_id_tag: "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": [network_id_tag, agg],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": f"-{agg}",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": agg,
            },
        )
        assert response.status_code == 200, response.content
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
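
    # Shared helper: issues a request against stored on-demand data and checks
    # whether the response meta flags it as extracted data.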
    def _test_is_metrics_extracted_data(
        self, params: dict[str, Any], expected_on_demand_query: bool, dataset: str
    ) -> dict[str, Any]:
        spec = OnDemandMetricSpec(
            field="count()",
            query="transaction.duration:>1s",
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        self.store_on_demand_metric(1, spec=spec)
        response = self.do_request(params)
        assert response.status_code == 200, response.content
        meta = response.data["meta"]
        # This is the main thing we want to test for
        assert meta.get("isMetricsExtractedData", False) is expected_on_demand_query
        assert meta["dataset"] == dataset
        return meta
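
    # Happy path for the helper above: an on-demand-eligible duration filter on
    # metricsEnhanced should be flagged as extracted data.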
    def test_is_metrics_extracted_data_is_included(self):
        self._test_is_metrics_extracted_data(
            {
                "dataset": "metricsEnhanced",
                "query": "transaction.duration:>=91",
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
            expected_on_demand_query=True,
            dataset="metricsEnhanced",
        )
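
    # epm() with an empty query needs no extraction, so plain metrics data is
    # served and isMetricsExtractedData stays False.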
    def test_on_demand_epm_no_query(self):
        params = {
            "dataset": "metricsEnhanced",
            "environment": "production",
            "onDemandType": "dynamic_query",
            "project": self.project.id,
            "query": "",
            "statsPeriod": "1h",
            "useOnDemandMetrics": "true",
            "yAxis": ["epm()"],
        }
        response = self.do_request(params)
        assert response.status_code == 200, response.content
        assert response.data["meta"] == {
            "fields": {"time": "date", "epm_900": "rate"},
            "units": {"time": None, "epm_900": None},
            "isMetricsData": True,
            "isMetricsExtractedData": False,
            "tips": {},
            "datasetReason": "unchanged",
            "dataset": "metricsEnhanced",
        }
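
    # "transaction" works as an ordinary groupby; values of 5 and 10 are stored
    # across the two hourly buckets.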
    def test_group_by_transaction(self):
        field = "count()"
        groupbys = ["transaction"]
        query = "transaction.duration:>=100"
        spec = OnDemandMetricSpec(
            field=field,
            groupbys=groupbys,
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        for hour in range(0, 2):
            self.store_on_demand_metric(
                (hour + 1) * 5,
                spec=spec,
                additional_tags={
                    "transaction": "/performance",
                    "environment": "production",
                },
                timestamp=self.day_ago + timedelta(hours=hour),
            )
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "environment": "production",
                "excludeOther": 1,
                "field": [field, "transaction"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": f"-{field}",
                "partial": 1,
                "project": self.project.id,
                "query": query,
                "topEvents": 5,
                "yAxis": field,
                "onDemandType": "dynamic_query",
                "useOnDemandMetrics": "true",
            },
        )
        assert response.status_code == 200, response.content
        assert response.data["/performance"]["meta"]["isMetricsExtractedData"] is True
        assert [attrs for time, attrs in response.data["/performance"]["data"]] == [
            [{"count": 5.0}],
            [{"count": 10.0}],
        ]
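
    # Shared fixture for the orderby tests: networkId 1234 stores count 1/hour
    # (p95 100) and networkId 5678 stores count 2/hour (p95 200) over five hours.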
    def _setup_orderby_tests(self, query):
        count_spec = OnDemandMetricSpec(
            field="count()",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        p95_spec = OnDemandMetricSpec(
            field="p95(transaction.duration)",
            groupbys=["networkId"],
            query=query,
            spec_type=MetricSpecType.DYNAMIC_QUERY,
        )
        for hour in range(0, 5):
            self.store_on_demand_metric(
                1,
                spec=count_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                100,
                spec=p95_spec,
                additional_tags={"networkId": "1234"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            self.store_on_demand_metric(
                200,
                spec=p95_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
            # Store twice as many 5678 so orderby puts it later
            self.store_on_demand_metric(
                2,
                spec=count_spec,
                additional_tags={"networkId": "5678"},
                timestamp=self.day_ago + timedelta(hours=hour),
            )
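
    # Descending count() puts 5678 (total 10) before 1234 (total 5).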
    def test_order_by_aggregate_top_events_desc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )
        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["5678"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 10
        data2 = response.data["1234"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 5
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
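
    # Ascending count() flips the order: 1234 first, then 5678.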
    def test_order_by_aggregate_top_events_asc(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )
        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 5
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 10
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
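
    # Ordered by count() but charting p95(transaction.duration): the order still
    # follows the counts while the plotted values are the p95s (100 and 200).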
    def test_order_by_aggregate_top_events_graph_different_aggregate(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metricsEnhanced",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "count()",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )
        assert response.status_code == 200, response.content
        assert len(response.data) == 3
        data1 = response.data["1234"]
        assert data1["order"] == 0
        assert data1["data"][0][1][0]["count"] == 100
        data2 = response.data["5678"]
        assert data2["order"] == 1
        assert data2["data"][0][1][0]["count"] == 200
        for datum in response.data.values():
            assert datum["meta"] == {
                "dataset": "metricsEnhanced",
                "datasetReason": "unchanged",
                "fields": {},
                "isMetricsData": False,
                "isMetricsExtractedData": True,
                "tips": {},
                "units": {},
            }
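
    # Ordering top events by a plain tag is rejected with a 400.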
    def test_cannot_order_by_tag(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": "-networkId",
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "count()",
            },
        )
        assert response.status_code == 400, response.content
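
    # Ordering by more than one aggregate at a time is likewise rejected.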
    def test_order_by_two_aggregates(self):
        url = "https://sentry.io"
        query = f'http.url:{url}/*/foo/bar/* http.referer:"{url}/*/bar/*" event.type:transaction'
        self._setup_orderby_tests(query)
        response = self.do_request(
            data={
                "dataset": "metrics",
                "field": ["networkId", "count()", "p95(transaction.duration)"],
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=5)),
                "onDemandType": "dynamic_query",
                "orderby": ["count()", "p95(transaction.duration)"],
                "interval": "1d",
                "partial": 1,
                "query": query,
                "referrer": "api.dashboards.widget.bar-chart",
                "project": self.project.id,
                "topEvents": 2,
                "useOnDemandMetrics": "true",
                "yAxis": "p95(transaction.duration)",
            },
        )
        assert response.status_code == 400, response.content
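
    # A single extracted row grouped by a custom tag is queryable on the plain
    # "metrics" dataset.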
    def test_top_events_with_tag(self):
        query = "transaction.duration:>=100"
        yAxis = ["count()"]
        field = "count()"
        groupbys = ["some-field"]
        spec = OnDemandMetricSpec(
            field=field, groupbys=groupbys, query=query, spec_type=MetricSpecType.DYNAMIC_QUERY
        )
        self.store_on_demand_metric(
            1,
            spec=spec,
            additional_tags={
                "some-field": "bar",
                "environment": "production",
            },
            timestamp=self.day_ago,
        )
        response = self.do_request(
            data={
                "project": self.project.id,
                "start": iso_format(self.day_ago),
                "end": iso_format(self.day_ago + timedelta(hours=2)),
                "interval": "1h",
                "orderby": ["-count()"],
                "environment": "production",
                "query": query,
                "yAxis": yAxis,
                "field": [
                    "some-field",
                    "count()",
                ],
                "topEvents": 5,
                "dataset": "metrics",
                "useOnDemandMetrics": "true",
            },
        )
        assert response.status_code == 200, response.content