test_metrics_layer.py 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036
  1. from __future__ import annotations
  2. from collections.abc import Mapping
  3. from datetime import datetime, timedelta, timezone
  4. from typing import Literal
  5. import pytest
  6. from snuba_sdk import (
  7. ArithmeticOperator,
  8. Column,
  9. Condition,
  10. Direction,
  11. Formula,
  12. Limit,
  13. Metric,
  14. MetricsQuery,
  15. MetricsScope,
  16. Op,
  17. Request,
  18. Rollup,
  19. Timeseries,
  20. )
  21. from sentry.exceptions import InvalidParams
  22. from sentry.sentry_metrics.use_case_id_registry import UseCaseID
  23. from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
  24. from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
  25. from sentry.snuba.metrics_layer.query import (
  26. bulk_run_query,
  27. fetch_metric_mris,
  28. fetch_metric_tag_keys,
  29. fetch_metric_tag_values,
  30. run_query,
  31. )
  32. from sentry.testutils.cases import BaseMetricsTestCase, TestCase
# Apply the sentry_metrics marker to every test in this module.
pytestmark = pytest.mark.sentry_metrics
  34. class MQLTest(TestCase, BaseMetricsTestCase):
  35. def ts(self, dt: datetime) -> int:
  36. return int(dt.timestamp())
  37. def setUp(self) -> None:
  38. super().setUp()
  39. self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
  40. TransactionMRI.DURATION.value: "distribution",
  41. TransactionMRI.USER.value: "set",
  42. TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
  43. "g:transactions/test_gauge@none": "gauge",
  44. }
  45. self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
  46. SessionMRI.RAW_DURATION.value: "distribution",
  47. SessionMRI.RAW_USER.value: "set",
  48. SessionMRI.RAW_SESSION.value: "counter",
  49. }
  50. self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
  51. self.hour_ago = self.now - timedelta(hours=1)
  52. self.org_id = self.project.organization_id
  53. for mri, metric_type in self.generic_metrics.items():
  54. assert metric_type in {"counter", "distribution", "set", "gauge"}
  55. for i in range(10):
  56. value: int | dict[str, int]
  57. if metric_type == "gauge":
  58. value = {
  59. "min": i,
  60. "max": i,
  61. "sum": i,
  62. "count": i,
  63. "last": i,
  64. }
  65. else:
  66. value = i
  67. self.store_metric(
  68. org_id=self.org_id,
  69. project_id=self.project.id,
  70. mri=mri,
  71. tags={
  72. "transaction": f"transaction_{i % 2}",
  73. "status_code": "500" if i % 3 == 0 else "200",
  74. "device": "BlackBerry" if i % 2 == 0 else "Nokia",
  75. },
  76. timestamp=self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  77. value=value,
  78. sampling_weight=10,
  79. )
  80. for mri, metric_type in self.metrics.items():
  81. assert metric_type in {"counter", "distribution", "set"}
  82. for i in range(10):
  83. value = i
  84. self.store_metric(
  85. self.org_id,
  86. self.project.id,
  87. mri,
  88. {
  89. "release": "release_even" if i % 2 == 0 else "release_odd",
  90. },
  91. self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  92. value,
  93. )
  94. def test_basic_generic_metrics(self) -> None:
  95. query = MetricsQuery(
  96. query=Timeseries(
  97. metric=Metric(
  98. "transaction.duration",
  99. TransactionMRI.DURATION.value,
  100. ),
  101. aggregate="max",
  102. ),
  103. start=self.hour_ago,
  104. end=self.now,
  105. rollup=Rollup(interval=60, granularity=60),
  106. scope=MetricsScope(
  107. org_ids=[self.org_id],
  108. project_ids=[self.project.id],
  109. use_case_id=UseCaseID.TRANSACTIONS.value,
  110. ),
  111. )
  112. request = Request(
  113. dataset="generic_metrics",
  114. app_id="tests",
  115. query=query,
  116. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  117. )
  118. result = run_query(request)
  119. assert len(result["data"]) == 10
  120. rows = result["data"]
  121. for i in range(10):
  122. assert rows[i]["aggregate_value"] == i
  123. assert (
  124. rows[i]["time"]
  125. == (
  126. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  127. ).isoformat()
  128. )
    def test_basic_bulk_generic_metrics(self) -> None:
        """Two requests derived from one base query can be executed in a single
        bulk_run_query call; results come back in submission order."""
        # Base query has no expression; each request sets its own Timeseries.
        query = MetricsQuery(
            query=None,
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        query1 = query.set_query(
            Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            )
        )
        query2 = query.set_query(
            Timeseries(
                metric=Metric(
                    public_name=None,
                    mri=TransactionMRI.USER.value,
                ),
                aggregate="uniq",
            )
        )
        request1 = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query1,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        request2 = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query2,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        results = bulk_run_query([request1, request2])
        assert len(results) == 2

        # Only the first (distribution) result is asserted in detail here.
        result = results[0]  # Distribution
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
  183. def test_groupby_generic_metrics(self) -> None:
  184. query = MetricsQuery(
  185. query=Timeseries(
  186. metric=Metric(
  187. "transaction.duration",
  188. TransactionMRI.DURATION.value,
  189. ),
  190. aggregate="quantiles",
  191. aggregate_params=[0.5, 0.99],
  192. groupby=[Column("transaction")],
  193. ),
  194. start=self.hour_ago,
  195. end=self.now,
  196. rollup=Rollup(interval=60, granularity=60),
  197. scope=MetricsScope(
  198. org_ids=[self.org_id],
  199. project_ids=[self.project.id],
  200. use_case_id=UseCaseID.TRANSACTIONS.value,
  201. ),
  202. )
  203. request = Request(
  204. dataset="generic_metrics",
  205. app_id="tests",
  206. query=query,
  207. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  208. )
  209. result = run_query(request)
  210. assert len(result["data"]) == 10
  211. rows = result["data"]
  212. for i in range(10):
  213. assert rows[i]["aggregate_value"] == [i, i]
  214. assert rows[i]["transaction"] == f"transaction_{i % 2}"
  215. assert (
  216. rows[i]["time"]
  217. == (
  218. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  219. ).isoformat()
  220. )
  221. def test_filters_generic_metrics(self) -> None:
  222. query = MetricsQuery(
  223. query=Timeseries(
  224. metric=Metric(
  225. "transaction.duration",
  226. TransactionMRI.DURATION.value,
  227. ),
  228. aggregate="quantiles",
  229. aggregate_params=[0.5],
  230. filters=[
  231. Condition(Column("status_code"), Op.EQ, "500"),
  232. Condition(Column("device"), Op.EQ, "BlackBerry"),
  233. ],
  234. ),
  235. start=self.hour_ago,
  236. end=self.now,
  237. rollup=Rollup(interval=60, granularity=60),
  238. scope=MetricsScope(
  239. org_ids=[self.org_id],
  240. project_ids=[self.project.id],
  241. use_case_id=UseCaseID.TRANSACTIONS.value,
  242. ),
  243. )
  244. request = Request(
  245. dataset="generic_metrics",
  246. app_id="tests",
  247. query=query,
  248. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  249. )
  250. result = run_query(request)
  251. assert len(result["data"]) == 2
  252. rows = result["data"]
  253. # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
  254. # For now handle both cases for backwards compatibility
  255. assert rows[0]["aggregate_value"] in ([0], 0)
  256. assert rows[1]["aggregate_value"] in ([6.0], 6)
    def test_complex_generic_metrics(self) -> None:
        """Filters and a group-by combined on a quantiles aggregate."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        # Only i=0 and i=6 satisfy both filters (i % 3 == 0 and i % 2 == 0).
        assert len(result["data"]) == 2
        rows = result["data"]

        # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
        # For now handle both cases for backwards compatibility
        assert rows[0]["aggregate_value"] in ([0], 0)
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] in ([6.0], 6)
        assert rows[1]["transaction"] == "transaction_0"
  296. def test_totals(self) -> None:
  297. query = MetricsQuery(
  298. query=Timeseries(
  299. metric=Metric(
  300. "transaction.duration",
  301. TransactionMRI.DURATION.value,
  302. ),
  303. aggregate="max",
  304. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  305. groupby=[Column("transaction")],
  306. ),
  307. start=self.hour_ago,
  308. end=self.now,
  309. rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
  310. scope=MetricsScope(
  311. org_ids=[self.org_id],
  312. project_ids=[self.project.id],
  313. use_case_id=UseCaseID.TRANSACTIONS.value,
  314. ),
  315. )
  316. request = Request(
  317. dataset="generic_metrics",
  318. app_id="tests",
  319. query=query,
  320. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  321. )
  322. result = run_query(request)
  323. assert len(result["data"]) == 2
  324. rows = result["data"]
  325. assert rows[0]["aggregate_value"] == 7.0
  326. assert rows[1]["aggregate_value"] == 8.0
    def test_meta_data_in_response(self) -> None:
        """run_query reports the clamped time window and the indexer mappings
        it used alongside the data."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            # Deliberately misaligned bounds (second=59) so the layer must snap
            # them to the 60s interval boundaries.
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        # Start is rounded down, end rounded up, to the interval boundary.
        assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
        assert result["modified_end"] == self.now.replace(minute=17, second=0)
        # NOTE(review): these ids look like deterministic values assigned by the
        # test indexer — confirm they are stable if the indexer setup changes.
        assert result["indexer_mappings"] == {
            "d:transactions/duration@millisecond": 9223372036854775909,
            "status_code": 10000,
            "transaction": 9223372036854776020,
        }
  361. def test_bad_query(self) -> None:
  362. query = MetricsQuery(
  363. query=Timeseries(
  364. metric=Metric(
  365. "transaction.duration",
  366. "not a real MRI",
  367. ),
  368. aggregate="max",
  369. ),
  370. start=self.hour_ago.replace(minute=16, second=59),
  371. end=self.now.replace(minute=16, second=59),
  372. rollup=Rollup(interval=60, granularity=60),
  373. scope=MetricsScope(
  374. org_ids=[self.org_id],
  375. project_ids=[self.project.id],
  376. use_case_id=UseCaseID.TRANSACTIONS.value,
  377. ),
  378. )
  379. request = Request(
  380. dataset="generic_metrics",
  381. app_id="tests",
  382. query=query,
  383. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  384. )
  385. with pytest.raises(InvalidParams):
  386. run_query(request)
  387. def test_interval_with_totals(self) -> None:
  388. query = MetricsQuery(
  389. query=Timeseries(
  390. metric=Metric(
  391. "transaction.duration",
  392. TransactionMRI.DURATION.value,
  393. ),
  394. aggregate="max",
  395. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  396. groupby=[Column("transaction")],
  397. ),
  398. start=self.hour_ago,
  399. end=self.now,
  400. rollup=Rollup(interval=60, totals=True, granularity=60),
  401. scope=MetricsScope(
  402. org_ids=[self.org_id],
  403. project_ids=[self.project.id],
  404. use_case_id=UseCaseID.TRANSACTIONS.value,
  405. ),
  406. )
  407. request = Request(
  408. dataset="generic_metrics",
  409. app_id="tests",
  410. query=query,
  411. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  412. )
  413. result = run_query(request)
  414. assert len(result["data"]) == 6
  415. assert result["totals"]["aggregate_value"] == 8.0
    def test_automatic_granularity(self) -> None:
        """When Rollup specifies only an interval, a granularity is chosen
        automatically by the metrics layer."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            # No explicit granularity: only the 120s interval is given.
            rollup=Rollup(interval=120),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)

        # There's a flaky off by one error here that is very difficult to track down
        # TODO: figure out why this is flaky and assert to one specific value
        assert len(result["data"]) in [5, 6]
  443. def test_automatic_dataset(self) -> None:
  444. query = MetricsQuery(
  445. query=Timeseries(
  446. metric=Metric(
  447. None,
  448. SessionMRI.RAW_DURATION.value,
  449. ),
  450. aggregate="max",
  451. ),
  452. start=self.hour_ago,
  453. end=self.now,
  454. rollup=Rollup(interval=60, granularity=60),
  455. scope=MetricsScope(
  456. org_ids=[self.org_id],
  457. project_ids=[self.project.id],
  458. use_case_id=UseCaseID.SESSIONS.value,
  459. ),
  460. )
  461. request = Request(
  462. dataset="generic_metrics",
  463. app_id="tests",
  464. query=query,
  465. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  466. )
  467. result = run_query(request)
  468. assert request.dataset == "metrics"
  469. assert len(result["data"]) == 10
  470. def test_gauges(self) -> None:
  471. query = MetricsQuery(
  472. query=Timeseries(
  473. metric=Metric(
  474. None,
  475. "g:transactions/test_gauge@none",
  476. ),
  477. aggregate="last",
  478. ),
  479. start=self.hour_ago,
  480. end=self.now,
  481. rollup=Rollup(interval=60, totals=True, granularity=60),
  482. scope=MetricsScope(
  483. org_ids=[self.org_id],
  484. project_ids=[self.project.id],
  485. ),
  486. )
  487. request = Request(
  488. dataset="generic_metrics",
  489. app_id="tests",
  490. query=query,
  491. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  492. )
  493. result = run_query(request)
  494. assert len(result["data"]) == 10
  495. assert result["totals"]["aggregate_value"] == 9.0
  496. def test_metrics_groupby(self) -> None:
  497. query = MetricsQuery(
  498. query=Timeseries(
  499. metric=Metric(
  500. None,
  501. SessionMRI.RAW_DURATION.value,
  502. ),
  503. aggregate="max",
  504. groupby=[Column("release")],
  505. ),
  506. start=self.hour_ago,
  507. end=self.now,
  508. rollup=Rollup(interval=60, granularity=60),
  509. scope=MetricsScope(
  510. org_ids=[self.org_id],
  511. project_ids=[self.project.id],
  512. use_case_id=UseCaseID.SESSIONS.value,
  513. ),
  514. )
  515. request = Request(
  516. dataset="metrics",
  517. app_id="tests",
  518. query=query,
  519. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  520. )
  521. result = run_query(request)
  522. assert request.dataset == "metrics"
  523. assert len(result["data"]) == 10
  524. for data_point in result["data"]:
  525. assert data_point["release"] == "release_even" or data_point["release"] == "release_odd"
  526. def test_metrics_filters(self) -> None:
  527. query = MetricsQuery(
  528. query=Timeseries(
  529. metric=Metric(
  530. None,
  531. SessionMRI.RAW_USER.value,
  532. ),
  533. aggregate="count",
  534. filters=[
  535. Condition(Column("release"), Op.EQ, "release_even"),
  536. ],
  537. ),
  538. start=self.hour_ago,
  539. end=self.now,
  540. rollup=Rollup(interval=60, granularity=60),
  541. scope=MetricsScope(
  542. org_ids=[self.org_id],
  543. project_ids=[self.project.id],
  544. use_case_id=UseCaseID.SESSIONS.value,
  545. ),
  546. )
  547. request = Request(
  548. dataset="metrics",
  549. app_id="tests",
  550. query=query,
  551. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  552. )
  553. result = run_query(request)
  554. assert request.dataset == "metrics"
  555. assert len(result["data"]) == 5
  556. def test_metrics_complex(self) -> None:
  557. query = MetricsQuery(
  558. query=Timeseries(
  559. metric=Metric(
  560. None,
  561. SessionMRI.RAW_SESSION.value,
  562. ),
  563. aggregate="count",
  564. groupby=[Column("release")],
  565. filters=[
  566. Condition(Column("release"), Op.EQ, "release_even"),
  567. ],
  568. ),
  569. start=self.hour_ago,
  570. end=self.now,
  571. rollup=Rollup(interval=60, granularity=60),
  572. scope=MetricsScope(
  573. org_ids=[self.org_id],
  574. project_ids=[self.project.id],
  575. use_case_id=UseCaseID.SESSIONS.value,
  576. ),
  577. )
  578. request = Request(
  579. dataset="metrics",
  580. app_id="tests",
  581. query=query,
  582. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  583. )
  584. result = run_query(request)
  585. assert request.dataset == "metrics"
  586. assert len(result["data"]) == 5
  587. assert any(data_point["release"] == "release_even" for data_point in result["data"])
    def test_metrics_correctly_reverse_resolved(self) -> None:
        """Tag values that go through the indexer (release) come back as
        strings in the result, while raw columns (project_id) pass through
        unchanged."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release"), Column("project_id")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                    Condition(Column("project_id"), Op.EQ, self.project.id),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])
        assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
    def test_failure_rate(self) -> None:
        """Failure rate expressed as a Formula: count of non-successful
        transactions divided by the total count."""
        query = MetricsQuery(
            query=Formula(
                ArithmeticOperator.DIVIDE.value,
                [
                    # Numerator: transactions whose status is not ok/cancelled/unknown.
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                        filters=[
                            Condition(
                                Column(TransactionTagsKey.TRANSACTION_STATUS.value),
                                Op.NOT_IN,
                                [
                                    TransactionStatusTagValue.OK.value,
                                    TransactionStatusTagValue.CANCELLED.value,
                                    TransactionStatusTagValue.UNKNOWN.value,
                                ],
                            )
                        ],
                    ),
                    # Denominator: all transactions.
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        # setUp stores only transaction/status_code/device tags, so no stored
        # point matches the excluded statuses and the rate is exactly 1.0.
        assert result["totals"]["aggregate_value"] == 1.0
  669. def test_aggregate_aliases(self) -> None:
  670. query = MetricsQuery(
  671. query=Timeseries(
  672. metric=Metric(
  673. "transaction.duration",
  674. TransactionMRI.DURATION.value,
  675. ),
  676. aggregate="p95",
  677. ),
  678. start=self.hour_ago,
  679. end=self.now,
  680. rollup=Rollup(interval=60, granularity=60),
  681. scope=MetricsScope(
  682. org_ids=[self.org_id],
  683. project_ids=[self.project.id],
  684. use_case_id=UseCaseID.TRANSACTIONS.value,
  685. ),
  686. )
  687. request = Request(
  688. dataset="generic_metrics",
  689. app_id="tests",
  690. query=query,
  691. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  692. )
  693. result = run_query(request)
  694. assert len(result["data"]) == 10
  695. rows = result["data"]
  696. for i in range(10):
  697. # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
  698. # For now handle both cases for backwards compatibility
  699. assert rows[i]["aggregate_value"] in ([i], i)
  700. assert (
  701. rows[i]["time"]
  702. == (
  703. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  704. ).isoformat()
  705. )
    def test_dataset_correctness(self) -> None:
        """Querying a transactions MRI through a Request that names the legacy
        "metrics" dataset still returns the expected rows."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
                filters=[
                    Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            # Both requested quantiles collapse to the single stored value.
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
  747. def test_resolve_all_mris(self) -> None:
  748. for mri in [
  749. "d:custom/sentry.event_manager.save@second",
  750. "d:custom/sentry.event_manager.save_generic_events@second",
  751. ]:
  752. self.store_metric(
  753. self.org_id,
  754. self.project.id,
  755. mri,
  756. {
  757. "transaction": "transaction_1",
  758. "status_code": "200",
  759. "device": "BlackBerry",
  760. },
  761. self.ts(self.hour_ago + timedelta(minutes=5)),
  762. 1,
  763. )
  764. query = MetricsQuery(
  765. query=Formula(
  766. function_name="plus",
  767. parameters=[
  768. Timeseries(
  769. metric=Metric(
  770. mri="d:custom/sentry.event_manager.save@second",
  771. ),
  772. aggregate="avg",
  773. ),
  774. Timeseries(
  775. metric=Metric(
  776. mri="d:custom/sentry.event_manager.save_generic_events@second",
  777. ),
  778. aggregate="avg",
  779. ),
  780. ],
  781. ),
  782. start=self.hour_ago,
  783. end=self.now,
  784. rollup=Rollup(interval=None, totals=True, orderby=None, granularity=10),
  785. scope=MetricsScope(
  786. org_ids=[self.org_id], project_ids=[self.project.id], use_case_id="custom"
  787. ),
  788. limit=Limit(20),
  789. offset=None,
  790. )
  791. request = Request(
  792. dataset="generic_metrics",
  793. app_id="tests",
  794. query=query,
  795. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  796. )
  797. result = run_query(request)
  798. assert len(result["data"]) == 1
  799. def test_formulas_with_scalar_formulas(self) -> None:
  800. query = MetricsQuery(
  801. query=f"sum({TransactionMRI.DURATION.value}) + (24 * 3600)",
  802. start=self.hour_ago,
  803. end=self.now,
  804. rollup=Rollup(interval=60, granularity=60),
  805. scope=MetricsScope(
  806. org_ids=[self.org_id],
  807. project_ids=[self.project.id],
  808. use_case_id=UseCaseID.TRANSACTIONS.value,
  809. ),
  810. )
  811. request = Request(
  812. dataset="generic_metrics",
  813. app_id="tests",
  814. query=query,
  815. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  816. )
  817. result = run_query(request)
  818. assert len(result["data"]) == 10
  819. for row in result["data"]:
  820. assert row["aggregate_value"] >= 86400
    def test_extrapolated_generic_metrics(self) -> None:
        """With extrapolate=True the stored sampling_weight (10) scales the
        aggregate; without it the raw stored values are returned."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="sum",
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] in ([0], 0)
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] in ([6.00], 6)
        assert rows[1]["transaction"] == "transaction_0"

        # Set extrapolate flag to True. Since the sampling weight is set to 10, the extrapolated value should be 6*10
        query = query.set_extrapolate(True)
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] in ([0], 0)
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] in ([60.00], 60)
        assert rows[1]["transaction"] == "transaction_0"
  872. class MQLMetaTest(TestCase, BaseMetricsTestCase):
  873. def ts(self, dt: datetime) -> int:
  874. return int(dt.timestamp())
  875. def setUp(self) -> None:
  876. super().setUp()
  877. self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
  878. TransactionMRI.DURATION.value: "distribution",
  879. TransactionMRI.USER.value: "set",
  880. TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
  881. "g:transactions/test_gauge@none": "gauge",
  882. }
  883. self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
  884. self.hour_ago = self.now - timedelta(hours=1)
  885. self.org_id = self.project.organization_id
  886. for mri, metric_type in self.generic_metrics.items():
  887. assert metric_type in {"counter", "distribution", "set", "gauge"}
  888. for i in range(2):
  889. value: int | dict[str, int]
  890. if metric_type == "gauge":
  891. value = {
  892. "min": i,
  893. "max": i,
  894. "sum": i,
  895. "count": i,
  896. "last": i,
  897. }
  898. else:
  899. value = i
  900. self.store_metric(
  901. self.org_id,
  902. self.project.id,
  903. mri,
  904. {
  905. "transaction": f"transaction_{i % 2}",
  906. "status_code": "500" if i % 2 == 0 else "200",
  907. "device": "BlackBerry" if i % 2 == 0 else "Nokia",
  908. },
  909. self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  910. value,
  911. )
  912. def test_fetch_metric_mris(self) -> None:
  913. metric_mris = fetch_metric_mris(self.org_id, [self.project.id], UseCaseID.TRANSACTIONS)
  914. assert len(metric_mris) == 1
  915. assert len(metric_mris[self.project.id]) == 4
  916. assert metric_mris[self.project.id] == [
  917. "c:transactions/count_per_root_project@none",
  918. "s:transactions/user@none",
  919. "g:transactions/test_gauge@none",
  920. "d:transactions/duration@millisecond",
  921. ]
  922. def test_fetch_metric_tag_keys(self) -> None:
  923. tag_keys = fetch_metric_tag_keys(
  924. self.org_id, [self.project.id], UseCaseID.TRANSACTIONS, "g:transactions/test_gauge@none"
  925. )
  926. assert len(tag_keys) == 1
  927. assert len(tag_keys[self.project.id]) == 3
  928. assert tag_keys[self.project.id] == ["status_code", "device", "transaction"]
  929. def test_fetch_metric_tag_values(self) -> None:
  930. tag_values = fetch_metric_tag_values(
  931. self.org_id,
  932. [self.project.id],
  933. UseCaseID.TRANSACTIONS,
  934. "g:transactions/test_gauge@none",
  935. "transaction",
  936. )
  937. assert len(tag_values) == 2
  938. assert tag_values == ["transaction_0", "transaction_1"]
  939. def test_fetch_metric_tag_values_with_prefix(self) -> None:
  940. tag_values = fetch_metric_tag_values(
  941. self.org_id,
  942. [self.project.id],
  943. UseCaseID.TRANSACTIONS,
  944. "g:transactions/test_gauge@none",
  945. "status_code",
  946. "5",
  947. )
  948. assert len(tag_values) == 1
  949. assert tag_values == ["500"]
  950. def test_fetch_metric_tag_values_for_multiple_projects(self) -> None:
  951. new_project = self.create_project(name="New Project")
  952. self.store_metric(
  953. self.org_id,
  954. new_project.id,
  955. "g:transactions/test_gauge@none",
  956. {"status_code": "524"},
  957. self.ts(self.hour_ago + timedelta(minutes=10)),
  958. 10,
  959. )
  960. tag_values = fetch_metric_tag_values(
  961. self.org_id,
  962. [self.project.id, new_project.id],
  963. UseCaseID.TRANSACTIONS,
  964. "g:transactions/test_gauge@none",
  965. "status_code",
  966. "5",
  967. )
  968. assert len(tag_values) == 2
  969. assert tag_values == ["500", "524"]