test_metrics_layer.py 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982
  1. from __future__ import annotations
  2. from collections.abc import Mapping
  3. from datetime import datetime, timedelta, timezone
  4. from typing import Literal
  5. import pytest
  6. from snuba_sdk import (
  7. ArithmeticOperator,
  8. Column,
  9. Condition,
  10. Direction,
  11. Formula,
  12. Limit,
  13. Metric,
  14. MetricsQuery,
  15. MetricsScope,
  16. Op,
  17. Request,
  18. Rollup,
  19. Timeseries,
  20. )
  21. from sentry.exceptions import InvalidParams
  22. from sentry.sentry_metrics.use_case_id_registry import UseCaseID
  23. from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
  24. from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
  25. from sentry.snuba.metrics_layer.query import (
  26. bulk_run_query,
  27. fetch_metric_mris,
  28. fetch_metric_tag_keys,
  29. fetch_metric_tag_values,
  30. run_query,
  31. )
  32. from sentry.testutils.cases import BaseMetricsTestCase, TestCase
# Apply the sentry_metrics marker to every test in this module so the
# metrics test infrastructure (indexer, storage) is set up by the runner.
pytestmark = pytest.mark.sentry_metrics
  34. class MQLTest(TestCase, BaseMetricsTestCase):
  35. def ts(self, dt: datetime) -> int:
  36. return int(dt.timestamp())
    def setUp(self) -> None:
        """Seed ten data points per metric for both metric backends.

        Generic (transactions) metrics get transaction/status_code/device tags;
        release-health (sessions) metrics get a release tag. Values are 0..9,
        stored one per minute starting an hour before ``self.now``.
        """
        super().setUp()
        self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
            TransactionMRI.DURATION.value: "distribution",
            TransactionMRI.USER.value: "set",
            TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
            "g:transactions/test_gauge@none": "gauge",
        }
        self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
            SessionMRI.RAW_DURATION.value: "distribution",
            SessionMRI.RAW_USER.value: "set",
            SessionMRI.RAW_SESSION.value: "counter",
        }
        self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
        self.hour_ago = self.now - timedelta(hours=1)
        self.org_id = self.project.organization_id
        for mri, metric_type in self.generic_metrics.items():
            assert metric_type in {"counter", "distribution", "set", "gauge"}
            for i in range(10):
                value: int | dict[str, int]
                if metric_type == "gauge":
                    # Gauges are stored as a composite value, not a scalar.
                    value = {
                        "min": i,
                        "max": i,
                        "sum": i,
                        "count": i,
                        "last": i,
                    }
                else:
                    value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    mri,
                    {
                        # Tags alternate deterministically so tests can assert
                        # on exact groupings/filters below.
                        "transaction": f"transaction_{i % 2}",
                        "status_code": "500" if i % 3 == 0 else "200",
                        "device": "BlackBerry" if i % 2 == 0 else "Nokia",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                )
        for mri, metric_type in self.metrics.items():
            assert metric_type in {"counter", "distribution", "set"}
            for i in range(10):
                value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    mri,
                    {
                        "release": "release_even" if i % 2 == 0 else "release_odd",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                )
    def test_basic_generic_metrics(self) -> None:
        """A plain max() timeseries over a distribution returns one row per minute bucket."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            # One value per minute was stored, so max per bucket is the value itself.
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
    def test_basic_bulk_generic_metrics(self) -> None:
        """bulk_run_query resolves multiple requests and returns results in order."""
        # Shared base query; the per-request Timeseries is swapped in below.
        query = MetricsQuery(
            query=None,
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        query1 = query.set_query(
            Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            )
        )
        query2 = query.set_query(
            Timeseries(
                metric=Metric(
                    public_name=None,
                    mri=TransactionMRI.USER.value,
                ),
                aggregate="uniq",
            )
        )
        request1 = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query1,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        request2 = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query2,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        results = bulk_run_query([request1, request2])
        assert len(results) == 2
        result = results[0]  # Distribution
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
    def test_groupby_generic_metrics(self) -> None:
        """Grouping by a tag column surfaces the tag value on each result row."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            # Each bucket holds a single value, so both quantiles equal that value.
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
    def test_filters_generic_metrics(self) -> None:
        """Multiple tag filters are ANDed together; only i in {0, 6} match both."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    # status_code == "500" means i % 3 == 0; device == "BlackBerry"
                    # means i % 2 == 0 (see setUp), so only i = 0 and i = 6 qualify.
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
        # For now handle both cases for backwards compatibility
        assert rows[0]["aggregate_value"] in ([0], 0)
        assert rows[1]["aggregate_value"] in ([6.0], 6)
    def test_complex_generic_metrics(self) -> None:
        """Filters and groupby combine: both matching rows fall in transaction_0."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
        # For now handle both cases for backwards compatibility
        assert rows[0]["aggregate_value"] in ([0], 0)
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] in ([6.0], 6)
        assert rows[1]["transaction"] == "transaction_0"
    def test_totals(self) -> None:
        """A totals-only rollup (no interval) returns one row per group, ordered ASC."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        # status_code "200" applies when i % 3 != 0; the per-transaction maxima
        # among those values are 7 (even transaction) and 8 (odd transaction).
        assert rows[0]["aggregate_value"] == 7.0
        assert rows[1]["aggregate_value"] == 8.0
    def test_meta_data_in_response(self) -> None:
        """run_query reports snapped start/end and the indexer string->id mappings."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            # Deliberately unaligned bounds so the layer has to snap them.
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        # Start is floored and end is ceiled to the 60s interval.
        assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
        assert result["modified_end"] == self.now.replace(minute=17, second=0)
        assert result["indexer_mappings"] == {
            "d:transactions/duration@millisecond": 9223372036854775909,
            "status_code": 10000,
            "transaction": 9223372036854776020,
        }
    def test_bad_query(self) -> None:
        """An invalid MRI is rejected with InvalidParams before reaching Snuba."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    "not a real MRI",
                ),
                aggregate="max",
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        with pytest.raises(InvalidParams):
            run_query(request)
    def test_interval_with_totals(self) -> None:
        """interval + totals returns both the series rows and an overall totals row."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 6
        assert result["totals"]["aggregate_value"] == 8.0
    def test_automatic_granularity(self) -> None:
        """When no granularity is given, the layer picks one suitable for the interval."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            # Only the interval is specified; granularity must be inferred.
            rollup=Rollup(interval=120),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        # There's a flaky off by one error here that is very difficult to track down
        # TODO: figure out why this is flaky and assert to one specific value
        assert len(result["data"]) in [5, 6]
    def test_automatic_dataset(self) -> None:
        """A sessions MRI reroutes the request from generic_metrics to metrics."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            # Intentionally wrong dataset; run_query should correct it in place.
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
    def test_gauges(self) -> None:
        """Gauge metrics support the last() aggregate for series and totals."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    "g:transactions/test_gauge@none",
                ),
                aggregate="last",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        # The most recent stored gauge "last" value is 9.
        assert result["totals"]["aggregate_value"] == 9.0
    def test_metrics_groupby(self) -> None:
        """Grouping by release works on the (non-generic) metrics dataset."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
                groupby=[Column("release")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
        for data_point in result["data"]:
            # setUp only stores these two release values.
            assert data_point["release"] == "release_even" or data_point["release"] == "release_odd"
    def test_metrics_filters(self) -> None:
        """Tag filters work on the metrics dataset: only even i have release_even."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_USER.value,
                ),
                aggregate="count",
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
    def test_metrics_complex(self) -> None:
        """Filter and groupby combined on the metrics dataset."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])
    def test_metrics_correctly_reverse_resolved(self) -> None:
        """Indexer-resolved tag values are translated back to strings in results."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release"), Column("project_id")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                    Condition(Column("project_id"), Op.EQ, self.project.id),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])
        assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
    def test_failure_rate(self) -> None:
        """Formula queries work: failed-transaction count divided by total count."""
        query = MetricsQuery(
            query=Formula(
                ArithmeticOperator.DIVIDE.value,
                [
                    # Numerator: transactions whose status is a failure
                    # (i.e. not ok/cancelled/unknown).
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                        filters=[
                            Condition(
                                Column(TransactionTagsKey.TRANSACTION_STATUS.value),
                                Op.NOT_IN,
                                [
                                    TransactionStatusTagValue.OK.value,
                                    TransactionStatusTagValue.CANCELLED.value,
                                    TransactionStatusTagValue.UNKNOWN.value,
                                ],
                            )
                        ],
                    ),
                    # Denominator: all transactions.
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        # No stored data point carries an ok/cancelled/unknown status tag,
        # so every transaction counts as failed.
        assert result["totals"]["aggregate_value"] == 1.0
    def test_aggregate_aliases(self) -> None:
        """Aggregate aliases like p95 are expanded to the underlying quantile."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="p95",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
            # For now handle both cases for backwards compatibility
            assert rows[i]["aggregate_value"] in ([i], i)
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
    def test_dataset_correctness(self) -> None:
        """A transactions query executed against the 'metrics' dataset still resolves."""
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
                filters=[
                    Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            # Deliberately the "wrong" dataset for a transactions use case.
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
    def test_resolve_all_mris(self) -> None:
        """Every MRI referenced in a Formula is resolved, including custom metrics."""
        # Store one point for each custom MRI used by the formula below.
        for mri in [
            "d:custom/sentry.event_manager.save@second",
            "d:custom/sentry.event_manager.save_generic_events@second",
        ]:
            self.store_metric(
                self.org_id,
                self.project.id,
                mri,
                {
                    "transaction": "transaction_1",
                    "status_code": "200",
                    "device": "BlackBerry",
                },
                self.ts(self.hour_ago + timedelta(minutes=5)),
                1,
            )
        query = MetricsQuery(
            query=Formula(
                function_name="plus",
                parameters=[
                    Timeseries(
                        metric=Metric(
                            mri="d:custom/sentry.event_manager.save@second",
                        ),
                        aggregate="avg",
                    ),
                    Timeseries(
                        metric=Metric(
                            mri="d:custom/sentry.event_manager.save_generic_events@second",
                        ),
                        aggregate="avg",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=None, totals=True, orderby=None, granularity=10),
            scope=MetricsScope(
                org_ids=[self.org_id], project_ids=[self.project.id], use_case_id="custom"
            ),
            limit=Limit(20),
            offset=None,
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 1
    def test_formulas_with_scalar_formulas(self) -> None:
        """MQL string queries may mix a metric aggregate with a scalar subexpression."""
        query = MetricsQuery(
            # The scalar term (24 * 3600) = 86400 is added to every bucket's sum.
            query=f"sum({TransactionMRI.DURATION.value}) + (24 * 3600)",
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        for row in result["data"]:
            assert row["aggregate_value"] >= 86400
  820. class MQLMetaTest(TestCase, BaseMetricsTestCase):
  821. def ts(self, dt: datetime) -> int:
  822. return int(dt.timestamp())
    def setUp(self) -> None:
        """Store two tagged data points for each generic (transactions) metric."""
        super().setUp()
        self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
            TransactionMRI.DURATION.value: "distribution",
            TransactionMRI.USER.value: "set",
            TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
            "g:transactions/test_gauge@none": "gauge",
        }
        self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
        self.hour_ago = self.now - timedelta(hours=1)
        self.org_id = self.project.organization_id
        for mri, metric_type in self.generic_metrics.items():
            assert metric_type in {"counter", "distribution", "set", "gauge"}
            for i in range(2):
                value: int | dict[str, int]
                if metric_type == "gauge":
                    # Gauges are stored as a composite value, not a scalar.
                    value = {
                        "min": i,
                        "max": i,
                        "sum": i,
                        "count": i,
                        "last": i,
                    }
                else:
                    value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    mri,
                    {
                        # i is 0 or 1, yielding exactly two values per tag key.
                        "transaction": f"transaction_{i % 2}",
                        "status_code": "500" if i % 2 == 0 else "200",
                        "device": "BlackBerry" if i % 2 == 0 else "Nokia",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                )
    def test_fetch_metric_mris(self) -> None:
        """fetch_metric_mris lists the stored MRIs keyed by project id."""
        metric_mris = fetch_metric_mris(self.org_id, [self.project.id], UseCaseID.TRANSACTIONS)
        assert len(metric_mris) == 1
        assert len(metric_mris[self.project.id]) == 4
        assert metric_mris[self.project.id] == [
            "c:transactions/count_per_root_project@none",
            "s:transactions/user@none",
            "g:transactions/test_gauge@none",
            "d:transactions/duration@millisecond",
        ]
    def test_fetch_metric_tag_keys(self) -> None:
        """fetch_metric_tag_keys lists the tag keys stored for a single MRI."""
        tag_keys = fetch_metric_tag_keys(
            self.org_id, [self.project.id], UseCaseID.TRANSACTIONS, "g:transactions/test_gauge@none"
        )
        assert len(tag_keys) == 1
        assert len(tag_keys[self.project.id]) == 3
        assert tag_keys[self.project.id] == ["status_code", "device", "transaction"]
    def test_fetch_metric_tag_values(self) -> None:
        """fetch_metric_tag_values returns all stored values for a tag key."""
        tag_values = fetch_metric_tag_values(
            self.org_id,
            [self.project.id],
            UseCaseID.TRANSACTIONS,
            "g:transactions/test_gauge@none",
            "transaction",
        )
        assert len(tag_values) == 2
        assert tag_values == ["transaction_0", "transaction_1"]
    def test_fetch_metric_tag_values_with_prefix(self) -> None:
        """The optional prefix argument narrows tag values ("5" matches only "500")."""
        tag_values = fetch_metric_tag_values(
            self.org_id,
            [self.project.id],
            UseCaseID.TRANSACTIONS,
            "g:transactions/test_gauge@none",
            "status_code",
            "5",
        )
        assert len(tag_values) == 1
        assert tag_values == ["500"]
    def test_fetch_metric_tag_values_for_multiple_projects(self) -> None:
        """Tag values are merged across all requested projects."""
        new_project = self.create_project(name="New Project")
        # Give the second project its own distinct status_code value.
        self.store_metric(
            self.org_id,
            new_project.id,
            "g:transactions/test_gauge@none",
            {"status_code": "524"},
            self.ts(self.hour_ago + timedelta(minutes=10)),
            10,
        )
        tag_values = fetch_metric_tag_values(
            self.org_id,
            [self.project.id, new_project.id],
            UseCaseID.TRANSACTIONS,
            "g:transactions/test_gauge@none",
            "status_code",
            "5",
        )
        assert len(tag_values) == 2
        assert tag_values == ["500", "524"]