from __future__ import annotations

from collections.abc import Mapping
from datetime import datetime, timedelta, timezone
from typing import Literal

import pytest
from snuba_sdk import (
    ArithmeticOperator,
    Column,
    Condition,
    Direction,
    Formula,
    Limit,
    Metric,
    MetricsQuery,
    MetricsScope,
    Op,
    Request,
    Rollup,
    Timeseries,
)

from sentry.exceptions import InvalidParams
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
from sentry.snuba.metrics_layer.query import (
    bulk_run_query,
    fetch_metric_mris,
    fetch_metric_tag_keys,
    fetch_metric_tag_values,
    run_query,
)
from sentry.testutils.cases import BaseMetricsTestCase, TestCase

pytestmark = pytest.mark.sentry_metrics
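

# Integration tests for the metrics layer query entry points (run_query / bulk_run_query),
# covering timeseries aggregates, group-bys, filters, totals, formulas, and automatic
# dataset/granularity resolution for both the generic_metrics and metrics datasets.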
class MQLTest(TestCase, BaseMetricsTestCase):
    def ts(self, dt: datetime) -> int:
        return int(dt.timestamp())

    def setUp(self) -> None:
        super().setUp()
        self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
            TransactionMRI.DURATION.value: "distribution",
            TransactionMRI.USER.value: "set",
            TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
            "g:transactions/test_gauge@none": "gauge",
        }
        self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
            SessionMRI.RAW_DURATION.value: "distribution",
            SessionMRI.RAW_USER.value: "set",
            SessionMRI.RAW_SESSION.value: "counter",
        }
        self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
        self.hour_ago = self.now - timedelta(hours=1)
        self.org_id = self.project.organization_id
        for mri, metric_type in self.generic_metrics.items():
            assert metric_type in {"counter", "distribution", "set", "gauge"}
            for i in range(10):
                value: int | dict[str, int]
                if metric_type == "gauge":
                    value = {
                        "min": i,
                        "max": i,
                        "sum": i,
                        "count": i,
                        "last": i,
                    }
                else:
                    value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "transaction": f"transaction_{i % 2}",
                        "status_code": "500" if i % 3 == 0 else "200",
                        "device": "BlackBerry" if i % 2 == 0 else "Nokia",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.TRANSACTIONS,
                )
        for mri, metric_type in self.metrics.items():
            assert metric_type in {"counter", "distribution", "set"}
            for i in range(10):
                value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "release": "release_even" if i % 2 == 0 else "release_odd",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.SESSIONS,
                )

    def test_basic_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
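
    # bulk_run_query executes several Requests in a single call and returns one result per
    # request, in the order they were passed.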

    def test_basic_bulk_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=None,
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        query1 = query.set_query(
            Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            )
        )
        query2 = query.set_query(
            Timeseries(
                metric=Metric(
                    public_name=None,
                    mri=TransactionMRI.USER.value,
                ),
                aggregate="uniq",
            )
        )
        request1 = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query1,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        request2 = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query2,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        results = bulk_run_query([request1, request2])
        assert len(results) == 2
        result = results[0]  # Distribution
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_groupby_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_filters_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
        # For now handle both cases for backwards compatibility
        assert rows[0]["aggregate_value"] in ([0], 0)
        assert rows[1]["aggregate_value"] in ([6.0], 6)

    def test_complex_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
        # For now handle both cases for backwards compatibility
        assert rows[0]["aggregate_value"] in ([0], 0)
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] in ([6.0], 6)
        assert rows[1]["transaction"] == "transaction_0"

    def test_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == 7.0
        assert rows[1]["aggregate_value"] == 8.0
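
    # run_query aligns the requested start/end to the rollup interval and reports the adjusted
    # window (modified_start / modified_end) along with the indexer mappings it resolved.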

    def test_meta_data_in_response(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
        assert result["modified_end"] == self.now.replace(minute=17, second=0)
        assert result["indexer_mappings"] == {
            "d:transactions/duration@millisecond": 9223372036854775909,
            "status_code": 10000,
            "transaction": 9223372036854776020,
        }

    def test_bad_query(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    "not a real MRI",
                ),
                aggregate="max",
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        with pytest.raises(InvalidParams):
            run_query(request)

    def test_interval_with_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 6
        assert result["totals"]["aggregate_value"] == 8.0

    def test_automatic_granularity(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=120),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        # There's a flaky off by one error here that is very difficult to track down
        # TODO: figure out why this is flaky and assert to one specific value
        assert len(result["data"]) in [5, 6]

    def test_automatic_dataset(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10

    def test_gauges(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    "g:transactions/test_gauge@none",
                ),
                aggregate="last",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 9.0

    def test_metrics_groupby(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
                groupby=[Column("release")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
        for data_point in result["data"]:
            assert data_point["release"] == "release_even" or data_point["release"] == "release_odd"

    def test_metrics_filters(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_USER.value,
                ),
                aggregate="count",
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5

    def test_metrics_complex(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])

    def test_metrics_correctly_reverse_resolved(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release"), Column("project_id")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                    Condition(Column("project_id"), Op.EQ, self.project.id),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])
        assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
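
    # A failure-rate style formula: the count of transactions whose transaction.status is not
    # OK/CANCELLED/UNKNOWN, divided by the total transaction count.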

    def test_failure_rate(self) -> None:
        query = MetricsQuery(
            query=Formula(
                ArithmeticOperator.DIVIDE.value,
                [
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                        filters=[
                            Condition(
                                Column(TransactionTagsKey.TRANSACTION_STATUS.value),
                                Op.NOT_IN,
                                [
                                    TransactionStatusTagValue.OK.value,
                                    TransactionStatusTagValue.CANCELLED.value,
                                    TransactionStatusTagValue.UNKNOWN.value,
                                ],
                            )
                        ],
                    ),
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 1.0

    def test_aggregate_aliases(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="p95",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
            # For now handle both cases for backwards compatibility
            assert rows[i]["aggregate_value"] in ([i], i)
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_dataset_correctness(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
                filters=[
                    Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
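
    # Formulas can also be constructed by function name ("plus") over multiple timeseries;
    # here two custom-use-case MRIs are stored and their averages are added together.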

    def test_resolve_all_mris(self) -> None:
        for mri in [
            "d:custom/sentry.event_manager.save@second",
            "d:custom/sentry.event_manager.save_generic_events@second",
        ]:
            self.store_metric(
                self.org_id,
                self.project.id,
                "distribution",
                mri,
                {
                    "transaction": "transaction_1",
                    "status_code": "200",
                    "device": "BlackBerry",
                },
                self.ts(self.hour_ago + timedelta(minutes=5)),
                1,
                UseCaseID.CUSTOM,
            )
        query = MetricsQuery(
            query=Formula(
                function_name="plus",
                parameters=[
                    Timeseries(
                        metric=Metric(
                            mri="d:custom/sentry.event_manager.save@second",
                        ),
                        aggregate="avg",
                    ),
                    Timeseries(
                        metric=Metric(
                            mri="d:custom/sentry.event_manager.save_generic_events@second",
                        ),
                        aggregate="avg",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=None, totals=True, orderby=None, granularity=10),
            scope=MetricsScope(
                org_ids=[self.org_id], project_ids=[self.project.id], use_case_id="custom"
            ),
            limit=Limit(20),
            offset=None,
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 1
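
    # MetricsQuery also accepts a raw MQL string; the scalar term (24 * 3600 = 86400) shows up
    # in every returned bucket of the summed timeseries.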

    def test_formulas_with_scalar_formulas(self) -> None:
        query = MetricsQuery(
            query=f"sum({TransactionMRI.DURATION.value}) + (24 * 3600)",
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        for row in result["data"]:
            assert row["aggregate_value"] >= 86400
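

# Tests for the metrics layer meta helpers (fetch_metric_mris, fetch_metric_tag_keys and
# fetch_metric_tag_values), which list the stored MRIs, tag keys and tag values per project.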
class MQLMetaTest(TestCase, BaseMetricsTestCase):
    def ts(self, dt: datetime) -> int:
        return int(dt.timestamp())

    def setUp(self) -> None:
        super().setUp()
        self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
            TransactionMRI.DURATION.value: "distribution",
            TransactionMRI.USER.value: "set",
            TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
            "g:transactions/test_gauge@none": "gauge",
        }
        self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
        self.hour_ago = self.now - timedelta(hours=1)
        self.org_id = self.project.organization_id
        for mri, metric_type in self.generic_metrics.items():
            assert metric_type in {"counter", "distribution", "set", "gauge"}
            for i in range(2):
                value: int | dict[str, int]
                if metric_type == "gauge":
                    value = {
                        "min": i,
                        "max": i,
                        "sum": i,
                        "count": i,
                        "last": i,
                    }
                else:
                    value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "transaction": f"transaction_{i % 2}",
                        "status_code": "500" if i % 2 == 0 else "200",
                        "device": "BlackBerry" if i % 2 == 0 else "Nokia",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.TRANSACTIONS,
                )

    def test_fetch_metric_mris(self) -> None:
        metric_mris = fetch_metric_mris(self.org_id, [self.project.id], UseCaseID.TRANSACTIONS)
        assert len(metric_mris) == 1
        assert len(metric_mris[self.project.id]) == 4
        assert metric_mris[self.project.id] == [
            "c:transactions/count_per_root_project@none",
            "s:transactions/user@none",
            "g:transactions/test_gauge@none",
            "d:transactions/duration@millisecond",
        ]

    def test_fetch_metric_tag_keys(self) -> None:
        tag_keys = fetch_metric_tag_keys(
            self.org_id, [self.project.id], UseCaseID.TRANSACTIONS, "g:transactions/test_gauge@none"
        )
        assert len(tag_keys) == 1
        assert len(tag_keys[self.project.id]) == 3
        assert tag_keys[self.project.id] == ["status_code", "device", "transaction"]

    def test_fetch_metric_tag_values(self) -> None:
        tag_values = fetch_metric_tag_values(
            self.org_id,
            [self.project.id],
            UseCaseID.TRANSACTIONS,
            "g:transactions/test_gauge@none",
            "transaction",
        )
        assert len(tag_values) == 2
        assert tag_values == ["transaction_0", "transaction_1"]

    def test_fetch_metric_tag_values_with_prefix(self) -> None:
        tag_values = fetch_metric_tag_values(
            self.org_id,
            [self.project.id],
            UseCaseID.TRANSACTIONS,
            "g:transactions/test_gauge@none",
            "status_code",
            "5",
        )
        assert len(tag_values) == 1
        assert tag_values == ["500"]

    def test_fetch_metric_tag_values_for_multiple_projects(self) -> None:
        new_project = self.create_project(name="New Project")
        self.store_metric(
            self.org_id,
            new_project.id,
            "gauge",
            "g:transactions/test_gauge@none",
            {"status_code": "524"},
            self.ts(self.hour_ago + timedelta(minutes=10)),
            10,
            UseCaseID.TRANSACTIONS,
        )
        tag_values = fetch_metric_tag_values(
            self.org_id,
            [self.project.id, new_project.id],
            UseCaseID.TRANSACTIONS,
            "g:transactions/test_gauge@none",
            "status_code",
            "5",
        )
        assert len(tag_values) == 2
        assert tag_values == ["500", "524"]