# test_metrics_layer.py
  1. from __future__ import annotations
  2. from collections.abc import Mapping
  3. from datetime import datetime, timedelta, timezone
  4. from typing import Literal
  5. import pytest
  6. from snuba_sdk import (
  7. ArithmeticOperator,
  8. Column,
  9. Condition,
  10. Direction,
  11. Formula,
  12. Limit,
  13. Metric,
  14. MetricsQuery,
  15. MetricsScope,
  16. Op,
  17. Request,
  18. Rollup,
  19. Timeseries,
  20. )
  21. from sentry.exceptions import InvalidParams
  22. from sentry.sentry_metrics.use_case_id_registry import UseCaseID
  23. from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
  24. from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
  25. from sentry.snuba.metrics_layer.query import bulk_run_query, run_query
  26. from sentry.testutils.cases import BaseMetricsTestCase, TestCase
  27. pytestmark = pytest.mark.sentry_metrics
  28. class MQLTest(TestCase, BaseMetricsTestCase):
  29. def ts(self, dt: datetime) -> int:
  30. return int(dt.timestamp())
  31. def setUp(self) -> None:
  32. super().setUp()
  33. self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
  34. TransactionMRI.DURATION.value: "distribution",
  35. TransactionMRI.USER.value: "set",
  36. TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
  37. "g:transactions/test_gauge@none": "gauge",
  38. }
  39. self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
  40. SessionMRI.RAW_DURATION.value: "distribution",
  41. SessionMRI.RAW_USER.value: "set",
  42. SessionMRI.RAW_SESSION.value: "counter",
  43. }
  44. self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
  45. self.hour_ago = self.now - timedelta(hours=1)
  46. self.org_id = self.project.organization_id
  47. for mri, metric_type in self.generic_metrics.items():
  48. assert metric_type in {"counter", "distribution", "set", "gauge"}
  49. for i in range(10):
  50. value: int | dict[str, int]
  51. if metric_type == "gauge":
  52. value = {
  53. "min": i,
  54. "max": i,
  55. "sum": i,
  56. "count": i,
  57. "last": i,
  58. }
  59. else:
  60. value = i
  61. self.store_metric(
  62. self.org_id,
  63. self.project.id,
  64. metric_type,
  65. mri,
  66. {
  67. "transaction": f"transaction_{i % 2}",
  68. "status_code": "500" if i % 3 == 0 else "200",
  69. "device": "BlackBerry" if i % 2 == 0 else "Nokia",
  70. },
  71. self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  72. value,
  73. UseCaseID.TRANSACTIONS,
  74. )
  75. for mri, metric_type in self.metrics.items():
  76. assert metric_type in {"counter", "distribution", "set"}
  77. for i in range(10):
  78. value = i
  79. self.store_metric(
  80. self.org_id,
  81. self.project.id,
  82. metric_type,
  83. mri,
  84. {
  85. "release": "release_even" if i % 2 == 0 else "release_odd",
  86. },
  87. self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  88. value,
  89. UseCaseID.SESSIONS,
  90. )
  91. def test_basic_generic_metrics(self) -> None:
  92. query = MetricsQuery(
  93. query=Timeseries(
  94. metric=Metric(
  95. "transaction.duration",
  96. TransactionMRI.DURATION.value,
  97. ),
  98. aggregate="max",
  99. ),
  100. start=self.hour_ago,
  101. end=self.now,
  102. rollup=Rollup(interval=60, granularity=60),
  103. scope=MetricsScope(
  104. org_ids=[self.org_id],
  105. project_ids=[self.project.id],
  106. use_case_id=UseCaseID.TRANSACTIONS.value,
  107. ),
  108. )
  109. request = Request(
  110. dataset="generic_metrics",
  111. app_id="tests",
  112. query=query,
  113. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  114. )
  115. result = run_query(request)
  116. assert len(result["data"]) == 10
  117. rows = result["data"]
  118. for i in range(10):
  119. assert rows[i]["aggregate_value"] == i
  120. assert (
  121. rows[i]["time"]
  122. == (
  123. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  124. ).isoformat()
  125. )
  126. def test_basic_bulk_generic_metrics(self) -> None:
  127. query = MetricsQuery(
  128. query=None,
  129. start=self.hour_ago,
  130. end=self.now,
  131. rollup=Rollup(interval=60, granularity=60),
  132. scope=MetricsScope(
  133. org_ids=[self.org_id],
  134. project_ids=[self.project.id],
  135. use_case_id=UseCaseID.TRANSACTIONS.value,
  136. ),
  137. )
  138. query1 = query.set_query(
  139. Timeseries(
  140. metric=Metric(
  141. "transaction.duration",
  142. TransactionMRI.DURATION.value,
  143. ),
  144. aggregate="max",
  145. )
  146. )
  147. query2 = query.set_query(
  148. Timeseries(
  149. metric=Metric(
  150. public_name=None,
  151. mri=TransactionMRI.USER.value,
  152. ),
  153. aggregate="uniq",
  154. )
  155. )
  156. request1 = Request(
  157. dataset="generic_metrics",
  158. app_id="tests",
  159. query=query1,
  160. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  161. )
  162. request2 = Request(
  163. dataset="generic_metrics",
  164. app_id="tests",
  165. query=query2,
  166. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  167. )
  168. results = bulk_run_query([request1, request2])
  169. assert len(results) == 2
  170. result = results[0] # Distribution
  171. rows = result["data"]
  172. for i in range(10):
  173. assert rows[i]["aggregate_value"] == i
  174. assert (
  175. rows[i]["time"]
  176. == (
  177. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  178. ).isoformat()
  179. )
  180. def test_groupby_generic_metrics(self) -> None:
  181. query = MetricsQuery(
  182. query=Timeseries(
  183. metric=Metric(
  184. "transaction.duration",
  185. TransactionMRI.DURATION.value,
  186. ),
  187. aggregate="quantiles",
  188. aggregate_params=[0.5, 0.99],
  189. groupby=[Column("transaction")],
  190. ),
  191. start=self.hour_ago,
  192. end=self.now,
  193. rollup=Rollup(interval=60, granularity=60),
  194. scope=MetricsScope(
  195. org_ids=[self.org_id],
  196. project_ids=[self.project.id],
  197. use_case_id=UseCaseID.TRANSACTIONS.value,
  198. ),
  199. )
  200. request = Request(
  201. dataset="generic_metrics",
  202. app_id="tests",
  203. query=query,
  204. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  205. )
  206. result = run_query(request)
  207. assert len(result["data"]) == 10
  208. rows = result["data"]
  209. for i in range(10):
  210. assert rows[i]["aggregate_value"] == [i, i]
  211. assert rows[i]["transaction"] == f"transaction_{i % 2}"
  212. assert (
  213. rows[i]["time"]
  214. == (
  215. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  216. ).isoformat()
  217. )
  218. def test_filters_generic_metrics(self) -> None:
  219. query = MetricsQuery(
  220. query=Timeseries(
  221. metric=Metric(
  222. "transaction.duration",
  223. TransactionMRI.DURATION.value,
  224. ),
  225. aggregate="quantiles",
  226. aggregate_params=[0.5],
  227. filters=[
  228. Condition(Column("status_code"), Op.EQ, "500"),
  229. Condition(Column("device"), Op.EQ, "BlackBerry"),
  230. ],
  231. ),
  232. start=self.hour_ago,
  233. end=self.now,
  234. rollup=Rollup(interval=60, granularity=60),
  235. scope=MetricsScope(
  236. org_ids=[self.org_id],
  237. project_ids=[self.project.id],
  238. use_case_id=UseCaseID.TRANSACTIONS.value,
  239. ),
  240. )
  241. request = Request(
  242. dataset="generic_metrics",
  243. app_id="tests",
  244. query=query,
  245. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  246. )
  247. result = run_query(request)
  248. assert len(result["data"]) == 2
  249. rows = result["data"]
  250. # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
  251. # For now handle both cases for backwards compatibility
  252. assert rows[0]["aggregate_value"] in ([0], 0)
  253. assert rows[1]["aggregate_value"] in ([6.0], 6)
  254. def test_complex_generic_metrics(self) -> None:
  255. query = MetricsQuery(
  256. query=Timeseries(
  257. metric=Metric(
  258. "transaction.duration",
  259. TransactionMRI.DURATION.value,
  260. ),
  261. aggregate="quantiles",
  262. aggregate_params=[0.5],
  263. filters=[
  264. Condition(Column("status_code"), Op.EQ, "500"),
  265. Condition(Column("device"), Op.EQ, "BlackBerry"),
  266. ],
  267. groupby=[Column("transaction")],
  268. ),
  269. start=self.hour_ago,
  270. end=self.now,
  271. rollup=Rollup(interval=60, granularity=60),
  272. scope=MetricsScope(
  273. org_ids=[self.org_id],
  274. project_ids=[self.project.id],
  275. use_case_id=UseCaseID.TRANSACTIONS.value,
  276. ),
  277. )
  278. request = Request(
  279. dataset="generic_metrics",
  280. app_id="tests",
  281. query=query,
  282. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  283. )
  284. result = run_query(request)
  285. assert len(result["data"]) == 2
  286. rows = result["data"]
  287. # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
  288. # For now handle both cases for backwards compatibility
  289. assert rows[0]["aggregate_value"] in ([0], 0)
  290. assert rows[0]["transaction"] == "transaction_0"
  291. assert rows[1]["aggregate_value"] in ([6.0], 6)
  292. assert rows[1]["transaction"] == "transaction_0"
  293. def test_totals(self) -> None:
  294. query = MetricsQuery(
  295. query=Timeseries(
  296. metric=Metric(
  297. "transaction.duration",
  298. TransactionMRI.DURATION.value,
  299. ),
  300. aggregate="max",
  301. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  302. groupby=[Column("transaction")],
  303. ),
  304. start=self.hour_ago,
  305. end=self.now,
  306. rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
  307. scope=MetricsScope(
  308. org_ids=[self.org_id],
  309. project_ids=[self.project.id],
  310. use_case_id=UseCaseID.TRANSACTIONS.value,
  311. ),
  312. )
  313. request = Request(
  314. dataset="generic_metrics",
  315. app_id="tests",
  316. query=query,
  317. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  318. )
  319. result = run_query(request)
  320. assert len(result["data"]) == 2
  321. rows = result["data"]
  322. assert rows[0]["aggregate_value"] == 7.0
  323. assert rows[1]["aggregate_value"] == 8.0
  324. def test_meta_data_in_response(self) -> None:
  325. query = MetricsQuery(
  326. query=Timeseries(
  327. metric=Metric(
  328. "transaction.duration",
  329. TransactionMRI.DURATION.value,
  330. ),
  331. aggregate="max",
  332. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  333. groupby=[Column("transaction")],
  334. ),
  335. start=self.hour_ago.replace(minute=16, second=59),
  336. end=self.now.replace(minute=16, second=59),
  337. rollup=Rollup(interval=60, granularity=60),
  338. scope=MetricsScope(
  339. org_ids=[self.org_id],
  340. project_ids=[self.project.id],
  341. use_case_id=UseCaseID.TRANSACTIONS.value,
  342. ),
  343. )
  344. request = Request(
  345. dataset="generic_metrics",
  346. app_id="tests",
  347. query=query,
  348. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  349. )
  350. result = run_query(request)
  351. assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
  352. assert result["modified_end"] == self.now.replace(minute=17, second=0)
  353. assert result["indexer_mappings"] == {
  354. "d:transactions/duration@millisecond": 9223372036854775909,
  355. "status_code": 10000,
  356. "transaction": 9223372036854776020,
  357. }
  358. def test_bad_query(self) -> None:
  359. query = MetricsQuery(
  360. query=Timeseries(
  361. metric=Metric(
  362. "transaction.duration",
  363. "not a real MRI",
  364. ),
  365. aggregate="max",
  366. ),
  367. start=self.hour_ago.replace(minute=16, second=59),
  368. end=self.now.replace(minute=16, second=59),
  369. rollup=Rollup(interval=60, granularity=60),
  370. scope=MetricsScope(
  371. org_ids=[self.org_id],
  372. project_ids=[self.project.id],
  373. use_case_id=UseCaseID.TRANSACTIONS.value,
  374. ),
  375. )
  376. request = Request(
  377. dataset="generic_metrics",
  378. app_id="tests",
  379. query=query,
  380. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  381. )
  382. with pytest.raises(InvalidParams):
  383. run_query(request)
  384. def test_interval_with_totals(self) -> None:
  385. query = MetricsQuery(
  386. query=Timeseries(
  387. metric=Metric(
  388. "transaction.duration",
  389. TransactionMRI.DURATION.value,
  390. ),
  391. aggregate="max",
  392. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  393. groupby=[Column("transaction")],
  394. ),
  395. start=self.hour_ago,
  396. end=self.now,
  397. rollup=Rollup(interval=60, totals=True, granularity=60),
  398. scope=MetricsScope(
  399. org_ids=[self.org_id],
  400. project_ids=[self.project.id],
  401. use_case_id=UseCaseID.TRANSACTIONS.value,
  402. ),
  403. )
  404. request = Request(
  405. dataset="generic_metrics",
  406. app_id="tests",
  407. query=query,
  408. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  409. )
  410. result = run_query(request)
  411. assert len(result["data"]) == 6
  412. assert result["totals"]["aggregate_value"] == 8.0
  413. def test_automatic_granularity(self) -> None:
  414. query = MetricsQuery(
  415. query=Timeseries(
  416. metric=Metric(
  417. "transaction.duration",
  418. TransactionMRI.DURATION.value,
  419. ),
  420. aggregate="max",
  421. ),
  422. start=self.hour_ago,
  423. end=self.now,
  424. rollup=Rollup(interval=120),
  425. scope=MetricsScope(
  426. org_ids=[self.org_id],
  427. project_ids=[self.project.id],
  428. ),
  429. )
  430. request = Request(
  431. dataset="generic_metrics",
  432. app_id="tests",
  433. query=query,
  434. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  435. )
  436. result = run_query(request)
  437. # There's a flaky off by one error here that is very difficult to track down
  438. # TODO: figure out why this is flaky and assert to one specific value
  439. assert len(result["data"]) in [5, 6]
  440. def test_automatic_dataset(self) -> None:
  441. query = MetricsQuery(
  442. query=Timeseries(
  443. metric=Metric(
  444. None,
  445. SessionMRI.RAW_DURATION.value,
  446. ),
  447. aggregate="max",
  448. ),
  449. start=self.hour_ago,
  450. end=self.now,
  451. rollup=Rollup(interval=60, granularity=60),
  452. scope=MetricsScope(
  453. org_ids=[self.org_id],
  454. project_ids=[self.project.id],
  455. use_case_id=UseCaseID.SESSIONS.value,
  456. ),
  457. )
  458. request = Request(
  459. dataset="generic_metrics",
  460. app_id="tests",
  461. query=query,
  462. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  463. )
  464. result = run_query(request)
  465. assert request.dataset == "metrics"
  466. assert len(result["data"]) == 10
  467. def test_gauges(self) -> None:
  468. query = MetricsQuery(
  469. query=Timeseries(
  470. metric=Metric(
  471. None,
  472. "g:transactions/test_gauge@none",
  473. ),
  474. aggregate="last",
  475. ),
  476. start=self.hour_ago,
  477. end=self.now,
  478. rollup=Rollup(interval=60, totals=True, granularity=60),
  479. scope=MetricsScope(
  480. org_ids=[self.org_id],
  481. project_ids=[self.project.id],
  482. ),
  483. )
  484. request = Request(
  485. dataset="generic_metrics",
  486. app_id="tests",
  487. query=query,
  488. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  489. )
  490. result = run_query(request)
  491. assert len(result["data"]) == 10
  492. assert result["totals"]["aggregate_value"] == 9.0
  493. def test_metrics_groupby(self) -> None:
  494. query = MetricsQuery(
  495. query=Timeseries(
  496. metric=Metric(
  497. None,
  498. SessionMRI.RAW_DURATION.value,
  499. ),
  500. aggregate="max",
  501. groupby=[Column("release")],
  502. ),
  503. start=self.hour_ago,
  504. end=self.now,
  505. rollup=Rollup(interval=60, granularity=60),
  506. scope=MetricsScope(
  507. org_ids=[self.org_id],
  508. project_ids=[self.project.id],
  509. use_case_id=UseCaseID.SESSIONS.value,
  510. ),
  511. )
  512. request = Request(
  513. dataset="metrics",
  514. app_id="tests",
  515. query=query,
  516. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  517. )
  518. result = run_query(request)
  519. assert request.dataset == "metrics"
  520. assert len(result["data"]) == 10
  521. for data_point in result["data"]:
  522. assert data_point["release"] == "release_even" or data_point["release"] == "release_odd"
  523. def test_metrics_filters(self) -> None:
  524. query = MetricsQuery(
  525. query=Timeseries(
  526. metric=Metric(
  527. None,
  528. SessionMRI.RAW_USER.value,
  529. ),
  530. aggregate="count",
  531. filters=[
  532. Condition(Column("release"), Op.EQ, "release_even"),
  533. ],
  534. ),
  535. start=self.hour_ago,
  536. end=self.now,
  537. rollup=Rollup(interval=60, granularity=60),
  538. scope=MetricsScope(
  539. org_ids=[self.org_id],
  540. project_ids=[self.project.id],
  541. use_case_id=UseCaseID.SESSIONS.value,
  542. ),
  543. )
  544. request = Request(
  545. dataset="metrics",
  546. app_id="tests",
  547. query=query,
  548. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  549. )
  550. result = run_query(request)
  551. assert request.dataset == "metrics"
  552. assert len(result["data"]) == 5
  553. def test_metrics_complex(self) -> None:
  554. query = MetricsQuery(
  555. query=Timeseries(
  556. metric=Metric(
  557. None,
  558. SessionMRI.RAW_SESSION.value,
  559. ),
  560. aggregate="count",
  561. groupby=[Column("release")],
  562. filters=[
  563. Condition(Column("release"), Op.EQ, "release_even"),
  564. ],
  565. ),
  566. start=self.hour_ago,
  567. end=self.now,
  568. rollup=Rollup(interval=60, granularity=60),
  569. scope=MetricsScope(
  570. org_ids=[self.org_id],
  571. project_ids=[self.project.id],
  572. use_case_id=UseCaseID.SESSIONS.value,
  573. ),
  574. )
  575. request = Request(
  576. dataset="metrics",
  577. app_id="tests",
  578. query=query,
  579. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  580. )
  581. result = run_query(request)
  582. assert request.dataset == "metrics"
  583. assert len(result["data"]) == 5
  584. assert any(data_point["release"] == "release_even" for data_point in result["data"])
  585. def test_metrics_correctly_reverse_resolved(self) -> None:
  586. query = MetricsQuery(
  587. query=Timeseries(
  588. metric=Metric(
  589. None,
  590. SessionMRI.RAW_SESSION.value,
  591. ),
  592. aggregate="count",
  593. groupby=[Column("release"), Column("project_id")],
  594. filters=[
  595. Condition(Column("release"), Op.EQ, "release_even"),
  596. Condition(Column("project_id"), Op.EQ, self.project.id),
  597. ],
  598. ),
  599. start=self.hour_ago,
  600. end=self.now,
  601. rollup=Rollup(interval=60, granularity=60),
  602. scope=MetricsScope(
  603. org_ids=[self.org_id],
  604. project_ids=[self.project.id],
  605. use_case_id=UseCaseID.SESSIONS.value,
  606. ),
  607. )
  608. request = Request(
  609. dataset="metrics",
  610. app_id="tests",
  611. query=query,
  612. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  613. )
  614. result = run_query(request)
  615. assert request.dataset == "metrics"
  616. assert len(result["data"]) == 5
  617. assert any(data_point["release"] == "release_even" for data_point in result["data"])
  618. assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
  619. def test_failure_rate(self) -> None:
  620. query = MetricsQuery(
  621. query=Formula(
  622. ArithmeticOperator.DIVIDE.value,
  623. [
  624. Timeseries(
  625. metric=Metric(
  626. mri=TransactionMRI.DURATION.value,
  627. ),
  628. aggregate="count",
  629. filters=[
  630. Condition(
  631. Column(TransactionTagsKey.TRANSACTION_STATUS.value),
  632. Op.NOT_IN,
  633. [
  634. TransactionStatusTagValue.OK.value,
  635. TransactionStatusTagValue.CANCELLED.value,
  636. TransactionStatusTagValue.UNKNOWN.value,
  637. ],
  638. )
  639. ],
  640. ),
  641. Timeseries(
  642. metric=Metric(
  643. mri=TransactionMRI.DURATION.value,
  644. ),
  645. aggregate="count",
  646. ),
  647. ],
  648. ),
  649. start=self.hour_ago,
  650. end=self.now,
  651. rollup=Rollup(interval=60, totals=True, granularity=60),
  652. scope=MetricsScope(
  653. org_ids=[self.org_id],
  654. project_ids=[self.project.id],
  655. ),
  656. )
  657. request = Request(
  658. dataset="generic_metrics",
  659. app_id="tests",
  660. query=query,
  661. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  662. )
  663. result = run_query(request)
  664. assert len(result["data"]) == 10
  665. assert result["totals"]["aggregate_value"] == 1.0
  666. def test_aggregate_aliases(self) -> None:
  667. query = MetricsQuery(
  668. query=Timeseries(
  669. metric=Metric(
  670. "transaction.duration",
  671. TransactionMRI.DURATION.value,
  672. ),
  673. aggregate="p95",
  674. ),
  675. start=self.hour_ago,
  676. end=self.now,
  677. rollup=Rollup(interval=60, granularity=60),
  678. scope=MetricsScope(
  679. org_ids=[self.org_id],
  680. project_ids=[self.project.id],
  681. use_case_id=UseCaseID.TRANSACTIONS.value,
  682. ),
  683. )
  684. request = Request(
  685. dataset="generic_metrics",
  686. app_id="tests",
  687. query=query,
  688. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  689. )
  690. result = run_query(request)
  691. assert len(result["data"]) == 10
  692. rows = result["data"]
  693. for i in range(10):
  694. # TODO: Snuba is going to start returning 0 instead of [0] for single value aggregates
  695. # For now handle both cases for backwards compatibility
  696. assert rows[i]["aggregate_value"] in ([i], i)
  697. assert (
  698. rows[i]["time"]
  699. == (
  700. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  701. ).isoformat()
  702. )
  703. def test_dataset_correctness(self) -> None:
  704. query = MetricsQuery(
  705. query=Timeseries(
  706. metric=Metric(
  707. "transaction.duration",
  708. TransactionMRI.DURATION.value,
  709. ),
  710. aggregate="quantiles",
  711. aggregate_params=[0.5, 0.99],
  712. groupby=[Column("transaction")],
  713. filters=[
  714. Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
  715. ],
  716. ),
  717. start=self.hour_ago,
  718. end=self.now,
  719. rollup=Rollup(interval=60, granularity=60),
  720. scope=MetricsScope(
  721. org_ids=[self.org_id],
  722. project_ids=[self.project.id],
  723. use_case_id=UseCaseID.TRANSACTIONS.value,
  724. ),
  725. )
  726. request = Request(
  727. dataset="metrics",
  728. app_id="tests",
  729. query=query,
  730. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  731. )
  732. result = run_query(request)
  733. assert len(result["data"]) == 10
  734. rows = result["data"]
  735. for i in range(10):
  736. assert rows[i]["aggregate_value"] == [i, i]
  737. assert rows[i]["transaction"] == f"transaction_{i % 2}"
  738. assert (
  739. rows[i]["time"]
  740. == (
  741. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  742. ).isoformat()
  743. )
  744. def test_resolve_all_mris(self) -> None:
  745. for mri in [
  746. "d:custom/sentry.event_manager.save@second",
  747. "d:custom/sentry.event_manager.save_generic_events@second",
  748. ]:
  749. self.store_metric(
  750. self.org_id,
  751. self.project.id,
  752. "distribution",
  753. mri,
  754. {
  755. "transaction": "transaction_1",
  756. "status_code": "200",
  757. "device": "BlackBerry",
  758. },
  759. self.ts(self.hour_ago + timedelta(minutes=5)),
  760. 1,
  761. UseCaseID.CUSTOM,
  762. )
  763. query = MetricsQuery(
  764. query=Formula(
  765. function_name="plus",
  766. parameters=[
  767. Timeseries(
  768. metric=Metric(
  769. mri="d:custom/sentry.event_manager.save@second",
  770. ),
  771. aggregate="avg",
  772. ),
  773. Timeseries(
  774. metric=Metric(
  775. mri="d:custom/sentry.event_manager.save_generic_events@second",
  776. ),
  777. aggregate="avg",
  778. ),
  779. ],
  780. ),
  781. start=self.hour_ago,
  782. end=self.now,
  783. rollup=Rollup(interval=None, totals=True, orderby=None, granularity=10),
  784. scope=MetricsScope(
  785. org_ids=[self.org_id], project_ids=[self.project.id], use_case_id="custom"
  786. ),
  787. limit=Limit(20),
  788. offset=None,
  789. )
  790. request = Request(
  791. dataset="generic_metrics",
  792. app_id="tests",
  793. query=query,
  794. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  795. )
  796. result = run_query(request)
  797. assert len(result["data"]) == 1