# test_metrics_layer.py
  1. from __future__ import annotations
  2. from datetime import datetime, timedelta, timezone
  3. from typing import Literal, Mapping
  4. import pytest
  5. from snuba_sdk import (
  6. ArithmeticOperator,
  7. Column,
  8. Condition,
  9. Direction,
  10. Formula,
  11. Metric,
  12. MetricsQuery,
  13. MetricsScope,
  14. Op,
  15. Request,
  16. Rollup,
  17. Timeseries,
  18. )
  19. from sentry.exceptions import InvalidParams
  20. from sentry.sentry_metrics.use_case_id_registry import UseCaseID
  21. from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
  22. from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
  23. from sentry.snuba.metrics_layer.query import run_query
  24. from sentry.testutils.cases import BaseMetricsTestCase, TestCase
  25. pytestmark = pytest.mark.sentry_metrics
  26. class MQLTest(TestCase, BaseMetricsTestCase):
  27. def ts(self, dt: datetime) -> int:
  28. return int(dt.timestamp())
  29. def setUp(self) -> None:
  30. super().setUp()
  31. self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
  32. TransactionMRI.DURATION.value: "distribution",
  33. TransactionMRI.USER.value: "set",
  34. TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
  35. "g:transactions/test_gauge@none": "gauge",
  36. }
  37. self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
  38. SessionMRI.RAW_DURATION.value: "distribution",
  39. SessionMRI.RAW_USER.value: "set",
  40. SessionMRI.RAW_SESSION.value: "counter",
  41. }
  42. self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
  43. self.hour_ago = self.now - timedelta(hours=1)
  44. self.org_id = self.project.organization_id
  45. for mri, metric_type in self.generic_metrics.items():
  46. assert metric_type in {"counter", "distribution", "set", "gauge"}
  47. for i in range(10):
  48. value: int | dict[str, int]
  49. if metric_type == "gauge":
  50. value = {
  51. "min": i,
  52. "max": i,
  53. "sum": i,
  54. "count": i,
  55. "last": i,
  56. }
  57. else:
  58. value = i
  59. self.store_metric(
  60. self.org_id,
  61. self.project.id,
  62. metric_type,
  63. mri,
  64. {
  65. "transaction": f"transaction_{i % 2}",
  66. "status_code": "500" if i % 3 == 0 else "200",
  67. "device": "BlackBerry" if i % 2 == 0 else "Nokia",
  68. },
  69. self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  70. value,
  71. UseCaseID.TRANSACTIONS,
  72. )
  73. for mri, metric_type in self.metrics.items():
  74. assert metric_type in {"counter", "distribution", "set"}
  75. for i in range(10):
  76. value = i
  77. self.store_metric(
  78. self.org_id,
  79. self.project.id,
  80. metric_type,
  81. mri,
  82. {
  83. "release": "release_even" if i % 2 == 0 else "release_odd",
  84. },
  85. self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  86. value,
  87. UseCaseID.SESSIONS,
  88. )
  89. def test_basic_generic_metrics(self) -> None:
  90. query = MetricsQuery(
  91. query=Timeseries(
  92. metric=Metric(
  93. "transaction.duration",
  94. TransactionMRI.DURATION.value,
  95. ),
  96. aggregate="max",
  97. ),
  98. start=self.hour_ago,
  99. end=self.now,
  100. rollup=Rollup(interval=60, granularity=60),
  101. scope=MetricsScope(
  102. org_ids=[self.org_id],
  103. project_ids=[self.project.id],
  104. use_case_id=UseCaseID.TRANSACTIONS.value,
  105. ),
  106. )
  107. request = Request(
  108. dataset="generic_metrics",
  109. app_id="tests",
  110. query=query,
  111. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  112. )
  113. result = run_query(request)
  114. assert len(result["data"]) == 10
  115. rows = result["data"]
  116. for i in range(10):
  117. assert rows[i]["aggregate_value"] == i
  118. assert (
  119. rows[i]["time"]
  120. == (
  121. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  122. ).isoformat()
  123. )
  124. def test_groupby_generic_metrics(self) -> None:
  125. query = MetricsQuery(
  126. query=Timeseries(
  127. metric=Metric(
  128. "transaction.duration",
  129. TransactionMRI.DURATION.value,
  130. ),
  131. aggregate="quantiles",
  132. aggregate_params=[0.5, 0.99],
  133. groupby=[Column("transaction")],
  134. ),
  135. start=self.hour_ago,
  136. end=self.now,
  137. rollup=Rollup(interval=60, granularity=60),
  138. scope=MetricsScope(
  139. org_ids=[self.org_id],
  140. project_ids=[self.project.id],
  141. use_case_id=UseCaseID.TRANSACTIONS.value,
  142. ),
  143. )
  144. request = Request(
  145. dataset="generic_metrics",
  146. app_id="tests",
  147. query=query,
  148. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  149. )
  150. result = run_query(request)
  151. assert len(result["data"]) == 10
  152. rows = result["data"]
  153. for i in range(10):
  154. assert rows[i]["aggregate_value"] == [i, i]
  155. assert rows[i]["transaction"] == f"transaction_{i % 2}"
  156. assert (
  157. rows[i]["time"]
  158. == (
  159. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  160. ).isoformat()
  161. )
  162. def test_filters_generic_metrics(self) -> None:
  163. query = MetricsQuery(
  164. query=Timeseries(
  165. metric=Metric(
  166. "transaction.duration",
  167. TransactionMRI.DURATION.value,
  168. ),
  169. aggregate="quantiles",
  170. aggregate_params=[0.5],
  171. filters=[
  172. Condition(Column("status_code"), Op.EQ, "500"),
  173. Condition(Column("device"), Op.EQ, "BlackBerry"),
  174. ],
  175. ),
  176. start=self.hour_ago,
  177. end=self.now,
  178. rollup=Rollup(interval=60, granularity=60),
  179. scope=MetricsScope(
  180. org_ids=[self.org_id],
  181. project_ids=[self.project.id],
  182. use_case_id=UseCaseID.TRANSACTIONS.value,
  183. ),
  184. )
  185. request = Request(
  186. dataset="generic_metrics",
  187. app_id="tests",
  188. query=query,
  189. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  190. )
  191. result = run_query(request)
  192. assert len(result["data"]) == 2
  193. rows = result["data"]
  194. assert rows[0]["aggregate_value"] == [0]
  195. assert rows[1]["aggregate_value"] == [6.0]
  196. def test_complex_generic_metrics(self) -> None:
  197. query = MetricsQuery(
  198. query=Timeseries(
  199. metric=Metric(
  200. "transaction.duration",
  201. TransactionMRI.DURATION.value,
  202. ),
  203. aggregate="quantiles",
  204. aggregate_params=[0.5],
  205. filters=[
  206. Condition(Column("status_code"), Op.EQ, "500"),
  207. Condition(Column("device"), Op.EQ, "BlackBerry"),
  208. ],
  209. groupby=[Column("transaction")],
  210. ),
  211. start=self.hour_ago,
  212. end=self.now,
  213. rollup=Rollup(interval=60, granularity=60),
  214. scope=MetricsScope(
  215. org_ids=[self.org_id],
  216. project_ids=[self.project.id],
  217. use_case_id=UseCaseID.TRANSACTIONS.value,
  218. ),
  219. )
  220. request = Request(
  221. dataset="generic_metrics",
  222. app_id="tests",
  223. query=query,
  224. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  225. )
  226. result = run_query(request)
  227. assert len(result["data"]) == 2
  228. rows = result["data"]
  229. assert rows[0]["aggregate_value"] == [0]
  230. assert rows[0]["transaction"] == "transaction_0"
  231. assert rows[1]["aggregate_value"] == [6.0]
  232. assert rows[1]["transaction"] == "transaction_0"
  233. def test_totals(self) -> None:
  234. query = MetricsQuery(
  235. query=Timeseries(
  236. metric=Metric(
  237. "transaction.duration",
  238. TransactionMRI.DURATION.value,
  239. ),
  240. aggregate="max",
  241. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  242. groupby=[Column("transaction")],
  243. ),
  244. start=self.hour_ago,
  245. end=self.now,
  246. rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
  247. scope=MetricsScope(
  248. org_ids=[self.org_id],
  249. project_ids=[self.project.id],
  250. use_case_id=UseCaseID.TRANSACTIONS.value,
  251. ),
  252. )
  253. request = Request(
  254. dataset="generic_metrics",
  255. app_id="tests",
  256. query=query,
  257. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  258. )
  259. result = run_query(request)
  260. assert len(result["data"]) == 2
  261. rows = result["data"]
  262. assert rows[0]["aggregate_value"] == 7.0
  263. assert rows[1]["aggregate_value"] == 8.0
  264. def test_meta_data_in_response(self) -> None:
  265. query = MetricsQuery(
  266. query=Timeseries(
  267. metric=Metric(
  268. "transaction.duration",
  269. TransactionMRI.DURATION.value,
  270. ),
  271. aggregate="max",
  272. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  273. groupby=[Column("transaction")],
  274. ),
  275. start=self.hour_ago.replace(minute=16, second=59),
  276. end=self.now.replace(minute=16, second=59),
  277. rollup=Rollup(interval=60, granularity=60),
  278. scope=MetricsScope(
  279. org_ids=[self.org_id],
  280. project_ids=[self.project.id],
  281. use_case_id=UseCaseID.TRANSACTIONS.value,
  282. ),
  283. )
  284. request = Request(
  285. dataset="generic_metrics",
  286. app_id="tests",
  287. query=query,
  288. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  289. )
  290. result = run_query(request)
  291. assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
  292. assert result["modified_end"] == self.now.replace(minute=17, second=0)
  293. assert result["indexer_mappings"] == {
  294. "d:transactions/duration@millisecond": 9223372036854775909,
  295. "status_code": 10000,
  296. "transaction": 9223372036854776020,
  297. }
  298. def test_bad_query(self) -> None:
  299. query = MetricsQuery(
  300. query=Timeseries(
  301. metric=Metric(
  302. "transaction.duration",
  303. "not a real MRI",
  304. ),
  305. aggregate="max",
  306. ),
  307. start=self.hour_ago.replace(minute=16, second=59),
  308. end=self.now.replace(minute=16, second=59),
  309. rollup=Rollup(interval=60, granularity=60),
  310. scope=MetricsScope(
  311. org_ids=[self.org_id],
  312. project_ids=[self.project.id],
  313. use_case_id=UseCaseID.TRANSACTIONS.value,
  314. ),
  315. )
  316. request = Request(
  317. dataset="generic_metrics",
  318. app_id="tests",
  319. query=query,
  320. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  321. )
  322. with pytest.raises(InvalidParams):
  323. run_query(request)
  324. def test_interval_with_totals(self) -> None:
  325. query = MetricsQuery(
  326. query=Timeseries(
  327. metric=Metric(
  328. "transaction.duration",
  329. TransactionMRI.DURATION.value,
  330. ),
  331. aggregate="max",
  332. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  333. groupby=[Column("transaction")],
  334. ),
  335. start=self.hour_ago,
  336. end=self.now,
  337. rollup=Rollup(interval=60, totals=True, granularity=60),
  338. scope=MetricsScope(
  339. org_ids=[self.org_id],
  340. project_ids=[self.project.id],
  341. use_case_id=UseCaseID.TRANSACTIONS.value,
  342. ),
  343. )
  344. request = Request(
  345. dataset="generic_metrics",
  346. app_id="tests",
  347. query=query,
  348. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  349. )
  350. result = run_query(request)
  351. assert len(result["data"]) == 6
  352. assert result["totals"]["aggregate_value"] == 8.0
  353. def test_automatic_granularity(self) -> None:
  354. query = MetricsQuery(
  355. query=Timeseries(
  356. metric=Metric(
  357. "transaction.duration",
  358. TransactionMRI.DURATION.value,
  359. ),
  360. aggregate="max",
  361. ),
  362. start=self.hour_ago,
  363. end=self.now,
  364. rollup=Rollup(interval=120),
  365. scope=MetricsScope(
  366. org_ids=[self.org_id],
  367. project_ids=[self.project.id],
  368. ),
  369. )
  370. request = Request(
  371. dataset="generic_metrics",
  372. app_id="tests",
  373. query=query,
  374. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  375. )
  376. result = run_query(request)
  377. # There's a flaky off by one error here that is very difficult to track down
  378. # TODO: figure out why this is flaky and assert to one specific value
  379. assert len(result["data"]) in [5, 6]
  380. def test_automatic_dataset(self) -> None:
  381. query = MetricsQuery(
  382. query=Timeseries(
  383. metric=Metric(
  384. None,
  385. SessionMRI.RAW_DURATION.value,
  386. ),
  387. aggregate="max",
  388. ),
  389. start=self.hour_ago,
  390. end=self.now,
  391. rollup=Rollup(interval=60, granularity=60),
  392. scope=MetricsScope(
  393. org_ids=[self.org_id],
  394. project_ids=[self.project.id],
  395. use_case_id=UseCaseID.SESSIONS.value,
  396. ),
  397. )
  398. request = Request(
  399. dataset="generic_metrics",
  400. app_id="tests",
  401. query=query,
  402. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  403. )
  404. result = run_query(request)
  405. assert request.dataset == "metrics"
  406. assert len(result["data"]) == 10
  407. def test_gauges(self) -> None:
  408. query = MetricsQuery(
  409. query=Timeseries(
  410. metric=Metric(
  411. None,
  412. "g:transactions/test_gauge@none",
  413. ),
  414. aggregate="last",
  415. ),
  416. start=self.hour_ago,
  417. end=self.now,
  418. rollup=Rollup(interval=60, totals=True, granularity=60),
  419. scope=MetricsScope(
  420. org_ids=[self.org_id],
  421. project_ids=[self.project.id],
  422. ),
  423. )
  424. request = Request(
  425. dataset="generic_metrics",
  426. app_id="tests",
  427. query=query,
  428. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  429. )
  430. result = run_query(request)
  431. assert len(result["data"]) == 10
  432. assert result["totals"]["aggregate_value"] == 9.0
  433. def test_metrics_groupby(self) -> None:
  434. query = MetricsQuery(
  435. query=Timeseries(
  436. metric=Metric(
  437. None,
  438. SessionMRI.RAW_DURATION.value,
  439. ),
  440. aggregate="max",
  441. groupby=[Column("release")],
  442. ),
  443. start=self.hour_ago,
  444. end=self.now,
  445. rollup=Rollup(interval=60, granularity=60),
  446. scope=MetricsScope(
  447. org_ids=[self.org_id],
  448. project_ids=[self.project.id],
  449. use_case_id=UseCaseID.SESSIONS.value,
  450. ),
  451. )
  452. request = Request(
  453. dataset="metrics",
  454. app_id="tests",
  455. query=query,
  456. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  457. )
  458. result = run_query(request)
  459. assert request.dataset == "metrics"
  460. assert len(result["data"]) == 10
  461. for data_point in result["data"]:
  462. assert data_point["release"] == "release_even" or data_point["release"] == "release_odd"
  463. def test_metrics_filters(self) -> None:
  464. query = MetricsQuery(
  465. query=Timeseries(
  466. metric=Metric(
  467. None,
  468. SessionMRI.RAW_USER.value,
  469. ),
  470. aggregate="count",
  471. filters=[
  472. Condition(Column("release"), Op.EQ, "release_even"),
  473. ],
  474. ),
  475. start=self.hour_ago,
  476. end=self.now,
  477. rollup=Rollup(interval=60, granularity=60),
  478. scope=MetricsScope(
  479. org_ids=[self.org_id],
  480. project_ids=[self.project.id],
  481. use_case_id=UseCaseID.SESSIONS.value,
  482. ),
  483. )
  484. request = Request(
  485. dataset="metrics",
  486. app_id="tests",
  487. query=query,
  488. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  489. )
  490. result = run_query(request)
  491. assert request.dataset == "metrics"
  492. assert len(result["data"]) == 5
  493. def test_metrics_complex(self) -> None:
  494. query = MetricsQuery(
  495. query=Timeseries(
  496. metric=Metric(
  497. None,
  498. SessionMRI.RAW_SESSION.value,
  499. ),
  500. aggregate="count",
  501. groupby=[Column("release")],
  502. filters=[
  503. Condition(Column("release"), Op.EQ, "release_even"),
  504. ],
  505. ),
  506. start=self.hour_ago,
  507. end=self.now,
  508. rollup=Rollup(interval=60, granularity=60),
  509. scope=MetricsScope(
  510. org_ids=[self.org_id],
  511. project_ids=[self.project.id],
  512. use_case_id=UseCaseID.SESSIONS.value,
  513. ),
  514. )
  515. request = Request(
  516. dataset="metrics",
  517. app_id="tests",
  518. query=query,
  519. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  520. )
  521. result = run_query(request)
  522. assert request.dataset == "metrics"
  523. assert len(result["data"]) == 5
  524. assert any(data_point["release"] == "release_even" for data_point in result["data"])
  525. def test_metrics_correctly_reverse_resolved(self) -> None:
  526. query = MetricsQuery(
  527. query=Timeseries(
  528. metric=Metric(
  529. None,
  530. SessionMRI.RAW_SESSION.value,
  531. ),
  532. aggregate="count",
  533. groupby=[Column("release"), Column("project_id")],
  534. filters=[
  535. Condition(Column("release"), Op.EQ, "release_even"),
  536. Condition(Column("project_id"), Op.EQ, self.project.id),
  537. ],
  538. ),
  539. start=self.hour_ago,
  540. end=self.now,
  541. rollup=Rollup(interval=60, granularity=60),
  542. scope=MetricsScope(
  543. org_ids=[self.org_id],
  544. project_ids=[self.project.id],
  545. use_case_id=UseCaseID.SESSIONS.value,
  546. ),
  547. )
  548. request = Request(
  549. dataset="metrics",
  550. app_id="tests",
  551. query=query,
  552. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  553. )
  554. result = run_query(request)
  555. assert request.dataset == "metrics"
  556. assert len(result["data"]) == 5
  557. assert any(data_point["release"] == "release_even" for data_point in result["data"])
  558. assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
  559. def test_failure_rate(self) -> None:
  560. query = MetricsQuery(
  561. query=Formula(
  562. ArithmeticOperator.DIVIDE.value,
  563. [
  564. Timeseries(
  565. metric=Metric(
  566. mri=TransactionMRI.DURATION.value,
  567. ),
  568. aggregate="count",
  569. filters=[
  570. Condition(
  571. Column(TransactionTagsKey.TRANSACTION_STATUS.value),
  572. Op.NOT_IN,
  573. [
  574. TransactionStatusTagValue.OK.value,
  575. TransactionStatusTagValue.CANCELLED.value,
  576. TransactionStatusTagValue.UNKNOWN.value,
  577. ],
  578. )
  579. ],
  580. ),
  581. Timeseries(
  582. metric=Metric(
  583. mri=TransactionMRI.DURATION.value,
  584. ),
  585. aggregate="count",
  586. ),
  587. ],
  588. ),
  589. start=self.hour_ago,
  590. end=self.now,
  591. rollup=Rollup(interval=60, totals=True, granularity=60),
  592. scope=MetricsScope(
  593. org_ids=[self.org_id],
  594. project_ids=[self.project.id],
  595. ),
  596. )
  597. request = Request(
  598. dataset="generic_metrics",
  599. app_id="tests",
  600. query=query,
  601. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  602. )
  603. result = run_query(request)
  604. assert len(result["data"]) == 10
  605. assert result["totals"]["aggregate_value"] == 1.0
  606. def test_aggregate_aliases(self) -> None:
  607. query = MetricsQuery(
  608. query=Timeseries(
  609. metric=Metric(
  610. "transaction.duration",
  611. TransactionMRI.DURATION.value,
  612. ),
  613. aggregate="p95",
  614. ),
  615. start=self.hour_ago,
  616. end=self.now,
  617. rollup=Rollup(interval=60, granularity=60),
  618. scope=MetricsScope(
  619. org_ids=[self.org_id],
  620. project_ids=[self.project.id],
  621. use_case_id=UseCaseID.TRANSACTIONS.value,
  622. ),
  623. )
  624. request = Request(
  625. dataset="generic_metrics",
  626. app_id="tests",
  627. query=query,
  628. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  629. )
  630. result = run_query(request)
  631. assert len(result["data"]) == 10
  632. rows = result["data"]
  633. for i in range(10):
  634. assert rows[i]["aggregate_value"] == [i]
  635. assert (
  636. rows[i]["time"]
  637. == (
  638. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  639. ).isoformat()
  640. )
  641. def test_dataset_correctness(self) -> None:
  642. query = MetricsQuery(
  643. query=Timeseries(
  644. metric=Metric(
  645. "transaction.duration",
  646. TransactionMRI.DURATION.value,
  647. ),
  648. aggregate="quantiles",
  649. aggregate_params=[0.5, 0.99],
  650. groupby=[Column("transaction")],
  651. filters=[
  652. Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
  653. ],
  654. ),
  655. start=self.hour_ago,
  656. end=self.now,
  657. rollup=Rollup(interval=60, granularity=60),
  658. scope=MetricsScope(
  659. org_ids=[self.org_id],
  660. project_ids=[self.project.id],
  661. use_case_id=UseCaseID.TRANSACTIONS.value,
  662. ),
  663. )
  664. request = Request(
  665. dataset="metrics",
  666. app_id="tests",
  667. query=query,
  668. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  669. )
  670. result = run_query(request)
  671. assert len(result["data"]) == 10
  672. rows = result["data"]
  673. for i in range(10):
  674. assert rows[i]["aggregate_value"] == [i, i]
  675. assert rows[i]["transaction"] == f"transaction_{i % 2}"
  676. assert (
  677. rows[i]["time"]
  678. == (
  679. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  680. ).isoformat()
  681. )