# test_metrics_layer.py

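# Integration tests for the metrics layer: builds MetricsQuery requests with
# snuba_sdk and runs them through run_query against both the generic_metrics
# (transactions) and metrics (sessions) datasets.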
from __future__ import annotations

from collections.abc import Mapping
from datetime import datetime, timedelta, timezone
from typing import Literal

import pytest
from snuba_sdk import (
    ArithmeticOperator,
    Column,
    Condition,
    Direction,
    Formula,
    Metric,
    MetricsQuery,
    MetricsScope,
    Op,
    Request,
    Rollup,
    Timeseries,
)

from sentry.exceptions import InvalidParams
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
from sentry.snuba.metrics_layer.query import run_query
from sentry.testutils.cases import BaseMetricsTestCase, TestCase

pytestmark = pytest.mark.sentry_metrics
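

# The fixture below stores ten data points per metric (values 0-9, one per
# minute starting an hour ago). Transaction metrics carry transaction,
# status_code and device tags; session metrics carry a release tag. The
# assertions in the individual tests rely on that shape.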
class MQLTest(TestCase, BaseMetricsTestCase):
    def ts(self, dt: datetime) -> int:
        return int(dt.timestamp())

    def setUp(self) -> None:
        super().setUp()
        self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
            TransactionMRI.DURATION.value: "distribution",
            TransactionMRI.USER.value: "set",
            TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
            "g:transactions/test_gauge@none": "gauge",
        }
        self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
            SessionMRI.RAW_DURATION.value: "distribution",
            SessionMRI.RAW_USER.value: "set",
            SessionMRI.RAW_SESSION.value: "counter",
        }
        self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
        self.hour_ago = self.now - timedelta(hours=1)
        self.org_id = self.project.organization_id
        for mri, metric_type in self.generic_metrics.items():
            assert metric_type in {"counter", "distribution", "set", "gauge"}
            for i in range(10):
                value: int | dict[str, int]
                if metric_type == "gauge":
                    value = {
                        "min": i,
                        "max": i,
                        "sum": i,
                        "count": i,
                        "last": i,
                    }
                else:
                    value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "transaction": f"transaction_{i % 2}",
                        "status_code": "500" if i % 3 == 0 else "200",
                        "device": "BlackBerry" if i % 2 == 0 else "Nokia",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.TRANSACTIONS,
                )
        for mri, metric_type in self.metrics.items():
            assert metric_type in {"counter", "distribution", "set"}
            for i in range(10):
                value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "release": "release_even" if i % 2 == 0 else "release_odd",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.SESSIONS,
                )
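
    # max(transaction.duration) per one-minute bucket; the seeded values 0-9 come
    # back in order, one per interval.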
    def test_basic_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
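
    # Quantiles grouped by the transaction tag; each bucket holds a single value,
    # so p50 and p99 are both equal to it.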
    def test_groupby_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
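
    # Tag filters combine with AND: only i = 0 and i = 6 were stored with both
    # status_code=500 and device=BlackBerry.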
    def test_filters_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == [0]
        assert rows[1]["aggregate_value"] == [6.0]
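
    # Same filters as above plus a group-by; both surviving rows fall under
    # transaction_0.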
    def test_complex_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == [0]
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] == [6.0]
        assert rows[1]["transaction"] == "transaction_0"
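
    # totals=True without an interval returns one row per group, ordered ascending.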
    def test_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == 7.0
        assert rows[1]["aggregate_value"] == 8.0
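
    # The response also carries the interval-aligned start/end and the indexer
    # mappings used to resolve the MRI and tag keys.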
    def test_meta_data_in_response(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
        assert result["modified_end"] == self.now.replace(minute=17, second=0)
        assert result["indexer_mappings"] == {
            "d:transactions/duration@millisecond": 9223372036854775909,
            "status_code": 10000,
            "transaction": 9223372036854776020,
        }
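
    # An invalid MRI string makes run_query raise InvalidParams.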
    def test_bad_query(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    "not a real MRI",
                ),
                aggregate="max",
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        with pytest.raises(InvalidParams):
            run_query(request)
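
    # interval and totals together return both the per-bucket series and an
    # overall totals value.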
    def test_interval_with_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 6
        assert result["totals"]["aggregate_value"] == 8.0
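
    # No explicit granularity: the layer is expected to derive one from the
    # interval.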
    def test_automatic_granularity(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=120),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        # There is a flaky off-by-one error here that is very difficult to track down.
        # TODO: figure out why this is flaky and assert a single specific value.
        assert len(result["data"]) in [5, 6]
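
    # A sessions MRI sent to the "generic_metrics" dataset should be rerouted to
    # "metrics", which the mutated request reflects.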
    def test_automatic_dataset(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
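
    # Gauges support the "last" aggregate; the final seeded value is 9.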
    def test_gauges(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    "g:transactions/test_gauge@none",
                ),
                aggregate="last",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 9.0
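
    # Group-by on the release tag against the sessions ("metrics") dataset.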
    def test_metrics_groupby(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
                groupby=[Column("release")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
        for data_point in result["data"]:
            assert data_point["release"] == "release_even" or data_point["release"] == "release_odd"
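
    # Filtering on release=release_even keeps only the five even-numbered points.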
    def test_metrics_filters(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_USER.value,
                ),
                aggregate="count",
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
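
    # Filter and group-by combined on the sessions dataset.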
    def test_metrics_complex(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])
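
    # Indexed tag values (release) and plain columns (project_id) should both come
    # back reverse-resolved to their original values.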
    def test_metrics_correctly_reverse_resolved(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release"), Column("project_id")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                    Condition(Column("project_id"), Op.EQ, self.project.id),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])
        assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
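
    # Failure rate as a Formula: failed-transaction count divided by total count.
    # The seeded rows have no transaction.status tag, so every row matches the
    # NOT IN filter and the rate is 1.0.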
    def test_failure_rate(self) -> None:
        query = MetricsQuery(
            query=Formula(
                ArithmeticOperator.DIVIDE.value,
                [
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                        filters=[
                            Condition(
                                Column(TransactionTagsKey.TRANSACTION_STATUS.value),
                                Op.NOT_IN,
                                [
                                    TransactionStatusTagValue.OK.value,
                                    TransactionStatusTagValue.CANCELLED.value,
                                    TransactionStatusTagValue.UNKNOWN.value,
                                ],
                            )
                        ],
                    ),
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 1.0
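
    # The "p95" alias appears to resolve to a quantile aggregate, so each value
    # comes back as a single-element list.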
    def test_aggregate_aliases(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="p95",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i]
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
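
    # A transactions query issued with dataset="metrics" should still return the
    # expected generic-metrics rows.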
    def test_dataset_correctness(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
                filters=[
                    Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )