# test_metrics_layer.py
  1. from __future__ import annotations
  2. from collections.abc import Mapping
  3. from datetime import datetime, timedelta, timezone
  4. from typing import Literal
  5. import pytest
  6. from snuba_sdk import (
  7. ArithmeticOperator,
  8. Column,
  9. Condition,
  10. Direction,
  11. Formula,
  12. Metric,
  13. MetricsQuery,
  14. MetricsScope,
  15. Op,
  16. Request,
  17. Rollup,
  18. Timeseries,
  19. )
  20. from sentry.exceptions import InvalidParams
  21. from sentry.sentry_metrics.use_case_id_registry import UseCaseID
  22. from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
  23. from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
  24. from sentry.snuba.metrics_layer.query import bulk_run_query, run_query
  25. from sentry.testutils.cases import BaseMetricsTestCase, TestCase
  26. pytestmark = pytest.mark.sentry_metrics
  27. class MQLTest(TestCase, BaseMetricsTestCase):
  28. def ts(self, dt: datetime) -> int:
  29. return int(dt.timestamp())
  30. def setUp(self) -> None:
  31. super().setUp()
  32. self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
  33. TransactionMRI.DURATION.value: "distribution",
  34. TransactionMRI.USER.value: "set",
  35. TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
  36. "g:transactions/test_gauge@none": "gauge",
  37. }
  38. self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
  39. SessionMRI.RAW_DURATION.value: "distribution",
  40. SessionMRI.RAW_USER.value: "set",
  41. SessionMRI.RAW_SESSION.value: "counter",
  42. }
  43. self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
  44. self.hour_ago = self.now - timedelta(hours=1)
  45. self.org_id = self.project.organization_id
  46. for mri, metric_type in self.generic_metrics.items():
  47. assert metric_type in {"counter", "distribution", "set", "gauge"}
  48. for i in range(10):
  49. value: int | dict[str, int]
  50. if metric_type == "gauge":
  51. value = {
  52. "min": i,
  53. "max": i,
  54. "sum": i,
  55. "count": i,
  56. "last": i,
  57. }
  58. else:
  59. value = i
  60. self.store_metric(
  61. self.org_id,
  62. self.project.id,
  63. metric_type,
  64. mri,
  65. {
  66. "transaction": f"transaction_{i % 2}",
  67. "status_code": "500" if i % 3 == 0 else "200",
  68. "device": "BlackBerry" if i % 2 == 0 else "Nokia",
  69. },
  70. self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  71. value,
  72. UseCaseID.TRANSACTIONS,
  73. )
  74. for mri, metric_type in self.metrics.items():
  75. assert metric_type in {"counter", "distribution", "set"}
  76. for i in range(10):
  77. value = i
  78. self.store_metric(
  79. self.org_id,
  80. self.project.id,
  81. metric_type,
  82. mri,
  83. {
  84. "release": "release_even" if i % 2 == 0 else "release_odd",
  85. },
  86. self.ts(self.hour_ago + timedelta(minutes=1 * i)),
  87. value,
  88. UseCaseID.SESSIONS,
  89. )
  90. def test_basic_generic_metrics(self) -> None:
  91. query = MetricsQuery(
  92. query=Timeseries(
  93. metric=Metric(
  94. "transaction.duration",
  95. TransactionMRI.DURATION.value,
  96. ),
  97. aggregate="max",
  98. ),
  99. start=self.hour_ago,
  100. end=self.now,
  101. rollup=Rollup(interval=60, granularity=60),
  102. scope=MetricsScope(
  103. org_ids=[self.org_id],
  104. project_ids=[self.project.id],
  105. use_case_id=UseCaseID.TRANSACTIONS.value,
  106. ),
  107. )
  108. request = Request(
  109. dataset="generic_metrics",
  110. app_id="tests",
  111. query=query,
  112. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  113. )
  114. result = run_query(request)
  115. assert len(result["data"]) == 10
  116. rows = result["data"]
  117. for i in range(10):
  118. assert rows[i]["aggregate_value"] == i
  119. assert (
  120. rows[i]["time"]
  121. == (
  122. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  123. ).isoformat()
  124. )
  125. def test_basic_bulk_generic_metrics(self) -> None:
  126. query = MetricsQuery(
  127. query=None,
  128. start=self.hour_ago,
  129. end=self.now,
  130. rollup=Rollup(interval=60, granularity=60),
  131. scope=MetricsScope(
  132. org_ids=[self.org_id],
  133. project_ids=[self.project.id],
  134. use_case_id=UseCaseID.TRANSACTIONS.value,
  135. ),
  136. )
  137. query1 = query.set_query(
  138. Timeseries(
  139. metric=Metric(
  140. "transaction.duration",
  141. TransactionMRI.DURATION.value,
  142. ),
  143. aggregate="max",
  144. )
  145. )
  146. query2 = query.set_query(
  147. Timeseries(
  148. metric=Metric(
  149. public_name=None,
  150. mri=TransactionMRI.USER.value,
  151. ),
  152. aggregate="uniq",
  153. )
  154. )
  155. request1 = Request(
  156. dataset="generic_metrics",
  157. app_id="tests",
  158. query=query1,
  159. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  160. )
  161. request2 = Request(
  162. dataset="generic_metrics",
  163. app_id="tests",
  164. query=query2,
  165. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  166. )
  167. results = bulk_run_query([request1, request2])
  168. assert len(results) == 2
  169. result = results[0] # Distribution
  170. rows = result["data"]
  171. for i in range(10):
  172. assert rows[i]["aggregate_value"] == i
  173. assert (
  174. rows[i]["time"]
  175. == (
  176. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  177. ).isoformat()
  178. )
  179. def test_groupby_generic_metrics(self) -> None:
  180. query = MetricsQuery(
  181. query=Timeseries(
  182. metric=Metric(
  183. "transaction.duration",
  184. TransactionMRI.DURATION.value,
  185. ),
  186. aggregate="quantiles",
  187. aggregate_params=[0.5, 0.99],
  188. groupby=[Column("transaction")],
  189. ),
  190. start=self.hour_ago,
  191. end=self.now,
  192. rollup=Rollup(interval=60, granularity=60),
  193. scope=MetricsScope(
  194. org_ids=[self.org_id],
  195. project_ids=[self.project.id],
  196. use_case_id=UseCaseID.TRANSACTIONS.value,
  197. ),
  198. )
  199. request = Request(
  200. dataset="generic_metrics",
  201. app_id="tests",
  202. query=query,
  203. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  204. )
  205. result = run_query(request)
  206. assert len(result["data"]) == 10
  207. rows = result["data"]
  208. for i in range(10):
  209. assert rows[i]["aggregate_value"] == [i, i]
  210. assert rows[i]["transaction"] == f"transaction_{i % 2}"
  211. assert (
  212. rows[i]["time"]
  213. == (
  214. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  215. ).isoformat()
  216. )
  217. def test_filters_generic_metrics(self) -> None:
  218. query = MetricsQuery(
  219. query=Timeseries(
  220. metric=Metric(
  221. "transaction.duration",
  222. TransactionMRI.DURATION.value,
  223. ),
  224. aggregate="quantiles",
  225. aggregate_params=[0.5],
  226. filters=[
  227. Condition(Column("status_code"), Op.EQ, "500"),
  228. Condition(Column("device"), Op.EQ, "BlackBerry"),
  229. ],
  230. ),
  231. start=self.hour_ago,
  232. end=self.now,
  233. rollup=Rollup(interval=60, granularity=60),
  234. scope=MetricsScope(
  235. org_ids=[self.org_id],
  236. project_ids=[self.project.id],
  237. use_case_id=UseCaseID.TRANSACTIONS.value,
  238. ),
  239. )
  240. request = Request(
  241. dataset="generic_metrics",
  242. app_id="tests",
  243. query=query,
  244. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  245. )
  246. result = run_query(request)
  247. assert len(result["data"]) == 2
  248. rows = result["data"]
  249. assert rows[0]["aggregate_value"] == [0]
  250. assert rows[1]["aggregate_value"] == [6.0]
  251. def test_complex_generic_metrics(self) -> None:
  252. query = MetricsQuery(
  253. query=Timeseries(
  254. metric=Metric(
  255. "transaction.duration",
  256. TransactionMRI.DURATION.value,
  257. ),
  258. aggregate="quantiles",
  259. aggregate_params=[0.5],
  260. filters=[
  261. Condition(Column("status_code"), Op.EQ, "500"),
  262. Condition(Column("device"), Op.EQ, "BlackBerry"),
  263. ],
  264. groupby=[Column("transaction")],
  265. ),
  266. start=self.hour_ago,
  267. end=self.now,
  268. rollup=Rollup(interval=60, granularity=60),
  269. scope=MetricsScope(
  270. org_ids=[self.org_id],
  271. project_ids=[self.project.id],
  272. use_case_id=UseCaseID.TRANSACTIONS.value,
  273. ),
  274. )
  275. request = Request(
  276. dataset="generic_metrics",
  277. app_id="tests",
  278. query=query,
  279. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  280. )
  281. result = run_query(request)
  282. assert len(result["data"]) == 2
  283. rows = result["data"]
  284. assert rows[0]["aggregate_value"] == [0]
  285. assert rows[0]["transaction"] == "transaction_0"
  286. assert rows[1]["aggregate_value"] == [6.0]
  287. assert rows[1]["transaction"] == "transaction_0"
  288. def test_totals(self) -> None:
  289. query = MetricsQuery(
  290. query=Timeseries(
  291. metric=Metric(
  292. "transaction.duration",
  293. TransactionMRI.DURATION.value,
  294. ),
  295. aggregate="max",
  296. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  297. groupby=[Column("transaction")],
  298. ),
  299. start=self.hour_ago,
  300. end=self.now,
  301. rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
  302. scope=MetricsScope(
  303. org_ids=[self.org_id],
  304. project_ids=[self.project.id],
  305. use_case_id=UseCaseID.TRANSACTIONS.value,
  306. ),
  307. )
  308. request = Request(
  309. dataset="generic_metrics",
  310. app_id="tests",
  311. query=query,
  312. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  313. )
  314. result = run_query(request)
  315. assert len(result["data"]) == 2
  316. rows = result["data"]
  317. assert rows[0]["aggregate_value"] == 7.0
  318. assert rows[1]["aggregate_value"] == 8.0
  319. def test_meta_data_in_response(self) -> None:
  320. query = MetricsQuery(
  321. query=Timeseries(
  322. metric=Metric(
  323. "transaction.duration",
  324. TransactionMRI.DURATION.value,
  325. ),
  326. aggregate="max",
  327. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  328. groupby=[Column("transaction")],
  329. ),
  330. start=self.hour_ago.replace(minute=16, second=59),
  331. end=self.now.replace(minute=16, second=59),
  332. rollup=Rollup(interval=60, granularity=60),
  333. scope=MetricsScope(
  334. org_ids=[self.org_id],
  335. project_ids=[self.project.id],
  336. use_case_id=UseCaseID.TRANSACTIONS.value,
  337. ),
  338. )
  339. request = Request(
  340. dataset="generic_metrics",
  341. app_id="tests",
  342. query=query,
  343. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  344. )
  345. result = run_query(request)
  346. assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
  347. assert result["modified_end"] == self.now.replace(minute=17, second=0)
  348. assert result["indexer_mappings"] == {
  349. "d:transactions/duration@millisecond": 9223372036854775909,
  350. "status_code": 10000,
  351. "transaction": 9223372036854776020,
  352. }
  353. def test_bad_query(self) -> None:
  354. query = MetricsQuery(
  355. query=Timeseries(
  356. metric=Metric(
  357. "transaction.duration",
  358. "not a real MRI",
  359. ),
  360. aggregate="max",
  361. ),
  362. start=self.hour_ago.replace(minute=16, second=59),
  363. end=self.now.replace(minute=16, second=59),
  364. rollup=Rollup(interval=60, granularity=60),
  365. scope=MetricsScope(
  366. org_ids=[self.org_id],
  367. project_ids=[self.project.id],
  368. use_case_id=UseCaseID.TRANSACTIONS.value,
  369. ),
  370. )
  371. request = Request(
  372. dataset="generic_metrics",
  373. app_id="tests",
  374. query=query,
  375. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  376. )
  377. with pytest.raises(InvalidParams):
  378. run_query(request)
  379. def test_interval_with_totals(self) -> None:
  380. query = MetricsQuery(
  381. query=Timeseries(
  382. metric=Metric(
  383. "transaction.duration",
  384. TransactionMRI.DURATION.value,
  385. ),
  386. aggregate="max",
  387. filters=[Condition(Column("status_code"), Op.EQ, "200")],
  388. groupby=[Column("transaction")],
  389. ),
  390. start=self.hour_ago,
  391. end=self.now,
  392. rollup=Rollup(interval=60, totals=True, granularity=60),
  393. scope=MetricsScope(
  394. org_ids=[self.org_id],
  395. project_ids=[self.project.id],
  396. use_case_id=UseCaseID.TRANSACTIONS.value,
  397. ),
  398. )
  399. request = Request(
  400. dataset="generic_metrics",
  401. app_id="tests",
  402. query=query,
  403. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  404. )
  405. result = run_query(request)
  406. assert len(result["data"]) == 6
  407. assert result["totals"]["aggregate_value"] == 8.0
  408. def test_automatic_granularity(self) -> None:
  409. query = MetricsQuery(
  410. query=Timeseries(
  411. metric=Metric(
  412. "transaction.duration",
  413. TransactionMRI.DURATION.value,
  414. ),
  415. aggregate="max",
  416. ),
  417. start=self.hour_ago,
  418. end=self.now,
  419. rollup=Rollup(interval=120),
  420. scope=MetricsScope(
  421. org_ids=[self.org_id],
  422. project_ids=[self.project.id],
  423. ),
  424. )
  425. request = Request(
  426. dataset="generic_metrics",
  427. app_id="tests",
  428. query=query,
  429. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  430. )
  431. result = run_query(request)
  432. # There's a flaky off by one error here that is very difficult to track down
  433. # TODO: figure out why this is flaky and assert to one specific value
  434. assert len(result["data"]) in [5, 6]
  435. def test_automatic_dataset(self) -> None:
  436. query = MetricsQuery(
  437. query=Timeseries(
  438. metric=Metric(
  439. None,
  440. SessionMRI.RAW_DURATION.value,
  441. ),
  442. aggregate="max",
  443. ),
  444. start=self.hour_ago,
  445. end=self.now,
  446. rollup=Rollup(interval=60, granularity=60),
  447. scope=MetricsScope(
  448. org_ids=[self.org_id],
  449. project_ids=[self.project.id],
  450. use_case_id=UseCaseID.SESSIONS.value,
  451. ),
  452. )
  453. request = Request(
  454. dataset="generic_metrics",
  455. app_id="tests",
  456. query=query,
  457. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  458. )
  459. result = run_query(request)
  460. assert request.dataset == "metrics"
  461. assert len(result["data"]) == 10
  462. def test_gauges(self) -> None:
  463. query = MetricsQuery(
  464. query=Timeseries(
  465. metric=Metric(
  466. None,
  467. "g:transactions/test_gauge@none",
  468. ),
  469. aggregate="last",
  470. ),
  471. start=self.hour_ago,
  472. end=self.now,
  473. rollup=Rollup(interval=60, totals=True, granularity=60),
  474. scope=MetricsScope(
  475. org_ids=[self.org_id],
  476. project_ids=[self.project.id],
  477. ),
  478. )
  479. request = Request(
  480. dataset="generic_metrics",
  481. app_id="tests",
  482. query=query,
  483. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  484. )
  485. result = run_query(request)
  486. assert len(result["data"]) == 10
  487. assert result["totals"]["aggregate_value"] == 9.0
  488. def test_metrics_groupby(self) -> None:
  489. query = MetricsQuery(
  490. query=Timeseries(
  491. metric=Metric(
  492. None,
  493. SessionMRI.RAW_DURATION.value,
  494. ),
  495. aggregate="max",
  496. groupby=[Column("release")],
  497. ),
  498. start=self.hour_ago,
  499. end=self.now,
  500. rollup=Rollup(interval=60, granularity=60),
  501. scope=MetricsScope(
  502. org_ids=[self.org_id],
  503. project_ids=[self.project.id],
  504. use_case_id=UseCaseID.SESSIONS.value,
  505. ),
  506. )
  507. request = Request(
  508. dataset="metrics",
  509. app_id="tests",
  510. query=query,
  511. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  512. )
  513. result = run_query(request)
  514. assert request.dataset == "metrics"
  515. assert len(result["data"]) == 10
  516. for data_point in result["data"]:
  517. assert data_point["release"] == "release_even" or data_point["release"] == "release_odd"
  518. def test_metrics_filters(self) -> None:
  519. query = MetricsQuery(
  520. query=Timeseries(
  521. metric=Metric(
  522. None,
  523. SessionMRI.RAW_USER.value,
  524. ),
  525. aggregate="count",
  526. filters=[
  527. Condition(Column("release"), Op.EQ, "release_even"),
  528. ],
  529. ),
  530. start=self.hour_ago,
  531. end=self.now,
  532. rollup=Rollup(interval=60, granularity=60),
  533. scope=MetricsScope(
  534. org_ids=[self.org_id],
  535. project_ids=[self.project.id],
  536. use_case_id=UseCaseID.SESSIONS.value,
  537. ),
  538. )
  539. request = Request(
  540. dataset="metrics",
  541. app_id="tests",
  542. query=query,
  543. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  544. )
  545. result = run_query(request)
  546. assert request.dataset == "metrics"
  547. assert len(result["data"]) == 5
  548. def test_metrics_complex(self) -> None:
  549. query = MetricsQuery(
  550. query=Timeseries(
  551. metric=Metric(
  552. None,
  553. SessionMRI.RAW_SESSION.value,
  554. ),
  555. aggregate="count",
  556. groupby=[Column("release")],
  557. filters=[
  558. Condition(Column("release"), Op.EQ, "release_even"),
  559. ],
  560. ),
  561. start=self.hour_ago,
  562. end=self.now,
  563. rollup=Rollup(interval=60, granularity=60),
  564. scope=MetricsScope(
  565. org_ids=[self.org_id],
  566. project_ids=[self.project.id],
  567. use_case_id=UseCaseID.SESSIONS.value,
  568. ),
  569. )
  570. request = Request(
  571. dataset="metrics",
  572. app_id="tests",
  573. query=query,
  574. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  575. )
  576. result = run_query(request)
  577. assert request.dataset == "metrics"
  578. assert len(result["data"]) == 5
  579. assert any(data_point["release"] == "release_even" for data_point in result["data"])
  580. def test_metrics_correctly_reverse_resolved(self) -> None:
  581. query = MetricsQuery(
  582. query=Timeseries(
  583. metric=Metric(
  584. None,
  585. SessionMRI.RAW_SESSION.value,
  586. ),
  587. aggregate="count",
  588. groupby=[Column("release"), Column("project_id")],
  589. filters=[
  590. Condition(Column("release"), Op.EQ, "release_even"),
  591. Condition(Column("project_id"), Op.EQ, self.project.id),
  592. ],
  593. ),
  594. start=self.hour_ago,
  595. end=self.now,
  596. rollup=Rollup(interval=60, granularity=60),
  597. scope=MetricsScope(
  598. org_ids=[self.org_id],
  599. project_ids=[self.project.id],
  600. use_case_id=UseCaseID.SESSIONS.value,
  601. ),
  602. )
  603. request = Request(
  604. dataset="metrics",
  605. app_id="tests",
  606. query=query,
  607. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  608. )
  609. result = run_query(request)
  610. assert request.dataset == "metrics"
  611. assert len(result["data"]) == 5
  612. assert any(data_point["release"] == "release_even" for data_point in result["data"])
  613. assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
  614. def test_failure_rate(self) -> None:
  615. query = MetricsQuery(
  616. query=Formula(
  617. ArithmeticOperator.DIVIDE.value,
  618. [
  619. Timeseries(
  620. metric=Metric(
  621. mri=TransactionMRI.DURATION.value,
  622. ),
  623. aggregate="count",
  624. filters=[
  625. Condition(
  626. Column(TransactionTagsKey.TRANSACTION_STATUS.value),
  627. Op.NOT_IN,
  628. [
  629. TransactionStatusTagValue.OK.value,
  630. TransactionStatusTagValue.CANCELLED.value,
  631. TransactionStatusTagValue.UNKNOWN.value,
  632. ],
  633. )
  634. ],
  635. ),
  636. Timeseries(
  637. metric=Metric(
  638. mri=TransactionMRI.DURATION.value,
  639. ),
  640. aggregate="count",
  641. ),
  642. ],
  643. ),
  644. start=self.hour_ago,
  645. end=self.now,
  646. rollup=Rollup(interval=60, totals=True, granularity=60),
  647. scope=MetricsScope(
  648. org_ids=[self.org_id],
  649. project_ids=[self.project.id],
  650. ),
  651. )
  652. request = Request(
  653. dataset="generic_metrics",
  654. app_id="tests",
  655. query=query,
  656. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  657. )
  658. result = run_query(request)
  659. assert len(result["data"]) == 10
  660. assert result["totals"]["aggregate_value"] == 1.0
  661. def test_aggregate_aliases(self) -> None:
  662. query = MetricsQuery(
  663. query=Timeseries(
  664. metric=Metric(
  665. "transaction.duration",
  666. TransactionMRI.DURATION.value,
  667. ),
  668. aggregate="p95",
  669. ),
  670. start=self.hour_ago,
  671. end=self.now,
  672. rollup=Rollup(interval=60, granularity=60),
  673. scope=MetricsScope(
  674. org_ids=[self.org_id],
  675. project_ids=[self.project.id],
  676. use_case_id=UseCaseID.TRANSACTIONS.value,
  677. ),
  678. )
  679. request = Request(
  680. dataset="generic_metrics",
  681. app_id="tests",
  682. query=query,
  683. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  684. )
  685. result = run_query(request)
  686. assert len(result["data"]) == 10
  687. rows = result["data"]
  688. for i in range(10):
  689. assert rows[i]["aggregate_value"] == [i]
  690. assert (
  691. rows[i]["time"]
  692. == (
  693. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  694. ).isoformat()
  695. )
  696. def test_dataset_correctness(self) -> None:
  697. query = MetricsQuery(
  698. query=Timeseries(
  699. metric=Metric(
  700. "transaction.duration",
  701. TransactionMRI.DURATION.value,
  702. ),
  703. aggregate="quantiles",
  704. aggregate_params=[0.5, 0.99],
  705. groupby=[Column("transaction")],
  706. filters=[
  707. Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
  708. ],
  709. ),
  710. start=self.hour_ago,
  711. end=self.now,
  712. rollup=Rollup(interval=60, granularity=60),
  713. scope=MetricsScope(
  714. org_ids=[self.org_id],
  715. project_ids=[self.project.id],
  716. use_case_id=UseCaseID.TRANSACTIONS.value,
  717. ),
  718. )
  719. request = Request(
  720. dataset="metrics",
  721. app_id="tests",
  722. query=query,
  723. tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
  724. )
  725. result = run_query(request)
  726. assert len(result["data"]) == 10
  727. rows = result["data"]
  728. for i in range(10):
  729. assert rows[i]["aggregate_value"] == [i, i]
  730. assert rows[i]["transaction"] == f"transaction_{i % 2}"
  731. assert (
  732. rows[i]["time"]
  733. == (
  734. self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
  735. ).isoformat()
  736. )