# test_metrics_layer.py

from __future__ import annotations

from collections.abc import Mapping
from datetime import datetime, timedelta, timezone
from typing import Literal

import pytest
from snuba_sdk import (
    ArithmeticOperator,
    Column,
    Condition,
    Direction,
    Formula,
    Limit,
    Metric,
    MetricsQuery,
    MetricsScope,
    Op,
    Request,
    Rollup,
    Timeseries,
)

from sentry.exceptions import InvalidParams
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
from sentry.snuba.metrics_layer.query import bulk_run_query, run_query
from sentry.testutils.cases import BaseMetricsTestCase, TestCase

pytestmark = pytest.mark.sentry_metrics


class MQLTest(TestCase, BaseMetricsTestCase):
    def ts(self, dt: datetime) -> int:
        return int(dt.timestamp())

    def setUp(self) -> None:
        super().setUp()
        self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
            TransactionMRI.DURATION.value: "distribution",
            TransactionMRI.USER.value: "set",
            TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
            "g:transactions/test_gauge@none": "gauge",
        }
        self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
            SessionMRI.RAW_DURATION.value: "distribution",
            SessionMRI.RAW_USER.value: "set",
            SessionMRI.RAW_SESSION.value: "counter",
        }
        self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
        self.hour_ago = self.now - timedelta(hours=1)
        self.org_id = self.project.organization_id
        for mri, metric_type in self.generic_metrics.items():
            assert metric_type in {"counter", "distribution", "set", "gauge"}
            for i in range(10):
                value: int | dict[str, int]
                if metric_type == "gauge":
                    value = {
                        "min": i,
                        "max": i,
                        "sum": i,
                        "count": i,
                        "last": i,
                    }
                else:
                    value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "transaction": f"transaction_{i % 2}",
                        "status_code": "500" if i % 3 == 0 else "200",
                        "device": "BlackBerry" if i % 2 == 0 else "Nokia",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.TRANSACTIONS,
                )
        for mri, metric_type in self.metrics.items():
            assert metric_type in {"counter", "distribution", "set"}
            for i in range(10):
                value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "release": "release_even" if i % 2 == 0 else "release_odd",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.SESSIONS,
                )
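
    # setUp stores 10 points per MRI at one-minute intervals starting an hour ago,
    # with values 0-9. Generic (transactions) metrics are tagged with
    # transaction/status_code/device; release-health (sessions) metrics with release.
    # The assertions in the tests below rely on that layout.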
    def test_basic_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
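
    # The test below assumes bulk_run_query returns results in request order:
    # results[0] is the distribution (max) query and results[1] the set (uniq) query.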
    def test_basic_bulk_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=None,
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        query1 = query.set_query(
            Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            )
        )
        query2 = query.set_query(
            Timeseries(
                metric=Metric(
                    public_name=None,
                    mri=TransactionMRI.USER.value,
                ),
                aggregate="uniq",
            )
        )
        request1 = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query1,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        request2 = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query2,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        results = bulk_run_query([request1, request2])
        assert len(results) == 2

        result = results[0]  # Distribution
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_groupby_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_filters_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == [0]
        assert rows[1]["aggregate_value"] == [6.0]

    def test_complex_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == [0]
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] == [6.0]
        assert rows[1]["transaction"] == "transaction_0"

    def test_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == 7.0
        assert rows[1]["aggregate_value"] == 8.0

    def test_meta_data_in_response(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
        assert result["modified_end"] == self.now.replace(minute=17, second=0)
        assert result["indexer_mappings"] == {
            "d:transactions/duration@millisecond": 9223372036854775909,
            "status_code": 10000,
            "transaction": 9223372036854776020,
        }

    def test_bad_query(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    "not a real MRI",
                ),
                aggregate="max",
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        with pytest.raises(InvalidParams):
            run_query(request)

    def test_interval_with_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 6
        assert result["totals"]["aggregate_value"] == 8.0

    def test_automatic_granularity(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=120),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        # There's a flaky off-by-one error here that is very difficult to track down.
        # TODO: figure out why this is flaky and assert a single specific value.
        assert len(result["data"]) in [5, 6]

    def test_automatic_dataset(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
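
    # setUp stores the test gauge with min/max/sum/count/last all equal to i (0-9),
    # so the "last" aggregate yields one row per minute and the totals row resolves
    # to the final stored value, 9.0.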
    def test_gauges(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    "g:transactions/test_gauge@none",
                ),
                aggregate="last",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 9.0

    def test_metrics_groupby(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
                groupby=[Column("release")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
        for data_point in result["data"]:
            assert data_point["release"] in ("release_even", "release_odd")

    def test_metrics_filters(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_USER.value,
                ),
                aggregate="count",
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5

    def test_metrics_complex(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])

    def test_metrics_correctly_reverse_resolved(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release"), Column("project_id")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                    Condition(Column("project_id"), Op.EQ, self.project.id),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])
        assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
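
    # Failure rate below is a Formula: the count of transactions whose
    # transaction.status tag is not OK/CANCELLED/UNKNOWN, divided by the total count.
    # The fixture never sets that tag, so every stored point is counted on both sides
    # of the division and the totals value is 1.0.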
    def test_failure_rate(self) -> None:
        query = MetricsQuery(
            query=Formula(
                ArithmeticOperator.DIVIDE.value,
                [
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                        filters=[
                            Condition(
                                Column(TransactionTagsKey.TRANSACTION_STATUS.value),
                                Op.NOT_IN,
                                [
                                    TransactionStatusTagValue.OK.value,
                                    TransactionStatusTagValue.CANCELLED.value,
                                    TransactionStatusTagValue.UNKNOWN.value,
                                ],
                            )
                        ],
                    ),
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 1.0

    def test_aggregate_aliases(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="p95",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i]
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_dataset_correctness(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
                filters=[
                    Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
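
    # The test below stores two custom distribution MRIs and adds their averages with
    # a "plus" Formula, checking that custom MRIs resolve and that the totals-only
    # query returns a single row.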
    def test_resolve_all_mris(self) -> None:
        for mri in [
            "d:custom/sentry.event_manager.save@second",
            "d:custom/sentry.event_manager.save_generic_events@second",
        ]:
            self.store_metric(
                self.org_id,
                self.project.id,
                "distribution",
                mri,
                {
                    "transaction": "transaction_1",
                    "status_code": "200",
                    "device": "BlackBerry",
                },
                self.ts(self.hour_ago + timedelta(minutes=5)),
                1,
                UseCaseID.CUSTOM,
            )

        query = MetricsQuery(
            query=Formula(
                function_name="plus",
                parameters=[
                    Timeseries(
                        metric=Metric(
                            mri="d:custom/sentry.event_manager.save@second",
                        ),
                        aggregate="avg",
                    ),
                    Timeseries(
                        metric=Metric(
                            mri="d:custom/sentry.event_manager.save_generic_events@second",
                        ),
                        aggregate="avg",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=None, totals=True, orderby=None, granularity=10),
            scope=MetricsScope(
                org_ids=[self.org_id], project_ids=[self.project.id], use_case_id="custom"
            ),
            limit=Limit(20),
            offset=None,
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = run_query(request)
        assert len(result["data"]) == 1