# test_metrics_layer.py

from __future__ import annotations

from datetime import datetime, timedelta, timezone
from typing import Any, Callable, Literal, Mapping

import pytest
from snuba_sdk import (
    ArithmeticOperator,
    Column,
    Condition,
    Direction,
    Formula,
    Metric,
    MetricsQuery,
    MetricsScope,
    Op,
    Request,
    Rollup,
    Timeseries,
)

from sentry.exceptions import InvalidParams
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
from sentry.snuba.metrics_layer.query import run_query as layer_run_query
from sentry.testutils.cases import BaseMetricsTestCase, TestCase

pytestmark = pytest.mark.sentry_metrics

# TODO: This is only needed while we support SnQL and MQL. Once SnQL is removed, this can be removed.
LayerQuery = Callable[[Request], Mapping[str, Any]]
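
# MQLTest runs every request in this file through the MQL endpoint by pinning the
# "snuba.use-mql-endpoint" option to 1.0; SnQLTest at the bottom of the file reruns the
# same suite with the option pinned to 0 to cover the SnQL path.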
class MQLTest(TestCase, BaseMetricsTestCase):
    @property
    def run_query(self) -> LayerQuery:
        def mql_query_fn(request: Request) -> Mapping[str, Any]:
            with self.options({"snuba.use-mql-endpoint": 1.0}):
                return layer_run_query(request)

        return mql_query_fn

    def ts(self, dt: datetime) -> int:
        return int(dt.timestamp())
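
    # setUp seeds ten data points per metric, one per minute starting an hour ago, with
    # values 0..9. Generic (transactions) metrics vary the transaction, status_code and
    # device tags with the loop index; release-health (sessions) metrics alternate the
    # release tag between release_even and release_odd.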
    def setUp(self) -> None:
        super().setUp()
        self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
            TransactionMRI.DURATION.value: "distribution",
            TransactionMRI.USER.value: "set",
            TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
            "g:transactions/test_gauge@none": "gauge",
        }
        self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
            SessionMRI.RAW_DURATION.value: "distribution",
            SessionMRI.RAW_USER.value: "set",
            SessionMRI.RAW_SESSION.value: "counter",
        }
        self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
        self.hour_ago = self.now - timedelta(hours=1)
        self.org_id = self.project.organization_id
        for mri, metric_type in self.generic_metrics.items():
            assert metric_type in {"counter", "distribution", "set", "gauge"}
            for i in range(10):
                value: int | dict[str, int]
                if metric_type == "gauge":
                    value = {
                        "min": i,
                        "max": i,
                        "sum": i,
                        "count": i,
                        "last": i,
                    }
                else:
                    value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "transaction": f"transaction_{i % 2}",
                        "status_code": "500" if i % 3 == 0 else "200",
                        "device": "BlackBerry" if i % 2 == 0 else "Nokia",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.TRANSACTIONS,
                )
        for mri, metric_type in self.metrics.items():
            assert metric_type in {"counter", "distribution", "set"}
            for i in range(10):
                value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "release": "release_even" if i % 2 == 0 else "release_odd",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.SESSIONS,
                )

    def test_basic_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_groupby_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_filters_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == [0]
        assert rows[1]["aggregate_value"] == [6.0]

    def test_complex_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == [0]
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] == [6.0]
        assert rows[1]["transaction"] == "transaction_0"

    def test_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == 7.0
        assert rows[1]["aggregate_value"] == 8.0

    def test_meta_data_in_response(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
        assert result["modified_end"] == self.now.replace(minute=17, second=0)
        assert result["indexer_mappings"] == {
            "d:transactions/duration@millisecond": 9223372036854775909,
            "status_code": 10000,
            "transaction": 9223372036854776020,
        }

    def test_bad_query(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    "not a real MRI",
                ),
                aggregate="max",
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        with pytest.raises(InvalidParams):
            self.run_query(request)

    def test_interval_with_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 6
        assert result["totals"]["aggregate_value"] == 8.0

    def test_automatic_granularity(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=120),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        # There's a flaky off-by-one error here that is very difficult to track down.
        # TODO: figure out why this is flaky and assert a single specific value.
        assert len(result["data"]) in [5, 6]
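
    # test_automatic_dataset builds the request against "generic_metrics" but queries a
    # sessions MRI; the layer is expected to rewrite request.dataset to the release-health
    # "metrics" dataset before running the query.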
    def test_automatic_dataset(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10

    def test_gauges(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    "g:transactions/test_gauge@none",
                ),
                aggregate="last",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 9.0

    def test_metrics_groupby(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
                groupby=[Column("release")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
        for data_point in result["data"]:
            assert data_point["release"] in ("release_even", "release_odd")

    def test_metrics_filters(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_USER.value,
                ),
                aggregate="count",
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5

    def test_metrics_complex(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])

    def test_metrics_correctly_reverse_resolved(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release"), Column("project_id")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                    Condition(Column("project_id"), Op.EQ, self.project.id),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        assert any(data_point["release"] == "release_even" for data_point in result["data"])
        assert any(data_point["project_id"] == self.project.id for data_point in result["data"])
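
    # Failure rate is modelled as a Formula: the count of transactions whose status is not
    # ok/cancelled/unknown divided by the total count. This is not implemented in MQL yet,
    # so the test only runs through the SnQL subclass below.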
    @pytest.mark.skip(reason="This is not implemented in MQL")
    def test_failure_rate(self) -> None:
        query = MetricsQuery(
            query=Formula(
                ArithmeticOperator.DIVIDE,
                [
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                        filters=[
                            Condition(
                                Column(TransactionTagsKey.TRANSACTION_STATUS.value),
                                Op.NOT_IN,
                                [
                                    TransactionStatusTagValue.OK.value,
                                    TransactionStatusTagValue.CANCELLED.value,
                                    TransactionStatusTagValue.UNKNOWN.value,
                                ],
                            )
                        ],
                    ),
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 1.0

    def test_aggregate_aliases(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="p95",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i]
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
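
    # Deliberately sends a transactions MRI to the "metrics" dataset, presumably to check
    # that the layer still resolves the query against the right storage and returns correct
    # data regardless of the dataset the caller picked.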
    def test_dataset_correctness(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
                filters=[
                    Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )
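
# SnQLTest reruns the entire MQLTest suite through the SnQL path by pinning the
# "snuba.use-mql-endpoint" option to 0. test_failure_rate is overridden without the skip
# marker so the Formula query, which only the SnQL path supports today, still runs here.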
class SnQLTest(MQLTest):
    @property
    def run_query(self) -> LayerQuery:
        def snql_query_fn(request: Request) -> Mapping[str, Any]:
            with self.options({"snuba.use-mql-endpoint": 0}):
                return layer_run_query(request)

        return snql_query_fn

    def test_failure_rate(self) -> None:
        super().test_failure_rate()