from __future__ import annotations

from datetime import datetime, timedelta, timezone
from typing import Any, Callable, Literal, Mapping

import pytest
from snuba_sdk import (
    ArithmeticOperator,
    Column,
    Condition,
    Direction,
    Formula,
    Metric,
    MetricsQuery,
    MetricsScope,
    Op,
    Request,
    Rollup,
    Timeseries,
)

from sentry.exceptions import InvalidParams
from sentry.sentry_metrics.use_case_id_registry import UseCaseID
from sentry.snuba.metrics.naming_layer import SessionMRI, TransactionMRI
from sentry.snuba.metrics.naming_layer.public import TransactionStatusTagValue, TransactionTagsKey
from sentry.snuba.metrics_layer.query import run_query as layer_run_query
from sentry.testutils.cases import BaseMetricsTestCase, TestCase

pytestmark = pytest.mark.sentry_metrics

# TODO: This is only needed while we support SnQL and MQL. Once SnQL is removed, this can be removed.
LayerQuery = Callable[[Request], Mapping[str, Any]]


class MQLTest(TestCase, BaseMetricsTestCase):
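    """Exercise the metrics layer through the MQL endpoint.

    ``run_query`` forces the ``snuba.use-mql-endpoint`` option on, so every test
    here goes through the MQL code path. ``SnQLTest`` below re-runs the same
    tests with the option disabled to cover the SnQL path.
    """
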
    @property
    def run_query(self) -> LayerQuery:
        def mql_query_fn(request: Request) -> Mapping[str, Any]:
            with self.options({"snuba.use-mql-endpoint": 1.0}):
                return layer_run_query(request)

        return mql_query_fn

    def ts(self, dt: datetime) -> int:
        return int(dt.timestamp())

    def setUp(self) -> None:
        super().setUp()

        self.generic_metrics: Mapping[str, Literal["counter", "set", "distribution", "gauge"]] = {
            TransactionMRI.DURATION.value: "distribution",
            TransactionMRI.USER.value: "set",
            TransactionMRI.COUNT_PER_ROOT_PROJECT.value: "counter",
            "g:transactions/test_gauge@none": "gauge",
        }
        self.metrics: Mapping[str, Literal["counter", "set", "distribution"]] = {
            SessionMRI.RAW_DURATION.value: "distribution",
            SessionMRI.RAW_USER.value: "set",
            SessionMRI.RAW_SESSION.value: "counter",
        }
        self.now = datetime.now(tz=timezone.utc).replace(microsecond=0)
        self.hour_ago = self.now - timedelta(hours=1)
        self.org_id = self.project.organization_id
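        # Store 10 values (0..9) for each generic metric, one per minute starting an
        # hour ago. The tags alternate so the tests below can filter and group on them:
        # transaction and device flip on even/odd i, and status_code is "500" for every
        # third point (i = 0, 3, 6, 9) and "200" otherwise.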
        for mri, metric_type in self.generic_metrics.items():
            assert metric_type in {"counter", "distribution", "set", "gauge"}
            for i in range(10):
                value: int | dict[str, int]
                if metric_type == "gauge":
                    value = {
                        "min": i,
                        "max": i,
                        "sum": i,
                        "count": i,
                        "last": i,
                    }
                else:
                    value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "transaction": f"transaction_{i % 2}",
                        "status_code": "500" if i % 3 == 0 else "200",
                        "device": "BlackBerry" if i % 2 == 0 else "Nokia",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.TRANSACTIONS,
                )
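        # Store the same 10 values for each release-health (sessions) metric, tagged
        # release_even / release_odd so that half the points match each release.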
        for mri, metric_type in self.metrics.items():
            assert metric_type in {"counter", "distribution", "set"}
            for i in range(10):
                value = i
                self.store_metric(
                    self.org_id,
                    self.project.id,
                    metric_type,
                    mri,
                    {
                        "release": "release_even" if i % 2 == 0 else "release_odd",
                    },
                    self.ts(self.hour_ago + timedelta(minutes=1 * i)),
                    value,
                    UseCaseID.SESSIONS,
                )

    def test_basic_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == i
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_groupby_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_filters_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
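        # Only i = 0 and i = 6 have both status_code "500" (i % 3 == 0) and device
        # "BlackBerry" (i % 2 == 0), so exactly two buckets survive the filters.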
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == [0]
        assert rows[1]["aggregate_value"] == [6.0]

    def test_complex_generic_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5],
                filters=[
                    Condition(Column("status_code"), Op.EQ, "500"),
                    Condition(Column("device"), Op.EQ, "BlackBerry"),
                ],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == [0]
        assert rows[0]["transaction"] == "transaction_0"
        assert rows[1]["aggregate_value"] == [6.0]
        assert rows[1]["transaction"] == "transaction_0"

    def test_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(totals=True, granularity=60, orderby=Direction.ASC),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
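        # status_code "200" keeps i in {1, 2, 4, 5, 7, 8}; grouped by transaction the
        # maxima are 7 (odd i -> transaction_1) and 8 (even i -> transaction_0),
        # returned in ascending order.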
        assert len(result["data"]) == 2
        rows = result["data"]
        assert rows[0]["aggregate_value"] == 7.0
        assert rows[1]["aggregate_value"] == 8.0

    def test_meta_data_in_response(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
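        # The layer snaps the requested window to interval boundaries: the start is
        # rounded down and the end rounded up to the next whole minute.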
        assert result["modified_start"] == self.hour_ago.replace(minute=16, second=0)
        assert result["modified_end"] == self.now.replace(minute=17, second=0)
        assert result["indexer_mappings"] == {
            "d:transactions/duration@millisecond": 9223372036854775909,
            "status_code": 10000,
            "transaction": 9223372036854776020,
        }

    def test_bad_query(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    "not a real MRI",
                ),
                aggregate="max",
            ),
            start=self.hour_ago.replace(minute=16, second=59),
            end=self.now.replace(minute=16, second=59),
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )

        with pytest.raises(InvalidParams):
            self.run_query(request)

    def test_interval_with_totals(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
                filters=[Condition(Column("status_code"), Op.EQ, "200")],
                groupby=[Column("transaction")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
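        # Six points carry status_code "200" (one per minute bucket), and the totals
        # row carries the overall max of 8.0.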
        assert len(result["data"]) == 6
        assert result["totals"]["aggregate_value"] == 8.0

    def test_automatic_granularity(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=120),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)

        # There's a flaky off-by-one error here that is very difficult to track down.
        # TODO: figure out why this is flaky and assert a single specific value.
        assert len(result["data"]) in [5, 6]

    def test_automatic_dataset(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
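        # The layer recognizes the sessions MRI and rewrites the dataset on the request
        # from "generic_metrics" to "metrics".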
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10

    def test_gauges(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    "g:transactions/test_gauge@none",
                ),
                aggregate="last",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
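        # One bucket per stored gauge value; with the "last" aggregate the totals row
        # is the final stored value, 9.0.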
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 9.0

    def test_groupby_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_DURATION.value,
                ),
                aggregate="max",
                groupby=[Column("release")],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 10
        # TODO: check reverse resolved tags

    def test_filters_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_USER.value,
                ),
                aggregate="count",
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
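        # Only the five even-indexed points are tagged release_even, so five buckets come back.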
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        # TODO: check reverse resolved tags

    def test_complex_metrics(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    None,
                    SessionMRI.RAW_SESSION.value,
                ),
                aggregate="count",
                groupby=[Column("release")],
                filters=[
                    Condition(Column("release"), Op.EQ, "release_even"),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.SESSIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert request.dataset == "metrics"
        assert len(result["data"]) == 5
        # TODO: check reverse resolved tags

    @pytest.mark.skip(reason="This is not implemented in MQL")
    def test_failure_rate(self) -> None:
        query = MetricsQuery(
            query=Formula(
                ArithmeticOperator.DIVIDE,
                [
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                        filters=[
                            Condition(
                                Column(TransactionTagsKey.TRANSACTION_STATUS.value),
                                Op.NOT_IN,
                                [
                                    TransactionStatusTagValue.OK.value,
                                    TransactionStatusTagValue.CANCELLED.value,
                                    TransactionStatusTagValue.UNKNOWN.value,
                                ],
                            )
                        ],
                    ),
                    Timeseries(
                        metric=Metric(
                            mri=TransactionMRI.DURATION.value,
                        ),
                        aggregate="count",
                    ),
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, totals=True, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        assert result["totals"]["aggregate_value"] == 1.0

    def test_aggregate_aliases(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="p95",
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="generic_metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i]
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )

    def test_dataset_correctness(self) -> None:
        query = MetricsQuery(
            query=Timeseries(
                metric=Metric(
                    "transaction.duration",
                    TransactionMRI.DURATION.value,
                ),
                aggregate="quantiles",
                aggregate_params=[0.5, 0.99],
                groupby=[Column("transaction")],
                filters=[
                    Condition(Column("transaction"), Op.IN, ["transaction_0", "transaction_1"])
                ],
            ),
            start=self.hour_ago,
            end=self.now,
            rollup=Rollup(interval=60, granularity=60),
            scope=MetricsScope(
                org_ids=[self.org_id],
                project_ids=[self.project.id],
                use_case_id=UseCaseID.TRANSACTIONS.value,
            ),
        )
        request = Request(
            dataset="metrics",
            app_id="tests",
            query=query,
            tenant_ids={"referrer": "metrics.testing.test", "organization_id": self.org_id},
        )
        result = self.run_query(request)
        assert len(result["data"]) == 10
        rows = result["data"]
        for i in range(10):
            assert rows[i]["aggregate_value"] == [i, i]
            assert rows[i]["transaction"] == f"transaction_{i % 2}"
            assert (
                rows[i]["time"]
                == (
                    self.hour_ago.replace(second=0, microsecond=0) + timedelta(minutes=1 * i)
                ).isoformat()
            )


class SnQLTest(MQLTest):
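    """Re-run every MQLTest test through the SnQL-based metrics layer by
    disabling the ``snuba.use-mql-endpoint`` option."""
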
    @property
    def run_query(self) -> LayerQuery:
        def snql_query_fn(request: Request) -> Mapping[str, Any]:
            with self.options({"snuba.use-mql-endpoint": 0}):
                return layer_run_query(request)

        return snql_query_fn
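
    # The parent class skips test_failure_rate because it is not implemented in MQL;
    # redefining it here without the skip marker runs it against the SnQL path.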
    def test_failure_rate(self) -> None:
        super().test_failure_rate()