test_organization_events_span_metrics.py

from datetime import timedelta

import pytest
from django.urls import reverse

from sentry.search.events import constants
from sentry.search.utils import map_device_class_level
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now

pytestmark = pytest.mark.sentry_metrics

SPAN_DURATION_MRI = "d:spans/duration@millisecond"


class OrganizationEventsMetricsEnhancedPerformanceEndpointTest(MetricsEnhancedPerformanceTestCase):
    viewname = "sentry-api-0-organization-events"

    # Poor intentionally omitted for test_measurement_rating_that_does_not_exist
    METRIC_STRINGS = [
        "foo_transaction",
        "bar_transaction",
    ]

    def setUp(self):
        super().setUp()
        self.min_ago = before_now(minutes=1)
        self.two_min_ago = before_now(minutes=2)  # referenced by the regression_score and *_by_timestamp tests below
        self.six_min_ago = before_now(minutes=6)
        self.three_days_ago = before_now(days=3)
        self.features = {
            "organizations:starfish-view": True,
        }
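
    # Helper that logs in as the test user, builds the organization events URL, and issues a
    # GET for the given query against the requested dataset with the setUp feature flags applied.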
    def do_request(self, query, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        self.login_as(user=self.user)
        url = reverse(
            self.viewname,
            kwargs={"organization_id_or_slug": self.organization.slug},
        )
        with self.feature(features):
            return self.client.get(url, query, format="json")
    def test_p50_with_no_data(self):
        response = self.do_request(
            {
                "field": ["p50()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["p50()"] == 0
        assert meta["dataset"] == "spansMetrics"

    @pytest.mark.querybuilder
    def test_count(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.three_days_ago,
        )
        response = self.do_request(
            {
                "field": ["count()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "7d",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["count()"] == 1
        assert meta["dataset"] == "spansMetrics"

    def test_count_if(self):
        self.store_span_metric(
            2,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.three_days_ago,
            tags={"release": "1.0.0"},
        )
        self.store_span_metric(
            2,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.three_days_ago,
            tags={"release": "1.0.0"},
        )
        self.store_span_metric(
            2,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.three_days_ago,
            tags={"release": "2.0.0"},
        )
        fieldRelease1 = "count_if(release,1.0.0)"
        fieldRelease2 = "count_if(release,2.0.0)"
        response = self.do_request(
            {
                "field": [fieldRelease1, fieldRelease2],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "7d",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0][fieldRelease1] == 2
        assert data[0][fieldRelease2] == 1
        assert meta["dataset"] == "spansMetrics"

    def test_division_if(self):
        self.store_span_metric(
            {
                "min": 1,
                "max": 1,
                "sum": 1,
                "count": 1,
                "last": 1,
            },
            entity="metrics_gauges",
            metric="mobile.slow_frames",
            timestamp=self.three_days_ago,
            tags={"release": "1.0.0"},
        )
        self.store_span_metric(
            {
                "min": 1,
                "max": 1,
                "sum": 15,
                "count": 15,
                "last": 1,
            },
            entity="metrics_gauges",
            metric="mobile.total_frames",
            timestamp=self.three_days_ago,
            tags={"release": "1.0.0"},
        )
        self.store_span_metric(
            {
                "min": 1,
                "max": 1,
                "sum": 2,
                "count": 2,
                "last": 1,
            },
            entity="metrics_gauges",
            metric="mobile.frozen_frames",
            timestamp=self.three_days_ago,
            tags={"release": "2.0.0"},
        )
        self.store_span_metric(
            {
                "min": 1,
                "max": 1,
                "sum": 10,
                "count": 10,
                "last": 1,
            },
            entity="metrics_gauges",
            metric="mobile.total_frames",
            timestamp=self.three_days_ago,
            tags={"release": "2.0.0"},
        )
        fieldRelease1 = "division_if(mobile.slow_frames,mobile.total_frames,release,1.0.0)"
        fieldRelease2 = "division_if(mobile.frozen_frames,mobile.total_frames,release,2.0.0)"
        response = self.do_request(
            {
                "field": [fieldRelease1, fieldRelease2],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "7d",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0][fieldRelease1] == 1 / 15
        assert data[0][fieldRelease2] == 2 / 10
        assert meta["dataset"] == "spansMetrics"

    def test_count_unique(self):
        self.store_span_metric(
            1,
            "user",
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            2,
            "user",
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["count_unique(user)"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["count_unique(user)"] == 2
        assert meta["dataset"] == "spansMetrics"

    def test_sum(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            99,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["sum(span.self_time)"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["sum(span.self_time)"] == 420
        assert meta["dataset"] == "spansMetrics"

    def test_percentile(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["percentile(span.self_time, 0.95)"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["percentile(span.self_time, 0.95)"] == 1
        assert meta["dataset"] == "spansMetrics"

    def test_fixed_percentile_functions(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        for function in ["p50()", "p75()", "p95()", "p99()", "p100()"]:
            response = self.do_request(
                {
                    "field": [function],
                    "query": "",
                    "project": self.project.id,
                    "dataset": "spansMetrics",
                }
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            meta = response.data["meta"]
            assert len(data) == 1
            assert data[0][function] == 1, function
            assert meta["dataset"] == "spansMetrics", function
            assert meta["fields"][function] == "duration", function

    def test_fixed_percentile_functions_with_duration(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SPAN_METRICS_MAP["span.duration"],
            timestamp=self.min_ago,
        )
        for function in [
            "p50(span.duration)",
            "p75(span.duration)",
            "p95(span.duration)",
            "p99(span.duration)",
            "p100(span.duration)",
        ]:
            response = self.do_request(
                {
                    "field": [function],
                    "query": "",
                    "project": self.project.id,
                    "dataset": "spansMetrics",
                }
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            meta = response.data["meta"]
            assert len(data) == 1, function
            assert data[0][function] == 1, function
            assert meta["dataset"] == "spansMetrics", function
            assert meta["fields"][function] == "duration", function

    def test_avg(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["avg()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["avg()"] == 1
        assert meta["dataset"] == "spansMetrics"
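
    # 6 spans over a 10 minute stats period: eps()/sps() = 6 / 600s = 0.01 per second.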
    def test_eps(self):
        for _ in range(6):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                timestamp=self.min_ago,
            )
        response = self.do_request(
            {
                "field": ["eps()", "sps()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["eps()"] == 0.01
        assert data[0]["sps()"] == 0.01
        assert meta["fields"]["eps()"] == "rate"
        assert meta["fields"]["sps()"] == "rate"
        assert meta["units"]["eps()"] == "1/second"
        assert meta["units"]["sps()"] == "1/second"
        assert meta["dataset"] == "spansMetrics"
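
    # The same 6 spans over 10 minutes give epm()/spm() = 6 / 10 = 0.6 per minute.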
    def test_epm(self):
        for _ in range(6):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                timestamp=self.min_ago,
            )
        response = self.do_request(
            {
                "field": ["epm()", "spm()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["epm()"] == 0.6
        assert data[0]["spm()"] == 0.6
        assert meta["fields"]["epm()"] == "rate"
        assert meta["fields"]["spm()"] == "rate"
        assert meta["units"]["epm()"] == "1/minute"
        assert meta["units"]["spm()"] == "1/minute"
        assert meta["dataset"] == "spansMetrics"
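
    # foo_transaction accounts for 4 of the 5 stored self_time spans and bar_transaction
    # for 1, so time_spent_percentage() comes out to 0.8 and 0.2 respectively.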
    def test_time_spent_percentage(self):
        for _ in range(4):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                tags={"transaction": "foo_transaction"},
                timestamp=self.min_ago,
            )
            self.store_span_metric(
                1,
                tags={"transaction": "foo_transaction"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"transaction": "bar_transaction"},
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            1,
            tags={"transaction": "bar_transaction"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["transaction", "time_spent_percentage()"],
                "query": "",
                "orderby": ["-time_spent_percentage()"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 2
        assert data[0]["time_spent_percentage()"] == 0.8
        assert data[0]["transaction"] == "foo_transaction"
        assert data[1]["time_spent_percentage()"] == 0.2
        assert data[1]["transaction"] == "bar_transaction"
        assert meta["dataset"] == "spansMetrics"

    def test_time_spent_percentage_local(self):
        response = self.do_request(
            {
                "field": ["time_spent_percentage(local)"],
                "query": "",
                "orderby": ["-time_spent_percentage(local)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["time_spent_percentage(local)"] is None
        assert meta["dataset"] == "spansMetrics"

    def test_time_spent_percentage_on_span_duration(self):
        for _ in range(4):
            self.store_span_metric(
                1,
                internal_metric=constants.SPAN_METRICS_MAP["span.duration"],
                tags={"transaction": "foo_transaction"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SPAN_METRICS_MAP["span.duration"],
            tags={"transaction": "bar_transaction"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["transaction", "time_spent_percentage(app,span.duration)"],
                "query": "",
                "orderby": ["-time_spent_percentage(app,span.duration)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 2
        assert data[0]["time_spent_percentage(app,span.duration)"] == 0.8
        assert data[0]["transaction"] == "foo_transaction"
        assert data[1]["time_spent_percentage(app,span.duration)"] == 0.2
        assert data[1]["transaction"] == "bar_transaction"
        assert meta["dataset"] == "spansMetrics"
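
    # 4 of the 5 spans return status 500, so http_error_rate() is 0.8.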
    def test_http_error_rate_and_count(self):
        for _ in range(4):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                tags={"span.status_code": "500"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"span.status_code": "200"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["http_error_count()", "http_error_rate()"],
                "query": "",
                "orderby": ["-http_error_rate()"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["http_error_rate()"] == 0.8
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["http_error_count()"] == "integer"
        assert meta["fields"]["http_error_rate()"] == "percentage"
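
    # 8 of the 10 spans carry a ttid value and 9 of the 10 carry a ttfd value,
    # giving contribution rates of 0.8 and 0.9.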
    def test_ttid_rate_and_count(self):
        for _ in range(8):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                tags={"ttid": "ttid", "ttfd": "ttfd"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"ttfd": "ttfd", "ttid": ""},
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"ttfd": "", "ttid": ""},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": [
                    "ttid_contribution_rate()",
                    "ttid_count()",
                    "ttfd_contribution_rate()",
                    "ttfd_count()",
                ],
                "query": "",
                "orderby": ["-ttid_contribution_rate()"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["ttid_contribution_rate()"] == 0.8
        assert data[0]["ttid_count()"] == 8
        assert data[0]["ttfd_contribution_rate()"] == 0.9
        assert data[0]["ttfd_count()"] == 9
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["ttid_count()"] == "integer"
        assert meta["fields"]["ttid_contribution_rate()"] == "percentage"
        assert meta["fields"]["ttfd_count()"] == "integer"
        assert meta["fields"]["ttfd_contribution_rate()"] == "percentage"

    def test_main_thread_count(self):
        for _ in range(8):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                tags={"span.main_thread": "true"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={},
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"span.main_thread": ""},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": [
                    "main_thread_count()",
                ],
                "query": "",
                "orderby": ["-main_thread_count()"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["main_thread_count()"] == 8
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["main_thread_count()"] == "integer"
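
    # The light self_time metric is only used for queries that neither filter nor group by
    # transaction; the three requests below cover the filter, column, and plain cases.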
    def test_use_self_time_light(self):
        self.store_span_metric(
            100,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"transaction": "foo_transaction"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["p50(span.self_time)"],
                # Should be 0 since it's filtering on transaction
                "query": "transaction:foo_transaction",
                "orderby": ["-p50(span.self_time)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["p50(span.self_time)"] == 0
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["p50(span.self_time)"] == "duration"
        response = self.do_request(
            {
                # Should return no rows since it has a transaction column
                "field": ["transaction", "p50(span.self_time)"],
                "query": "",
                "orderby": ["-p50(span.self_time)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 0
        response = self.do_request(
            {
                "field": ["p50(span.self_time)"],
                # Should be 100 since it's not filtering on transaction
                "query": "",
                "orderby": ["-p50(span.self_time)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["p50(span.self_time)"] == 100
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["p50(span.self_time)"] == "duration"
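
    # The expected span.module values below document the mapping: the db, http, and cache
    # categories map to themselves, redis ops are treated as cache, and db.sql.room and
    # unknown categories fall back to "other".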
    def test_span_module(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "http", "span.description": "f"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "db", "span.description": "e"},
        )
        self.store_span_metric(
            5,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "foobar", "span.description": "d"},
        )
        self.store_span_metric(
            7,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "cache", "span.description": "c"},
        )
        self.store_span_metric(
            9,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "db", "span.op": "db.redis", "span.description": "b"},
        )
        self.store_span_metric(
            11,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "db", "span.op": "db.sql.room", "span.description": "a"},
        )
        response = self.do_request(
            {
                "field": ["span.module", "span.description", "p50(span.self_time)"],
                "query": "",
                "orderby": ["-p50(span.self_time)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 6
        assert data[0]["p50(span.self_time)"] == 11
        assert data[0]["span.module"] == "other"
        assert data[0]["span.description"] == "a"
        assert data[1]["p50(span.self_time)"] == 9
        assert data[1]["span.module"] == "cache"
        assert data[1]["span.description"] == "b"
        assert data[2]["p50(span.self_time)"] == 7
        assert data[2]["span.module"] == "cache"
        assert data[2]["span.description"] == "c"
        assert data[3]["p50(span.self_time)"] == 5
        assert data[3]["span.module"] == "other"
        assert data[3]["span.description"] == "d"
        assert data[4]["p50(span.self_time)"] == 3
        assert data[4]["span.module"] == "db"
        assert data[4]["span.description"] == "e"
        assert data[5]["p50(span.self_time)"] == 1
        assert data[5]["span.module"] == "http"
        assert data[5]["span.description"] == "f"
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["p50(span.self_time)"] == "duration"

    def test_tag_search(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.description": "foo"},
        )
        self.store_span_metric(
            99,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.description": "bar"},
        )
        response = self.do_request(
            {
                "field": ["sum(span.self_time)"],
                "query": "span.description:bar",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["sum(span.self_time)"] == 99
        assert meta["dataset"] == "spansMetrics"

    def test_free_text_search(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.description": "foo"},
        )
        self.store_span_metric(
            99,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.description": "bar"},
        )
        response = self.do_request(
            {
                "field": ["sum(span.self_time)"],
                "query": "foo",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["sum(span.self_time)"] == 321
        assert meta["dataset"] == "spansMetrics"
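
    # avg for release foo is 100 and for release bar is 10, so avg_compare reports
    # (10 - 100) / 100 = -0.9.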
    def test_avg_compare(self):
        self.store_span_metric(
            100,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "foo"},
        )
        self.store_span_metric(
            10,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "bar"},
        )
        for function_name in [
            "avg_compare(span.self_time, release, foo, bar)",
            'avg_compare(span.self_time, release, "foo", "bar")',
        ]:
            response = self.do_request(
                {
                    "field": [function_name],
                    "query": "",
                    "project": self.project.id,
                    "dataset": "spansMetrics",
                }
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            meta = response.data["meta"]
            assert len(data) == 1
            assert data[0][function_name] == -0.9
            assert meta["dataset"] == "spansMetrics"
            assert meta["fields"][function_name] == "percent_change"

    def test_avg_compare_invalid_column(self):
        response = self.do_request(
            {
                "field": ["avg_compare(span.self_time, transaction, foo, bar)"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 400, response.content
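
    # span.domain is stored as a comma-delimited tag with leading and trailing commas
    # (e.g. ",sentry_table1,sentry_table2,") and is returned to clients as an array.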
    def test_span_domain_array(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,sentry_table2,"},
        )
        response = self.do_request(
            {
                "field": ["span.domain", "p75(span.self_time)"],
                "query": "",
                "project": self.project.id,
                "orderby": ["-p75(span.self_time)"],
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 2
        assert data[0]["span.domain"] == ["sentry_table1"]
        assert data[1]["span.domain"] == ["sentry_table1", "sentry_table2"]
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["span.domain"] == "array"

    def test_span_domain_array_filter(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,sentry_table2,"},
        )
        response = self.do_request(
            {
                "field": ["span.domain", "p75(span.self_time)"],
                "query": "span.domain:sentry_table2",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["span.domain"] == ["sentry_table1", "sentry_table2"]
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["span.domain"] == "array"

    def test_span_domain_array_filter_wildcard(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,sentry_table2,"},
        )
        for query in ["sentry*2", "*table2", "sentry_table2*"]:
            response = self.do_request(
                {
                    "field": ["span.domain", "p75(span.self_time)"],
                    "query": f"span.domain:{query}",
                    "project": self.project.id,
                    "dataset": "spansMetrics",
                }
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            meta = response.data["meta"]
            assert len(data) == 1, query
            assert data[0]["span.domain"] == ["sentry_table1", "sentry_table2"], query
            assert meta["dataset"] == "spansMetrics", query
            assert meta["fields"]["span.domain"] == "array"

    def test_span_domain_array_has_filter(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ""},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,sentry_table2,"},
        )
        response = self.do_request(
            {
                "field": ["span.domain", "p75(span.self_time)"],
                "query": "has:span.domain",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["span.domain"] == ["sentry_table1", "sentry_table2"]
        assert meta["dataset"] == "spansMetrics"
        response = self.do_request(
            {
                "field": ["span.domain", "p75(span.self_time)"],
                "query": "!has:span.domain",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["span.domain"] == "array"

    def test_unique_values_span_domain(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table2,sentry_table3,"},
        )
        response = self.do_request(
            {
                "field": ["unique.span_domains", "count()"],
                "query": "",
                "orderby": "unique.span_domains",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 3
        assert data[0]["unique.span_domains"] == "sentry_table1"
        assert data[1]["unique.span_domains"] == "sentry_table2"
        assert data[2]["unique.span_domains"] == "sentry_table3"
        assert meta["fields"]["unique.span_domains"] == "string"
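
    # "sentry_tible1" is deliberately misspelled so it is excluded by the
    # span.domain:sentry_tab* wildcard filter below.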
    def test_unique_values_span_domain_with_filter(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_tible1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table2,sentry_table3,"},
        )
        response = self.do_request(
            {
                "field": ["unique.span_domains", "count()"],
                "query": "span.domain:sentry_tab*",
                "orderby": "unique.span_domains",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 2
        assert data[0]["unique.span_domains"] == "sentry_table2"
        assert data[1]["unique.span_domains"] == "sentry_table3"
        assert meta["fields"]["unique.span_domains"] == "string"

    def test_avg_if(self):
        self.store_span_metric(
            100,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "foo"},
        )
        self.store_span_metric(
            200,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "foo"},
        )
        self.store_span_metric(
            10,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "bar"},
        )
        self.store_span_metric(
            300,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.op": "queue.process"},
        )
        response = self.do_request(
            {
                "field": [
                    "avg_if(span.self_time, release, foo)",
                    "avg_if(span.self_time, span.op, queue.process)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["avg_if(span.self_time, release, foo)"] == 150
        assert data[0]["avg_if(span.self_time, span.op, queue.process)"] == 300
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["avg_if(span.self_time, release, foo)"] == "duration"
        assert meta["fields"]["avg_if(span.self_time, span.op, queue.process)"] == "duration"

    def test_device_class(self):
        self.store_span_metric(
            123,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": "1"},
        )
        self.store_span_metric(
            678,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": "2"},
        )
        self.store_span_metric(
            999,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": ""},
        )
        response = self.do_request(
            {
                "field": ["device.class", "p95()"],
                "query": "",
                "orderby": "p95()",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 3
        # Need to actually check the dict since the level for 1 isn't guaranteed to stay `low` or `medium`
        assert data[0]["device.class"] == map_device_class_level("1")
        assert data[1]["device.class"] == map_device_class_level("2")
        assert data[2]["device.class"] == "Unknown"
        assert meta["fields"]["device.class"] == "string"

    def test_device_class_filter(self):
        self.store_span_metric(
            123,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": "1"},
        )
        # Need to actually check the dict since the level for 1 isn't guaranteed to stay `low`
        level = map_device_class_level("1")
        response = self.do_request(
            {
                "field": ["device.class", "count()"],
                "query": f"device.class:{level}",
                "orderby": "count()",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["device.class"] == level
        assert meta["fields"]["device.class"] == "string"

    def test_device_class_filter_unknown(self):
        self.store_span_metric(
            123,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": ""},
        )
        response = self.do_request(
            {
                "field": ["device.class", "count()"],
                "query": "device.class:Unknown",
                "orderby": "count()",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["device.class"] == "Unknown"
        assert meta["fields"]["device.class"] == "string"

    def test_cache_hit_rate(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"cache.hit": "true"},
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"cache.hit": "false"},
        )
        response = self.do_request(
            {
                "field": ["cache_hit_rate()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["cache_hit_rate()"] == 0.5
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["cache_hit_rate()"] == "percentage"
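
    # 3 of the 4 spans report cache.hit false, so cache_miss_rate() is 0.75.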
    def test_cache_miss_rate(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"cache.hit": "true"},
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"cache.hit": "false"},
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"cache.hit": "false"},
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"cache.hit": "false"},
        )
        response = self.do_request(
            {
                "field": ["cache_miss_rate()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["cache_miss_rate()"] == 0.75
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["cache_miss_rate()"] == "percentage"
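
    # Of the five spans below, one is a 200, one a 3xx, one a 4xx, and two are 5xx,
    # so the expected response rates are 0.2, 0.2, 0.2, and 0.4.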
    def test_http_response_rate(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "200"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "301"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "404"},
        )
        self.store_span_metric(
            4,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "503"},
        )
        self.store_span_metric(
            5,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "501"},
        )
        response = self.do_request(
            {
                "field": [
                    "http_response_rate(200)",  # By exact code
                    "http_response_rate(3)",  # By code class
                    "http_response_rate(4)",
                    "http_response_rate(5)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 1
        assert data[0]["http_response_rate(200)"] == 0.2
        assert data[0]["http_response_rate(3)"] == 0.2
        assert data[0]["http_response_rate(4)"] == 0.2
        assert data[0]["http_response_rate(5)"] == 0.4
        meta = response.data["meta"]
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["http_response_rate(200)"] == "percentage"
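
    # The regression_score tests below use two_min_ago as the breakpoint: spans whose
    # duration grows (or that only appear) after it sort first, and removed spans score
    # below zero.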
    def test_regression_score_regression(self):
        # This span increases in duration
        self.store_span_metric(
            1,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.six_min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Regressed Span"},
            project=self.project.id,
        )
        self.store_span_metric(
            100,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Regressed Span"},
            project=self.project.id,
        )
        # This span stays the same
        self.store_span_metric(
            1,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.three_days_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        self.store_span_metric(
            1,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        response = self.do_request(
            {
                "field": [
                    "span.description",
                    f"regression_score(span.duration,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "transaction:/api/0/projects/",
                "dataset": "spansMetrics",
                "orderby": [
                    f"-regression_score(span.duration,{int(self.two_min_ago.timestamp())})"
                ],
                "start": (self.six_min_ago - timedelta(minutes=1)).isoformat(),
                "end": before_now(minutes=0),
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 2
        assert [row["span.description"] for row in data] == ["Regressed Span", "Non-regressed"]

    def test_regression_score_added_span(self):
        # This span only exists after the breakpoint
        self.store_span_metric(
            100,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Added span"},
            project=self.project.id,
        )
        # This span stays the same
        self.store_span_metric(
            1,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.three_days_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        self.store_span_metric(
            1,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        response = self.do_request(
            {
                "field": [
                    "span.description",
                    f"regression_score(span.duration,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "transaction:/api/0/projects/",
                "dataset": "spansMetrics",
                "orderby": [
                    f"-regression_score(span.duration,{int(self.two_min_ago.timestamp())})"
                ],
                "start": (self.six_min_ago - timedelta(minutes=1)).isoformat(),
                "end": before_now(minutes=0),
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 2
        assert [row["span.description"] for row in data] == ["Added span", "Non-regressed"]

    def test_regression_score_removed_span(self):
        # This span only exists before the breakpoint
        self.store_span_metric(
            100,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.six_min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Removed span"},
            project=self.project.id,
        )
        # This span stays the same
        self.store_span_metric(
            1,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.three_days_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        self.store_span_metric(
            1,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        response = self.do_request(
            {
                "field": [
                    "span.description",
                    f"regression_score(span.duration,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "transaction:/api/0/projects/",
                "dataset": "spansMetrics",
                "orderby": [
                    f"-regression_score(span.duration,{int(self.two_min_ago.timestamp())})"
                ],
                "start": (self.six_min_ago - timedelta(minutes=1)).isoformat(),
                "end": before_now(minutes=0),
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 2
        assert [row["span.description"] for row in data] == ["Non-regressed", "Removed span"]
        # The regression score is < 0 for removed spans; this can act as
        # a way to filter out removed spans when necessary
        assert data[1][f"regression_score(span.duration,{int(self.two_min_ago.timestamp())})"] < 0
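
    # avg_by_timestamp splits the average at the given unix timestamp: the older span
    # (value 1) lands in the "less" bucket and the newer span (value 3) in the "greater" bucket.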
  1388. def test_avg_self_time_by_timestamp(self):
  1389. self.store_span_metric(
  1390. 1,
  1391. internal_metric=constants.SELF_TIME_LIGHT,
  1392. timestamp=self.six_min_ago,
  1393. tags={},
  1394. )
  1395. self.store_span_metric(
  1396. 3,
  1397. internal_metric=constants.SELF_TIME_LIGHT,
  1398. timestamp=self.min_ago,
  1399. tags={},
  1400. )
  1401. response = self.do_request(
  1402. {
  1403. "field": [
  1404. f"avg_by_timestamp(span.self_time,less,{int(self.two_min_ago.timestamp())})",
  1405. f"avg_by_timestamp(span.self_time,greater,{int(self.two_min_ago.timestamp())})",
  1406. ],
  1407. "query": "",
  1408. "project": self.project.id,
  1409. "dataset": "spansMetrics",
  1410. "statsPeriod": "1h",
  1411. }
  1412. )
  1413. assert response.status_code == 200, response.content
  1414. data = response.data["data"]
  1415. assert len(data) == 1
  1416. assert data[0] == {
  1417. f"avg_by_timestamp(span.self_time,less,{int(self.two_min_ago.timestamp())})": 1.0,
  1418. f"avg_by_timestamp(span.self_time,greater,{int(self.two_min_ago.timestamp())})": 3.0,
  1419. }
  1420. def test_avg_self_time_by_timestamp_invalid_condition(self):
  1421. response = self.do_request(
  1422. {
  1423. "field": [
  1424. f"avg_by_timestamp(span.self_time,INVALID_ARG,{int(self.two_min_ago.timestamp())})",
  1425. ],
  1426. "query": "",
  1427. "project": self.project.id,
  1428. "dataset": "spansMetrics",
  1429. "statsPeriod": "1h",
  1430. }
  1431. )
  1432. assert response.status_code == 400, response.content
  1433. assert (
  1434. response.data["detail"]
  1435. == "avg_by_timestamp: condition argument invalid: string must be one of ['greater', 'less']"
  1436. )
    def test_epm_by_timestamp(self):
        self.store_span_metric(
            1,
            internal_metric=SPAN_DURATION_MRI,
            timestamp=self.six_min_ago,
            tags={},
        )
        # More events occur after the timestamp
        for _ in range(3):
            self.store_span_metric(
                3,
                internal_metric=SPAN_DURATION_MRI,
                timestamp=self.min_ago,
                tags={},
            )
        response = self.do_request(
            {
                "field": [
                    f"epm_by_timestamp(less,{int(self.two_min_ago.timestamp())})",
                    f"epm_by_timestamp(greater,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 1
        assert data[0][f"epm_by_timestamp(less,{int(self.two_min_ago.timestamp())})"] < 1.0
        assert data[0][f"epm_by_timestamp(greater,{int(self.two_min_ago.timestamp())})"] > 1.0

    def test_epm_by_timestamp_invalid_condition(self):
        response = self.do_request(
            {
                "field": [
                    f"epm_by_timestamp(INVALID_ARG,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 400, response.content
        assert (
            response.data["detail"]
            == "epm_by_timestamp: condition argument invalid: string must be one of ['greater', 'less']"
        )
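
    # any(field) resolves to an arbitrary tag value per group; with descriptions "a", "b", "c"
    # stored for each transaction, the test pins the result to "a" (presumably the first value
    # encountered per group in this setup).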
    def test_any_function(self):
        for char in "abc":
            for transaction in ["foo", "bar"]:
                self.store_span_metric(
                    1,
                    internal_metric=constants.SELF_TIME_LIGHT,
                    timestamp=self.six_min_ago,
                    tags={"span.description": char, "transaction": transaction},
                )
        response = self.do_request(
            {
                "field": [
                    "transaction",
                    "any(span.description)",
                ],
                "query": "",
                "orderby": ["transaction"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        assert response.data["data"] == [
            {"transaction": "bar", "any(span.description)": "a"},
            {"transaction": "foo", "any(span.description)": "a"},
        ]
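
    # count_op(x) counts spans whose span.op tag equals x; one queue.publish span and one
    # queue.process span are stored, so each count is 1.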
    def test_count_op(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.op": "queue.publish"},
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.op": "queue.process"},
        )
        response = self.do_request(
            {
                "field": [
                    "count_op(queue.publish)",
                    "count_op(queue.process)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert data == [
            {"count_op(queue.publish)": 1, "count_op(queue.process)": 1},
        ]
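
    # Both the "project" and "project.name" fields are mapped back to the project slug
    # in the response rows.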
    def test_project_mapping(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={},
        )
        # More events occur after the timestamp
        for _ in range(3):
            self.store_span_metric(
                3,
                internal_metric=constants.SELF_TIME_LIGHT,
                timestamp=self.min_ago,
                tags={},
            )
        response = self.do_request(
            {
                "field": ["project", "project.name", "count()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert data[0]["project"] == self.project.slug
        assert data[0]["project.name"] == self.project.slug
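
    # Gauge metrics are stored as a min/max/sum/count/last payload. avg_if filters the average
    # by a tag value, and the asserted avg_compare value of 1.0 is consistent with a relative
    # change of (10 - 5) / 5 between the "foo" and "bar" releases.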
    def test_slow_frames_gauge_metric(self):
        self.store_span_metric(
            {
                "min": 5,
                "max": 5,
                "sum": 5,
                "count": 1,
                "last": 5,
            },
            entity="metrics_gauges",
            metric="mobile.slow_frames",
            timestamp=self.six_min_ago,
            tags={"release": "foo"},
        )
        self.store_span_metric(
            {
                "min": 10,
                "max": 10,
                "sum": 10,
                "count": 1,
                "last": 10,
            },
            entity="metrics_gauges",
            metric="mobile.slow_frames",
            timestamp=self.six_min_ago,
            tags={"release": "bar"},
        )
        response = self.do_request(
            {
                "field": [
                    "avg_if(mobile.slow_frames,release,foo)",
                    "avg_if(mobile.slow_frames,release,bar)",
                    "avg_compare(mobile.slow_frames,release,foo,bar)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert data == [
            {
                "avg_compare(mobile.slow_frames,release,foo,bar)": 1.0,
                "avg_if(mobile.slow_frames,release,foo)": 5.0,
                "avg_if(mobile.slow_frames,release,bar)": 10.0,
            }
        ]
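
    # The messaging.message.receive.latency gauge resolves per messaging.destination.name,
    # and avg() over a gauge reads its average value (presumably sum / count) for each group.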
    def test_resolve_messaging_message_receive_latency_gauge(self):
        self.store_span_metric(
            {
                "min": 5,
                "max": 5,
                "sum": 5,
                "count": 1,
                "last": 5,
            },
            entity="metrics_gauges",
            metric="messaging.message.receive.latency",
            timestamp=self.six_min_ago,
            tags={"messaging.destination.name": "foo", "trace.status": "ok"},
        )
        self.store_span_metric(
            {
                "min": 10,
                "max": 10,
                "sum": 10,
                "count": 1,
                "last": 10,
            },
            entity="metrics_gauges",
            metric="messaging.message.receive.latency",
            timestamp=self.six_min_ago,
            tags={"messaging.destination.name": "bar", "trace.status": "ok"},
        )
        response = self.do_request(
            {
                "field": [
                    "messaging.destination.name",
                    "trace.status",
                    "avg(messaging.message.receive.latency)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert data == [
            {
                "messaging.destination.name": "bar",
                "trace.status": "ok",
                "avg(messaging.message.receive.latency)": 10.0,
            },
            {
                "messaging.destination.name": "foo",
                "trace.status": "ok",
                "avg(messaging.message.receive.latency)": 5.0,
            },
        ]
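
    # Selecting an aggregate over a metric that was never stored yields None for that column,
    # and its meta field type is reported as "null".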
    def test_messaging_does_not_exist_as_metric(self):
        self.store_span_metric(
            100,
            internal_metric=constants.SPAN_METRICS_MAP["span.duration"],
            tags={"messaging.destination.name": "foo", "trace.status": "ok"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": [
                    "messaging.destination.name",
                    "trace.status",
                    "avg(messaging.message.receive.latency)",
                    "avg(span.duration)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert data == [
            {
                "messaging.destination.name": "foo",
                "trace.status": "ok",
                "avg(messaging.message.receive.latency)": None,
                "avg(span.duration)": 100,
            },
        ]
        meta = response.data["meta"]
        assert meta["fields"]["avg(messaging.message.receive.latency)"] == "null"

    def test_cache_item_size_does_not_exist_as_metric(self):
        self.store_span_metric(
            100,
            internal_metric=constants.SPAN_METRICS_MAP["span.duration"],
            tags={"cache.item": "true"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": [
                    "avg(cache.item_size)",
                    "avg(span.duration)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert data == [
            {
                "avg(cache.item_size)": None,
                "avg(span.duration)": 100,
            },
        ]
        meta = response.data["meta"]
        assert meta["fields"]["avg(cache.item_size)"] == "null"
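
    # trace_status_rate(status) is the fraction of spans with the given trace.status tag:
    # 2 of the 5 stored spans are "ok" (0.4) and each of the other three statuses accounts
    # for 1 of 5 (0.2).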
    def test_trace_status_rate(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "unknown"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "internal_error"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "unauthenticated"},
        )
        self.store_span_metric(
            4,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "ok"},
        )
        self.store_span_metric(
            5,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "ok"},
        )
        response = self.do_request(
            {
                "field": [
                    "trace_status_rate(ok)",
                    "trace_status_rate(unknown)",
                    "trace_status_rate(internal_error)",
                    "trace_status_rate(unauthenticated)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 1
        assert data[0]["trace_status_rate(ok)"] == 0.4
        assert data[0]["trace_status_rate(unknown)"] == 0.2
        assert data[0]["trace_status_rate(internal_error)"] == 0.2
        assert data[0]["trace_status_rate(unauthenticated)"] == 0.2
        meta = response.data["meta"]
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["trace_status_rate(ok)"] == "percentage"
        assert meta["fields"]["trace_status_rate(unknown)"] == "percentage"
        assert meta["fields"]["trace_status_rate(internal_error)"] == "percentage"
        assert meta["fields"]["trace_status_rate(unauthenticated)"] == "percentage"
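
    # trace_error_rate() returns 0.4 here, i.e. 2 of the 5 spans count as errored; presumably
    # "internal_error" and "unauthenticated" are treated as errors while "ok" and "unknown"
    # are not.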
    def test_trace_error_rate(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "unknown"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "internal_error"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "unauthenticated"},
        )
        self.store_span_metric(
            4,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "ok"},
        )
        self.store_span_metric(
            5,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"trace.status": "ok"},
        )
        response = self.do_request(
            {
                "field": [
                    "trace_error_rate()",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 1
        assert data[0]["trace_error_rate()"] == 0.4
        meta = response.data["meta"]
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["trace_error_rate()"] == "percentage"
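

# This subclass re-runs the inherited span-metrics tests with the
# organizations:use-metrics-layer feature enabled; tests covering functions the metrics
# layer does not support yet are marked as expected failures.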
class OrganizationEventsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True

    @pytest.mark.xfail(reason="Not implemented")
    def test_time_spent_percentage(self):
        super().test_time_spent_percentage()

    @pytest.mark.xfail(reason="Not implemented")
    def test_time_spent_percentage_local(self):
        super().test_time_spent_percentage_local()

    @pytest.mark.xfail(reason="Not implemented")
    def test_time_spent_percentage_on_span_duration(self):
        super().test_time_spent_percentage_on_span_duration()

    @pytest.mark.xfail(reason="Cannot group by function 'if'")
    def test_span_module(self):
        super().test_span_module()

    @pytest.mark.xfail(reason="Cannot search by tags")
    def test_tag_search(self):
        super().test_tag_search()

    @pytest.mark.xfail(reason="Cannot search by tags")
    def test_free_text_search(self):
        super().test_free_text_search()

    @pytest.mark.xfail(reason="Not implemented")
    def test_avg_compare(self):
        super().test_avg_compare()

    @pytest.mark.xfail(reason="Not implemented")
    def test_span_domain_array(self):
        super().test_span_domain_array()

    @pytest.mark.xfail(reason="Not implemented")
    def test_span_domain_array_filter(self):
        super().test_span_domain_array_filter()

    @pytest.mark.xfail(reason="Not implemented")
    def test_span_domain_array_filter_wildcard(self):
        super().test_span_domain_array_filter_wildcard()

    @pytest.mark.xfail(reason="Not implemented")
    def test_span_domain_array_has_filter(self):
        super().test_span_domain_array_has_filter()

    @pytest.mark.xfail(reason="Not implemented")
    def test_unique_values_span_domain(self):
        super().test_unique_values_span_domain()

    @pytest.mark.xfail(reason="Not implemented")
    def test_unique_values_span_domain_with_filter(self):
        super().test_unique_values_span_domain_with_filter()

    @pytest.mark.xfail(reason="Not implemented")
    def test_avg_if(self):
        super().test_avg_if()

    @pytest.mark.xfail(reason="Not implemented")
    def test_device_class_filter(self):
        super().test_device_class_filter()

    @pytest.mark.xfail(reason="Not implemented")
    def test_device_class(self):
        super().test_device_class()

    @pytest.mark.xfail(reason="Not implemented")
    def test_count_op(self):
        super().test_count_op()