test_organization_events_span_metrics.py

from datetime import timedelta

import pytest
from django.urls import reverse

from sentry.search.events import constants
from sentry.search.utils import map_device_class_level
from sentry.testutils.cases import MetricsEnhancedPerformanceTestCase
from sentry.testutils.helpers.datetime import before_now

pytestmark = pytest.mark.sentry_metrics


class OrganizationEventsMetricsEnhancedPerformanceEndpointTest(MetricsEnhancedPerformanceTestCase):
    viewname = "sentry-api-0-organization-events"

    # Poor intentionally omitted for test_measurement_rating_that_does_not_exist
    METRIC_STRINGS = [
        "foo_transaction",
        "bar_transaction",
    ]

    def setUp(self):
        super().setUp()
        self.min_ago = before_now(minutes=1)
        # Used as the breakpoint by the regression-score and *_by_timestamp tests below
        self.two_min_ago = before_now(minutes=2)
        self.six_min_ago = before_now(minutes=6)
        self.three_days_ago = before_now(days=3)
        self.features = {
            "organizations:starfish-view": True,
        }

    def do_request(self, query, features=None):
        if features is None:
            features = {"organizations:discover-basic": True}
        features.update(self.features)
        self.login_as(user=self.user)
        url = reverse(
            self.viewname,
            kwargs={"organization_slug": self.organization.slug},
        )
        with self.feature(features):
            return self.client.get(url, query, format="json")
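
    # Every test below queries the organization events endpoint with
    # dataset="spansMetrics" and asserts on both the returned rows ("data")
    # and the field/unit metadata ("meta").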
    def test_p50_with_no_data(self):
        response = self.do_request(
            {
                "field": ["p50()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["p50()"] == 0
        assert meta["dataset"] == "spansMetrics"

    def test_count(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.three_days_ago,
        )
        response = self.do_request(
            {
                "field": ["count()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "7d",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["count()"] == 1
        assert meta["dataset"] == "spansMetrics"

    def test_count_unique(self):
        self.store_span_metric(
            1,
            "user",
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            2,
            "user",
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["count_unique(user)"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["count_unique(user)"] == 2
        assert meta["dataset"] == "spansMetrics"

    def test_sum(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            99,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["sum(span.self_time)"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["sum(span.self_time)"] == 420
        assert meta["dataset"] == "spansMetrics"

    def test_percentile(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["percentile(span.self_time, 0.95)"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["percentile(span.self_time, 0.95)"] == 1
        assert meta["dataset"] == "spansMetrics"

    def test_fixed_percentile_functions(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        for function in ["p50()", "p75()", "p95()", "p99()", "p100()"]:
            response = self.do_request(
                {
                    "field": [function],
                    "query": "",
                    "project": self.project.id,
                    "dataset": "spansMetrics",
                }
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            meta = response.data["meta"]
            assert len(data) == 1
            assert data[0][function] == 1, function
            assert meta["dataset"] == "spansMetrics", function
            assert meta["fields"][function] == "duration", function

    def test_fixed_percentile_functions_with_duration(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SPAN_METRICS_MAP["span.duration"],
            timestamp=self.min_ago,
        )
        for function in [
            "p50(span.duration)",
            "p75(span.duration)",
            "p95(span.duration)",
            "p99(span.duration)",
            "p100(span.duration)",
        ]:
            response = self.do_request(
                {
                    "field": [function],
                    "query": "",
                    "project": self.project.id,
                    "dataset": "spansMetrics",
                }
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            meta = response.data["meta"]
            assert len(data) == 1, function
            assert data[0][function] == 1, function
            assert meta["dataset"] == "spansMetrics", function
            assert meta["fields"][function] == "duration", function

    def test_avg(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["avg()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["avg()"] == 1
        assert meta["dataset"] == "spansMetrics"
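
    # eps()/sps() divide the span count by the stats period in seconds:
    # 6 spans over the 10 minute (600 s) window -> 6 / 600 = 0.01 per second.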
    def test_eps(self):
        for _ in range(6):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                timestamp=self.min_ago,
            )
        response = self.do_request(
            {
                "field": ["eps()", "sps()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["eps()"] == 0.01
        assert data[0]["sps()"] == 0.01
        assert meta["fields"]["eps()"] == "rate"
        assert meta["fields"]["sps()"] == "rate"
        assert meta["units"]["eps()"] == "1/second"
        assert meta["units"]["sps()"] == "1/second"
        assert meta["dataset"] == "spansMetrics"
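
    # epm()/spm() use the same count over the period in minutes:
    # 6 spans / 10 minutes -> 0.6 per minute.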
    def test_epm(self):
        for _ in range(6):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                timestamp=self.min_ago,
            )
        response = self.do_request(
            {
                "field": ["epm()", "spm()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["epm()"] == 0.6
        assert data[0]["spm()"] == 0.6
        assert meta["fields"]["epm()"] == "rate"
        assert meta["fields"]["spm()"] == "rate"
        assert meta["units"]["epm()"] == "1/minute"
        assert meta["units"]["spm()"] == "1/minute"
        assert meta["dataset"] == "spansMetrics"
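
    # time_spent_percentage() is a transaction's share of the total span self time:
    # foo_transaction stores 4 of the 5 self_time values below, so it accounts for
    # 0.8 of the time spent and bar_transaction for the remaining 0.2.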
    def test_time_spent_percentage(self):
        for _ in range(4):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                tags={"transaction": "foo_transaction"},
                timestamp=self.min_ago,
            )
            self.store_span_metric(
                1,
                tags={"transaction": "foo_transaction"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"transaction": "bar_transaction"},
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            1,
            tags={"transaction": "bar_transaction"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["transaction", "time_spent_percentage()"],
                "query": "",
                "orderby": ["-time_spent_percentage()"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 2
        assert data[0]["time_spent_percentage()"] == 0.8
        assert data[0]["transaction"] == "foo_transaction"
        assert data[1]["time_spent_percentage()"] == 0.2
        assert data[1]["transaction"] == "bar_transaction"
        assert meta["dataset"] == "spansMetrics"

    def test_time_spent_percentage_local(self):
        response = self.do_request(
            {
                "field": ["time_spent_percentage(local)"],
                "query": "",
                "orderby": ["-time_spent_percentage(local)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["time_spent_percentage(local)"] is None
        assert meta["dataset"] == "spansMetrics"
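
    # http_error_count()/http_error_rate() only count 5xx responses:
    # 4 of the 5 spans below have span.status_code 500 -> rate 0.8.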
    def test_http_error_rate_and_count(self):
        for _ in range(4):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                tags={"span.status_code": "500"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"span.status_code": "200"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["http_error_count()", "http_error_rate()"],
                "query": "",
                "orderby": ["-http_error_rate()"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["http_error_rate()"] == 0.8
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["http_error_count()"] == "integer"
        assert meta["fields"]["http_error_rate()"] == "percentage"
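
    # ttid/ttfd contribution rates are the fraction of spans tagged as contributing
    # to time-to-initial-display / time-to-full-display: 8 of the 10 spans below
    # carry a ttid tag (0.8) and 9 of the 10 carry a ttfd tag (0.9).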
    def test_ttid_rate_and_count(self):
        for _ in range(8):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                tags={"ttid": "ttid", "ttfd": "ttfd"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"ttfd": "ttfd", "ttid": ""},
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"ttfd": "", "ttid": ""},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": [
                    "ttid_contribution_rate()",
                    "ttid_count()",
                    "ttfd_contribution_rate()",
                    "ttfd_count()",
                ],
                "query": "",
                "orderby": ["-ttid_contribution_rate()"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["ttid_contribution_rate()"] == 0.8
        assert data[0]["ttid_count()"] == 8
        assert data[0]["ttfd_contribution_rate()"] == 0.9
        assert data[0]["ttfd_count()"] == 9
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["ttid_count()"] == "integer"
        assert meta["fields"]["ttid_contribution_rate()"] == "percentage"
        assert meta["fields"]["ttfd_count()"] == "integer"
        assert meta["fields"]["ttfd_contribution_rate()"] == "percentage"

    def test_main_thread_count(self):
        for _ in range(8):
            self.store_span_metric(
                1,
                internal_metric=constants.SELF_TIME_LIGHT,
                tags={"span.main_thread": "true"},
                timestamp=self.min_ago,
            )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={},
            timestamp=self.min_ago,
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"span.main_thread": ""},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": [
                    "main_thread_count()",
                ],
                "query": "",
                "orderby": ["-main_thread_count()"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["main_thread_count()"] == 8
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["main_thread_count()"] == "integer"
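
    # SELF_TIME_LIGHT is a lower-cardinality variant of span.self_time stored
    # without the transaction tag, so it can only serve queries that neither
    # filter nor group by transaction.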
    def test_use_self_time_light(self):
        self.store_span_metric(
            100,
            internal_metric=constants.SELF_TIME_LIGHT,
            tags={"transaction": "foo_transaction"},
            timestamp=self.min_ago,
        )
        response = self.do_request(
            {
                "field": ["p50(span.self_time)"],
                # Should be 0 since it's filtering on transaction
                "query": "transaction:foo_transaction",
                "orderby": ["-p50(span.self_time)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["p50(span.self_time)"] == 0
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["p50(span.self_time)"] == "duration"

        response = self.do_request(
            {
                # Should return no rows since it has a transaction column
                "field": ["transaction", "p50(span.self_time)"],
                "query": "",
                "orderby": ["-p50(span.self_time)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 0

        response = self.do_request(
            {
                "field": ["p50(span.self_time)"],
                # Should be 100 since it's not filtering on transaction
                "query": "",
                "orderby": ["-p50(span.self_time)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["p50(span.self_time)"] == 100
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["p50(span.self_time)"] == "duration"
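
    # span.module is derived from span.category / span.op: known categories
    # (http, db, cache) map to themselves, the db.redis op is reported as cache,
    # and unrecognized combinations such as the foobar category or the
    # db.sql.room op fall back to "other".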
    def test_span_module(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "http", "span.description": "f"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "db", "span.description": "e"},
        )
        self.store_span_metric(
            5,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "foobar", "span.description": "d"},
        )
        self.store_span_metric(
            7,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "cache", "span.description": "c"},
        )
        self.store_span_metric(
            9,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "db", "span.op": "db.redis", "span.description": "b"},
        )
        self.store_span_metric(
            11,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.category": "db", "span.op": "db.sql.room", "span.description": "a"},
        )
        response = self.do_request(
            {
                "field": ["span.module", "span.description", "p50(span.self_time)"],
                "query": "",
                "orderby": ["-p50(span.self_time)"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "10m",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 6
        assert data[0]["p50(span.self_time)"] == 11
        assert data[0]["span.module"] == "other"
        assert data[0]["span.description"] == "a"
        assert data[1]["p50(span.self_time)"] == 9
        assert data[1]["span.module"] == "cache"
        assert data[1]["span.description"] == "b"
        assert data[2]["p50(span.self_time)"] == 7
        assert data[2]["span.module"] == "cache"
        assert data[2]["span.description"] == "c"
        assert data[3]["p50(span.self_time)"] == 5
        assert data[3]["span.module"] == "other"
        assert data[3]["span.description"] == "d"
        assert data[4]["p50(span.self_time)"] == 3
        assert data[4]["span.module"] == "db"
        assert data[4]["span.description"] == "e"
        assert data[5]["p50(span.self_time)"] == 1
        assert data[5]["span.module"] == "http"
        assert data[5]["span.description"] == "f"
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["p50(span.self_time)"] == "duration"

    def test_tag_search(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.description": "foo"},
        )
        self.store_span_metric(
            99,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.description": "bar"},
        )
        response = self.do_request(
            {
                "field": ["sum(span.self_time)"],
                "query": "span.description:bar",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["sum(span.self_time)"] == 99
        assert meta["dataset"] == "spansMetrics"

    def test_free_text_search(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.description": "foo"},
        )
        self.store_span_metric(
            99,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.description": "bar"},
        )
        response = self.do_request(
            {
                "field": ["sum(span.self_time)"],
                "query": "foo",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["sum(span.self_time)"] == 321
        assert meta["dataset"] == "spansMetrics"
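
    # avg_compare(column, tag, before, after) returns the relative change in the
    # average: avg for release "foo" is 100 and for "bar" is 10, so the change is
    # (10 - 100) / 100 = -0.9.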
    def test_avg_compare(self):
        self.store_span_metric(
            100,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "foo"},
        )
        self.store_span_metric(
            10,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "bar"},
        )
        for function_name in [
            "avg_compare(span.self_time, release, foo, bar)",
            'avg_compare(span.self_time, release, "foo", "bar")',
        ]:
            response = self.do_request(
                {
                    "field": [function_name],
                    "query": "",
                    "project": self.project.id,
                    "dataset": "spansMetrics",
                }
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            meta = response.data["meta"]
            assert len(data) == 1
            assert data[0][function_name] == -0.9
            assert meta["dataset"] == "spansMetrics"
            assert meta["fields"][function_name] == "percent_change"

    def test_avg_compare_invalid_column(self):
        response = self.do_request(
            {
                "field": ["avg_compare(span.self_time, transaction, foo, bar)"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 400, response.content

    def test_span_domain_array(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,sentry_table2,"},
        )
        response = self.do_request(
            {
                "field": ["span.domain", "p75(span.self_time)"],
                "query": "",
                "project": self.project.id,
                "orderby": ["-p75(span.self_time)"],
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 2
        assert data[0]["span.domain"] == ["sentry_table1"]
        assert data[1]["span.domain"] == ["sentry_table1", "sentry_table2"]
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["span.domain"] == "array"

    def test_span_domain_array_filter(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,sentry_table2,"},
        )
        response = self.do_request(
            {
                "field": ["span.domain", "p75(span.self_time)"],
                "query": "span.domain:sentry_table2",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["span.domain"] == ["sentry_table1", "sentry_table2"]
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["span.domain"] == "array"

    def test_span_domain_array_filter_wildcard(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,sentry_table2,"},
        )
        for query in ["sentry*2", "*table2", "sentry_table2*"]:
            response = self.do_request(
                {
                    "field": ["span.domain", "p75(span.self_time)"],
                    "query": f"span.domain:{query}",
                    "project": self.project.id,
                    "dataset": "spansMetrics",
                }
            )
            assert response.status_code == 200, response.content
            data = response.data["data"]
            meta = response.data["meta"]
            assert len(data) == 1, query
            assert data[0]["span.domain"] == ["sentry_table1", "sentry_table2"], query
            assert meta["dataset"] == "spansMetrics", query
            assert meta["fields"]["span.domain"] == "array"

    def test_span_domain_array_has_filter(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ""},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,sentry_table2,"},
        )
        response = self.do_request(
            {
                "field": ["span.domain", "p75(span.self_time)"],
                "query": "has:span.domain",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["span.domain"] == ["sentry_table1", "sentry_table2"]
        assert meta["dataset"] == "spansMetrics"
        response = self.do_request(
            {
                "field": ["span.domain", "p75(span.self_time)"],
                "query": "!has:span.domain",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["span.domain"] == "array"

    def test_unique_values_span_domain(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table2,sentry_table3,"},
        )
        response = self.do_request(
            {
                "field": ["unique.span_domains", "count()"],
                "query": "",
                "orderby": "unique.span_domains",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 3
        assert data[0]["unique.span_domains"] == "sentry_table1"
        assert data[1]["unique.span_domains"] == "sentry_table2"
        assert data[2]["unique.span_domains"] == "sentry_table3"
        assert meta["fields"]["unique.span_domains"] == "string"

    def test_unique_values_span_domain_with_filter(self):
        self.store_span_metric(
            321,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_tible1,"},
        )
        self.store_span_metric(
            21,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.domain": ",sentry_table2,sentry_table3,"},
        )
        response = self.do_request(
            {
                "field": ["unique.span_domains", "count()"],
                "query": "span.domain:sentry_tab*",
                "orderby": "unique.span_domains",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 2
        assert data[0]["unique.span_domains"] == "sentry_table2"
        assert data[1]["unique.span_domains"] == "sentry_table3"
        assert meta["fields"]["unique.span_domains"] == "string"
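
    # avg_if(column, tag, value) averages only the rows matching the tag value:
    # release "foo" has values 100 and 200 -> 150; span.op queue.task.celery has
    # a single value of 300.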
    def test_avg_if(self):
        self.store_span_metric(
            100,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "foo"},
        )
        self.store_span_metric(
            200,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "foo"},
        )
        self.store_span_metric(
            10,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"release": "bar"},
        )
        self.store_span_metric(
            300,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.op": "queue.task.celery"},
        )
        response = self.do_request(
            {
                "field": [
                    "avg_if(span.self_time, release, foo)",
                    "avg_if(span.self_time, span.op, queue.task.celery)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["avg_if(span.self_time, release, foo)"] == 150
        assert data[0]["avg_if(span.self_time, span.op, queue.task.celery)"] == 300
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["avg_if(span.self_time, release, foo)"] == "duration"
        assert meta["fields"]["avg_if(span.self_time, span.op, queue.task.celery)"] == "duration"

    def test_device_class(self):
        self.store_span_metric(
            123,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": "1"},
        )
        self.store_span_metric(
            678,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": "2"},
        )
        self.store_span_metric(
            999,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": ""},
        )
        response = self.do_request(
            {
                "field": ["device.class", "p95()"],
                "query": "",
                "orderby": "p95()",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 3
        # Need to actually check the dict since the level for 1 isn't guaranteed to stay `low` or `medium`
        assert data[0]["device.class"] == map_device_class_level("1")
        assert data[1]["device.class"] == map_device_class_level("2")
        assert data[2]["device.class"] == "Unknown"
        assert meta["fields"]["device.class"] == "string"

    def test_device_class_filter(self):
        self.store_span_metric(
            123,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": "1"},
        )
        # Need to actually check the dict since the level for 1 isn't guaranteed to stay `low`
        level = map_device_class_level("1")
        response = self.do_request(
            {
                "field": ["device.class", "count()"],
                "query": f"device.class:{level}",
                "orderby": "count()",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["device.class"] == level
        assert meta["fields"]["device.class"] == "string"

    def test_device_class_filter_unknown(self):
        self.store_span_metric(
            123,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"device.class": ""},
        )
        response = self.do_request(
            {
                "field": ["device.class", "count()"],
                "query": "device.class:Unknown",
                "orderby": "count()",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["device.class"] == "Unknown"
        assert meta["fields"]["device.class"] == "string"
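
    # cache_hit_rate() is the fraction of spans tagged cache.hit:true:
    # 1 hit out of 2 spans -> 0.5.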
    def test_cache_hit_rate(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"cache.hit": "true"},
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"cache.hit": "false"},
        )
        response = self.do_request(
            {
                "field": ["cache_hit_rate()"],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        meta = response.data["meta"]
        assert len(data) == 1
        assert data[0]["cache_hit_rate()"] == 0.5
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["cache_hit_rate()"] == "percentage"
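
    # http_response_rate(code) accepts either an exact status code or a code
    # class. With 5 spans total: one 200 -> 0.2, one 3xx -> 0.2, one 4xx -> 0.2,
    # and two 5xx (501 and 503) -> 0.4.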
    def test_http_response_rate(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "200"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "301"},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "404"},
        )
        self.store_span_metric(
            4,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "503"},
        )
        self.store_span_metric(
            5,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={"span.status_code": "501"},
        )
        response = self.do_request(
            {
                "field": [
                    "http_response_rate(200)",  # By exact code
                    "http_response_rate(3)",  # By code class
                    "http_response_rate(4)",
                    "http_response_rate(5)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 1
        assert data[0]["http_response_rate(200)"] == 0.2
        assert data[0]["http_response_rate(3)"] == 0.2
        assert data[0]["http_response_rate(4)"] == 0.2
        assert data[0]["http_response_rate(5)"] == 0.4
        meta = response.data["meta"]
        assert meta["dataset"] == "spansMetrics"
        assert meta["fields"]["http_response_rate(200)"] == "percentage"
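
    # regression_score(column, timestamp) ranks spans by how much their total
    # self time grew after the supplied breakpoint timestamp, so spans that
    # regressed (or newly appeared) sort above unchanged ones.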
    def test_regression_score_regression(self):
        # This span increases in duration
        self.store_span_metric(
            1,
            timestamp=self.six_min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Regressed Span"},
            project=self.project.id,
        )
        self.store_span_metric(
            100,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Regressed Span"},
            project=self.project.id,
        )
        # This span stays the same
        self.store_span_metric(
            1,
            timestamp=self.three_days_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        self.store_span_metric(
            1,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        response = self.do_request(
            {
                "field": [
                    "span.description",
                    f"regression_score(span.self_time,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "transaction:/api/0/projects/",
                "dataset": "spansMetrics",
                "orderby": [
                    f"-regression_score(span.self_time,{int(self.two_min_ago.timestamp())})"
                ],
                "start": (self.six_min_ago - timedelta(minutes=1)).isoformat(),
                "end": before_now(minutes=0),
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 2
        assert [row["span.description"] for row in data] == ["Regressed Span", "Non-regressed"]

    def test_regression_score_added_span(self):
        # This span only exists after the breakpoint
        self.store_span_metric(
            100,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Added span"},
            project=self.project.id,
        )
        # This span stays the same
        self.store_span_metric(
            1,
            timestamp=self.three_days_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        self.store_span_metric(
            1,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        response = self.do_request(
            {
                "field": [
                    "span.description",
                    f"regression_score(span.self_time,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "transaction:/api/0/projects/",
                "dataset": "spansMetrics",
                "orderby": [
                    f"-regression_score(span.self_time,{int(self.two_min_ago.timestamp())})"
                ],
                "start": (self.six_min_ago - timedelta(minutes=1)).isoformat(),
                "end": before_now(minutes=0),
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 2
        assert [row["span.description"] for row in data] == ["Added span", "Non-regressed"]

    def test_regression_score_removed_span(self):
        # This span only exists before the breakpoint
        self.store_span_metric(
            100,
            timestamp=self.six_min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Removed span"},
            project=self.project.id,
        )
        # This span stays the same
        self.store_span_metric(
            1,
            timestamp=self.three_days_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        self.store_span_metric(
            1,
            timestamp=self.min_ago,
            tags={"transaction": "/api/0/projects/", "span.description": "Non-regressed"},
            project=self.project.id,
        )
        response = self.do_request(
            {
                "field": [
                    "span.description",
                    f"regression_score(span.self_time,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "transaction:/api/0/projects/",
                "dataset": "spansMetrics",
                "orderby": [
                    f"-regression_score(span.self_time,{int(self.two_min_ago.timestamp())})"
                ],
                "start": (self.six_min_ago - timedelta(minutes=1)).isoformat(),
                "end": before_now(minutes=0),
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 2
        assert [row["span.description"] for row in data] == ["Non-regressed", "Removed span"]
        # The regression score is < 0 for removed spans, which can act as
        # a way to filter out removed spans when necessary
        assert data[1][f"regression_score(span.self_time,{int(self.two_min_ago.timestamp())})"] < 0
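
    # The *_by_timestamp functions split the aggregate at a unix timestamp:
    # "less" aggregates spans recorded before the breakpoint and "greater"
    # aggregates the ones after it.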
    def test_avg_self_time_by_timestamp(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={},
        )
        self.store_span_metric(
            3,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.min_ago,
            tags={},
        )
        response = self.do_request(
            {
                "field": [
                    f"avg_by_timestamp(span.self_time,less,{int(self.two_min_ago.timestamp())})",
                    f"avg_by_timestamp(span.self_time,greater,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 1
        assert data[0] == {
            f"avg_by_timestamp(span.self_time,less,{int(self.two_min_ago.timestamp())})": 1.0,
            f"avg_by_timestamp(span.self_time,greater,{int(self.two_min_ago.timestamp())})": 3.0,
        }

    def test_avg_self_time_by_timestamp_invalid_condition(self):
        response = self.do_request(
            {
                "field": [
                    f"avg_by_timestamp(span.self_time,INVALID_ARG,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 400, response.content
        assert (
            response.data["detail"]
            == "avg_by_timestamp: condition argument invalid: string must be one of ['greater', 'less']"
        )

    def test_epm_by_timestamp(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={},
        )
        # More events occur after the timestamp
        for _ in range(3):
            self.store_span_metric(
                3,
                internal_metric=constants.SELF_TIME_LIGHT,
                timestamp=self.min_ago,
                tags={},
            )
        response = self.do_request(
            {
                "field": [
                    f"epm_by_timestamp(less,{int(self.two_min_ago.timestamp())})",
                    f"epm_by_timestamp(greater,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert len(data) == 1
        assert data[0][f"epm_by_timestamp(less,{int(self.two_min_ago.timestamp())})"] < 1.0
        assert data[0][f"epm_by_timestamp(greater,{int(self.two_min_ago.timestamp())})"] > 1.0

    def test_epm_by_timestamp_invalid_condition(self):
        response = self.do_request(
            {
                "field": [
                    f"epm_by_timestamp(INVALID_ARG,{int(self.two_min_ago.timestamp())})",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 400, response.content
        assert (
            response.data["detail"]
            == "epm_by_timestamp: condition argument invalid: string must be one of ['greater', 'less']"
        )

    def test_any_function(self):
        for char in "abc":
            for transaction in ["foo", "bar"]:
                self.store_span_metric(
                    1,
                    internal_metric=constants.SELF_TIME_LIGHT,
                    timestamp=self.six_min_ago,
                    tags={"span.description": char, "transaction": transaction},
                )
        response = self.do_request(
            {
                "field": [
                    "transaction",
                    "any(span.description)",
                ],
                "query": "",
                "orderby": ["transaction"],
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        assert response.data["data"] == [
            {"transaction": "bar", "any(span.description)": "a"},
            {"transaction": "foo", "any(span.description)": "a"},
        ]

    def test_count_op(self):
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.op": "queue.submit.celery"},
        )
        self.store_span_metric(
            1,
            internal_metric=constants.SELF_TIME_LIGHT,
            timestamp=self.six_min_ago,
            tags={"span.op": "queue.task.celery"},
        )
        response = self.do_request(
            {
                "field": [
                    "count_op(queue.submit.celery)",
                    "count_op(queue.task.celery)",
                ],
                "query": "",
                "project": self.project.id,
                "dataset": "spansMetrics",
                "statsPeriod": "1h",
            }
        )
        assert response.status_code == 200, response.content
        data = response.data["data"]
        assert data == [
            {"count_op(queue.submit.celery)": 1, "count_op(queue.task.celery)": 1},
        ]
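
# Re-runs the suite above with the metrics layer feature enabled; functions the
# metrics layer does not implement yet are marked as expected failures.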
class OrganizationEventsMetricsEnhancedPerformanceEndpointTestWithMetricLayer(
    OrganizationEventsMetricsEnhancedPerformanceEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True

    @pytest.mark.xfail(reason="Not implemented")
    def test_time_spent_percentage(self):
        super().test_time_spent_percentage()

    @pytest.mark.xfail(reason="Not implemented")
    def test_time_spent_percentage_local(self):
        super().test_time_spent_percentage_local()

    @pytest.mark.xfail(reason="Cannot group by function 'if'")
    def test_span_module(self):
        super().test_span_module()

    @pytest.mark.xfail(reason="Cannot search by tags")
    def test_tag_search(self):
        super().test_tag_search()

    @pytest.mark.xfail(reason="Cannot search by tags")
    def test_free_text_search(self):
        super().test_free_text_search()

    @pytest.mark.xfail(reason="Not implemented")
    def test_avg_compare(self):
        super().test_avg_compare()

    @pytest.mark.xfail(reason="Not implemented")
    def test_span_domain_array(self):
        super().test_span_domain_array()

    @pytest.mark.xfail(reason="Not implemented")
    def test_span_domain_array_filter(self):
        super().test_span_domain_array_filter()

    @pytest.mark.xfail(reason="Not implemented")
    def test_span_domain_array_filter_wildcard(self):
        super().test_span_domain_array_filter_wildcard()

    @pytest.mark.xfail(reason="Not implemented")
    def test_span_domain_array_has_filter(self):
        super().test_span_domain_array_has_filter()

    @pytest.mark.xfail(reason="Not implemented")
    def test_unique_values_span_domain(self):
        super().test_unique_values_span_domain()

    @pytest.mark.xfail(reason="Not implemented")
    def test_unique_values_span_domain_with_filter(self):
        super().test_unique_values_span_domain_with_filter()

    @pytest.mark.xfail(reason="Not implemented")
    def test_avg_if(self):
        super().test_avg_if()

    @pytest.mark.xfail(reason="Not implemented")
    def test_device_class_filter(self):
        super().test_device_class_filter()

    @pytest.mark.xfail(reason="Not implemented")
    def test_device_class(self):
        super().test_device_class()

    @pytest.mark.xfail(reason="Not implemented")
    def test_count_op(self):
        super().test_count_op()