test_organization_events_histogram.py

from __future__ import annotations

import random
from collections import namedtuple
from copy import deepcopy
from datetime import timedelta

import pytest
from django.urls import reverse
from rest_framework.exceptions import ErrorDetail

from sentry.sentry_metrics.aggregation_option_registry import AggregationOption
from sentry.testutils.cases import APITestCase, MetricsEnhancedPerformanceTestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
from sentry.utils.samples import load_data
from sentry.utils.snuba import get_array_column_alias

pytestmark = pytest.mark.sentry_metrics

HistogramSpec = namedtuple(
    "HistogramSpec", ["start", "end", "fields", "tags"], defaults=[None, None, [], {}]
)
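
# Each HistogramSpec describes one bucket of synthetic data. For example,
# HistogramSpec(start=0, end=1, fields=[("foo", 2)]) means: store two events
# whose "foo" value falls in [0, 1) (populate_events draws a uniform random
# value between start and end). The optional tags default to {}.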

ARRAY_COLUMNS = ["measurements", "span_op_breakdowns"]


class OrganizationEventsHistogramEndpointTest(APITestCase, SnubaTestCase):
    def setUp(self):
        super().setUp()
        self.min_ago = iso_format(before_now(minutes=1))
        self.data = load_data("transaction")
        self.features = {}

    def populate_events(self, specs):
        start = before_now(minutes=5)
        for spec in specs:
            spec = HistogramSpec(*spec)
            for suffix_key, count in spec.fields:
                for i in range(count):
                    data = deepcopy(self.data)

                    measurement_name = suffix_key
                    breakdown_name = f"ops.{suffix_key}"

                    data["timestamp"] = iso_format(start)
                    data["start_timestamp"] = iso_format(start - timedelta(seconds=i))
                    value = random.random() * (spec.end - spec.start) + spec.start
                    data["transaction"] = f"/measurement/{measurement_name}/value/{value}"

                    data["measurements"] = {measurement_name: {"value": value}}
                    data["breakdowns"] = {
                        "span_ops": {
                            breakdown_name: {"value": value},
                        }
                    }
                    self.store_event(data, self.project.id)
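
    # Illustrative sketch of what each stored event carries (the surrounding
    # transaction payload comes from load_data). For a spec field ("foo", n):
    #   data["measurements"] == {"foo": {"value": 0.42}}
    #   data["breakdowns"] == {"span_ops": {"ops.foo": {"value": 0.42}}}
    # so the same spec exercises both measurements and span op breakdowns.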

    def as_response_data(self, specs):
        data: dict[str, list[dict[str, int]]] = {}
        for spec in specs:
            spec = HistogramSpec(*spec)
            for measurement, count in sorted(spec.fields):
                if measurement not in data:
                    data[measurement] = []
                data[measurement].append({"bin": spec.start, "count": count})
        return data
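
    # A worked example of the transformation (a check, not an assertion):
    #   as_response_data([(0, 1, [("foo", 1)]), (1, 2, [("foo", 0)])])
    #   == {"foo": [{"bin": 0, "count": 1}, {"bin": 1, "count": 0}]}
    # Only the start of each spec is used as the bin edge; the end is ignored.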

    def do_request(self, query, features=None):
        if features is None:
            features = {"organizations:performance-view": True}
        features.update(self.features)
        self.login_as(user=self.user)
        url = reverse(
            "sentry-api-0-organization-events-histogram",
            kwargs={"organization_slug": self.organization.slug},
        )
        with self.feature(features):
            return self.client.get(url, query, format="json")
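
    # Typical usage, as in the tests below:
    #   response = self.do_request(
    #       {"project": [self.project.id], "field": ["measurements.foo"], "numBuckets": 5}
    #   )
    # which issues a GET against the organization events histogram endpoint
    # with the performance-view feature enabled.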

    def test_no_projects(self):
        response = self.do_request({})

        assert response.status_code == 200, response.content
        assert response.data == {}

    def test_good_params(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "query": "event.type:transaction",
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 10,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"

    def test_good_params_with_optionals(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "query": "event.type:transaction",
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 10,
                "precision": 0,
                "min": 0,
                "max": 10,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"

    def test_bad_params_reverse_min_max(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "query": "event.type:transaction",
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 10,
                "precision": 0,
                "min": 10,
                "max": 5,
            }
            response = self.do_request(query)
            assert response.data == {"non_field_errors": ["min cannot be greater than max."]}

    def test_bad_params_missing_fields(self):
        query = {
            "project": [self.project.id],
            "numBuckets": 10,
        }
        response = self.do_request(query)
        assert response.status_code == 400
        assert response.data == {
            "field": [ErrorDetail(string="This field is required.", code="required")],
        }

    def test_bad_params_too_many_fields(self):
        query = {
            "project": [self.project.id],
            "field": ["foo", "bar", "baz", "qux", "quux"],
            "numBuckets": 10,
            "min": 0,
            "max": 100,
            "precision": 0,
        }
        response = self.do_request(query)
        assert response.status_code == 400
        assert response.data == {
            "field": ["Ensure this field has no more than 4 elements."],
        }
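
    # Taken together, the validation tests above and below pin down the
    # serializer contract: field is required (at most 4 entries), numBuckets
    # is a required integer in [1, 100], precision must lie in [0, 4], and
    # min/max must be numbers with min <= max.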

    def test_bad_params_mixed_fields(self):
        for array_column in ARRAY_COLUMNS:
            for other_array_column in ARRAY_COLUMNS:
                query = {
                    "project": [self.project.id],
                    "field": [
                        "foo",
                        f"{get_array_column_alias(array_column)}.foo",
                        f"{get_array_column_alias(other_array_column)}.bar",
                    ],
                    "numBuckets": 10,
                    "min": 0,
                    "max": 100,
                    "precision": 0,
                }
                response = self.do_request(query)
                assert response.status_code == 400, f"failing for {array_column}"
                assert response.data == {
                    "field": [
                        "You can only generate histogram for one column at a time unless they are all measurements or all span op breakdowns."
                    ],
                }, f"failing for {array_column}"

    def test_bad_params_missing_num_buckets(self):
        query = {
            "project": [self.project.id],
            "field": ["foo"],
        }
        response = self.do_request(query)
        assert response.status_code == 400
        assert response.data == {
            "numBuckets": ["This field is required."],
        }

    def test_bad_params_invalid_num_buckets(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": "baz",
            }
            response = self.do_request(query)
            assert response.status_code == 400, f"failing for {array_column}"
            assert response.data == {
                "numBuckets": ["A valid integer is required."],
            }, f"failing for {array_column}"

    def test_bad_params_invalid_negative_num_buckets(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": -1,
            }
            response = self.do_request(query)
            assert response.status_code == 400, f"failing for {array_column}"
            assert response.data == {
                "numBuckets": ["Ensure this value is greater than or equal to 1."],
            }, f"failing for {array_column}"

    def test_bad_params_num_buckets_too_large(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 150,
            }
            response = self.do_request(query)
            assert response.status_code == 400, f"failing for {array_column}"
            assert response.data == {
                "numBuckets": ["Ensure this value is less than or equal to 100."],
            }, f"failing for {array_column}"

    def test_bad_params_invalid_precision_too_small(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 10,
                "precision": -1,
            }
            response = self.do_request(query)
            assert response.status_code == 400, f"failing for {array_column}"
            assert response.data == {
                "precision": ["Ensure this value is greater than or equal to 0."],
            }, f"failing for {array_column}"

    def test_bad_params_invalid_precision_too_big(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 10,
                "precision": 100,
            }
            response = self.do_request(query)
            assert response.status_code == 400, f"failing for {array_column}"
            assert response.data == {
                "precision": ["Ensure this value is less than or equal to 4."],
            }, f"failing for {array_column}"

    def test_bad_params_invalid_min(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 10,
                "min": "qux",
            }
            response = self.do_request(query)
            assert response.status_code == 400, f"failing for {array_column}"
            assert response.data == {
                "min": ["A valid number is required."],
            }, f"failing for {array_column}"

    def test_bad_params_invalid_max(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 10,
                "max": "qux",
            }
            response = self.do_request(query)
            assert response.status_code == 400, f"failing for {array_column}"
            assert response.data == {
                "max": ["A valid number is required."],
            }, f"failing for {array_column}"

    def test_histogram_empty(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [(i, i + 1, [(f"{alias}.foo", 0), (f"{alias}.bar", 0)]) for i in range(5)]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_simple(self):
        # range is [0, 5), so it is divided into 5 buckets of width 1
        specs = [
            (0, 1, [("foo", 1)]),
            (1, 2, [("foo", 1)]),
            (2, 3, [("foo", 1)]),
            (4, 5, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (0, 1, [(f"{alias}.foo", 1)]),
                (1, 2, [(f"{alias}.foo", 1)]),
                (2, 3, [(f"{alias}.foo", 1)]),
                (3, 4, [(f"{alias}.foo", 0)]),
                (4, 5, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_simple_using_min_max(self):
        # range is [0, 5), so it is divided into 5 buckets of width 1
        specs = [
            (0, 1, [("foo", 1)]),
            (1, 2, [("foo", 1)]),
            (2, 3, [("foo", 1)]),
            (4, 5, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "min": 0,
                "max": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (0, 1, [(f"{alias}.foo", 1)]),
                (1, 2, [(f"{alias}.foo", 1)]),
                (2, 3, [(f"{alias}.foo", 1)]),
                (3, 4, [(f"{alias}.foo", 0)]),
                (4, 5, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_simple_using_given_min_above_queried_max(self):
        # All these events are out of range of the query parameters,
        # and should not appear in the results.
        specs = [
            (0, 1, [("foo", 1)]),
            (1, 2, [("foo", 1)]),
            (2, 3, [("foo", 1)]),
            (4, 5, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "min": 6,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (6, 7, [(f"{alias}.foo", 0)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_simple_using_given_max_below_queried_min(self):
        # All these events are out of range of the query parameters,
        # and should not appear in the results.
        specs = [
            (6, 7, [("foo", 1)]),
            (8, 9, [("foo", 1)]),
            (10, 11, [("foo", 1)]),
            (12, 13, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "max": 6,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (5, 6, [(f"{alias}.foo", 0)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"
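
    # A worked check of the bucket math in the next test, assuming buckets
    # are sized from the observed data range: (99 - 0) / 5 = 19.8, which the
    # endpoint appears to round up to a width of 20, giving bins at
    # 0, 20, 40, 60 and 80.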
    def test_histogram_large_buckets(self):
        # make sure that it works for large width buckets
        # range is [0, 99], so it is divided into 5 buckets of width 20
        specs = [
            (0, 0, [("foo", 2)]),
            (99, 99, [("foo", 2)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (0, 20, [(f"{alias}.foo", 2)]),
                (20, 40, [(f"{alias}.foo", 0)]),
                (40, 60, [(f"{alias}.foo", 0)]),
                (60, 80, [(f"{alias}.foo", 0)]),
                (80, 100, [(f"{alias}.foo", 2)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_non_zero_offset(self):
        # range is [10, 15), so it is divided into 5 buckets of width 1
        specs = [
            (10, 11, [("foo", 1)]),
            (12, 13, [("foo", 1)]),
            (13, 14, [("foo", 1)]),
            (14, 15, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (10, 11, [(f"{alias}.foo", 1)]),
                (11, 12, [(f"{alias}.foo", 0)]),
                (12, 13, [(f"{alias}.foo", 1)]),
                (13, 14, [(f"{alias}.foo", 1)]),
                (14, 15, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_extra_data(self):
        # range is [11, 16), so it is divided into 5 buckets of width 1
        # make sure every bin has some value
        specs = [
            (10, 11, [("foo", 1)]),
            (11, 12, [("foo", 1)]),
            (12, 13, [("foo", 1)]),
            (13, 14, [("foo", 1)]),
            (14, 15, [("foo", 1)]),
            (15, 16, [("foo", 1)]),
            (16, 17, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "min": 11,
                "max": 16,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (11, 12, [(f"{alias}.foo", 1)]),
                (12, 13, [(f"{alias}.foo", 1)]),
                (13, 14, [(f"{alias}.foo", 1)]),
                (14, 15, [(f"{alias}.foo", 1)]),
                (15, 16, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_non_zero_min_large_buckets(self):
        # range is [10, 59], so it is divided into 5 buckets of width 10
        specs = [
            (10, 10, [("foo", 1)]),
            (40, 50, [("foo", 1)]),
            (59, 59, [("foo", 2)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (10, 20, [(f"{alias}.foo", 1)]),
                (20, 30, [(f"{alias}.foo", 0)]),
                (30, 40, [(f"{alias}.foo", 0)]),
                (40, 50, [(f"{alias}.foo", 1)]),
                (50, 60, [(f"{alias}.foo", 2)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    @pytest.mark.xfail(reason="snuba does not allow - in alias names")
    def test_histogram_negative_values(self):
        # range is [-9, -4), so it is divided into 5 buckets of width 1
        specs = [
            (-9, -8, [("foo", 3)]),
            (-5, -4, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (-9, -8, [(f"{alias}.foo", 3)]),
                (-8, -7, [(f"{alias}.foo", 0)]),
                (-7, -6, [(f"{alias}.foo", 0)]),
                (-6, -5, [(f"{alias}.foo", 0)]),
                (-5, -4, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    @pytest.mark.xfail(reason="snuba does not allow - in alias names")
    def test_histogram_positive_and_negative_values(self):
        # range is [-50, 49], so it is divided into 5 buckets of width 20
        specs = [
            (-50, -50, [("foo", 1)]),
            (-10, 10, [("foo", 2)]),
            (49, 49, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (-50, -30, [(f"{alias}.foo", 1)]),
                (-30, -10, [(f"{alias}.foo", 0)]),
                (-10, 10, [(f"{alias}.foo", 2)]),
                (10, 30, [(f"{alias}.foo", 0)]),
                (30, 50, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_increased_precision(self):
        # range is [1.00, 2.24], so it is divided into 5 buckets of width 0.25
        specs = [
            (1.00, 1.00, [("foo", 3)]),
            (2.24, 2.24, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "precision": 2,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (1.00, 1.25, [(f"{alias}.foo", 3)]),
                (1.25, 1.50, [(f"{alias}.foo", 0)]),
                (1.50, 1.75, [(f"{alias}.foo", 0)]),
                (1.75, 2.00, [(f"{alias}.foo", 0)]),
                (2.00, 2.25, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_increased_precision_with_min_max(self):
        # range is [1.25, 2.00], so it is divided into 3 buckets of width 0.25
        specs = [
            (1.00, 1.25, [("foo", 3)]),
            (2.00, 2.25, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 3,
                "precision": 2,
                "min": 1.25,
                "max": 2.00,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (1.25, 1.50, [(f"{alias}.foo", 0)]),
                (1.50, 1.75, [(f"{alias}.foo", 0)]),
                (1.75, 2.00, [(f"{alias}.foo", 0)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_increased_precision_large_buckets(self):
        # range is [10.0000, 59.9999] so it is divided into 5 buckets of width 10
        specs = [
            (10.0000, 10.0000, [("foo", 1)]),
            (30.0000, 40.0000, [("foo", 1)]),
            (59.9999, 59.9999, [("foo", 2)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "precision": 4,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (10.0000, 20.0000, [(f"{alias}.foo", 1)]),
                (20.0000, 30.0000, [(f"{alias}.foo", 0)]),
                (30.0000, 40.0000, [(f"{alias}.foo", 1)]),
                (40.0000, 50.0000, [(f"{alias}.foo", 0)]),
                (50.0000, 60.0000, [(f"{alias}.foo", 2)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_multiple_measures(self):
        # range is [10, 59] so it is divided into 5 buckets of width 10
        specs = [
            (10, 10, [("bar", 0), ("baz", 0), ("foo", 1)]),
            (30, 40, [("bar", 2), ("baz", 0), ("foo", 0)]),
            (59, 59, [("bar", 0), ("baz", 1), ("foo", 0)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.bar", f"{alias}.baz", f"{alias}.foo"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (
                    10,
                    20,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 1),
                    ],
                ),
                (
                    20,
                    30,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 0),
                    ],
                ),
                (
                    30,
                    40,
                    [
                        (f"{alias}.bar", 2),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 0),
                    ],
                ),
                (
                    40,
                    50,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 0),
                    ],
                ),
                (
                    50,
                    60,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 1),
                        (f"{alias}.foo", 0),
                    ],
                ),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_max_value_on_edge(self):
        # range is [11, 21] so it is divided into 5 buckets of width 5
        # because using buckets of width 2 will exclude 21, and the next
        # nice number is 5
        specs = [
            (11, 11, [("bar", 0), ("baz", 0), ("foo", 1)]),
            (21, 21, [("bar", 1), ("baz", 1), ("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.bar", f"{alias}.baz", f"{alias}.foo"],
                "numBuckets": 5,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (
                    10,
                    15,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 1),
                    ],
                ),
                (
                    15,
                    20,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 0),
                    ],
                ),
                (
                    20,
                    25,
                    [
                        (f"{alias}.bar", 1),
                        (f"{alias}.baz", 1),
                        (f"{alias}.foo", 1),
                    ],
                ),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"
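
    # In the next test the requested range is [10, 21] with 5 buckets, i.e. a
    # raw width of 2.2; the endpoint appears to round up to the next nice
    # width (5), so the returned bins (10, 15, 20) extend past the given max.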
    def test_histogram_bins_exceed_max(self):
        specs = [
            (10, 15, [("bar", 0), ("baz", 0), ("foo", 1)]),
            (30, 30, [("bar", 1), ("baz", 1), ("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.bar", f"{alias}.baz", f"{alias}.foo"],
                "numBuckets": 5,
                "min": 10,
                "max": 21,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (
                    10,
                    15,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 1),
                    ],
                ),
                (
                    15,
                    20,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 0),
                    ],
                ),
                (
                    20,
                    25,
                    [
                        (f"{alias}.bar", 0),
                        (f"{alias}.baz", 0),
                        (f"{alias}.foo", 0),
                    ],
                ),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_bad_params_invalid_data_filter(self):
        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo", f"{alias}.bar"],
                "numBuckets": 10,
                "dataFilter": "invalid",
            }
            response = self.do_request(query)
            assert response.status_code == 400, f"failing for {array_column}"
            assert response.data == {
                "dataFilter": ['"invalid" is not a valid choice.'],
            }, f"failing for {array_column}"

    def test_histogram_all_data_filter(self):
        specs = [
            (0, 1, [("foo", 4)]),
            (4000, 5000, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "dataFilter": "all",
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (0, 1000, [(f"{alias}.foo", 4)]),
                (1000, 2000, [(f"{alias}.foo", 0)]),
                (2000, 3000, [(f"{alias}.foo", 0)]),
                (3000, 4000, [(f"{alias}.foo", 0)]),
                (4000, 5000, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_exclude_outliers_data_filter(self):
        specs = [
            (0, 0, [("foo", 4)]),
            (4000, 4001, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "dataFilter": "exclude_outliers",
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (0, 1, [(f"{alias}.foo", 4)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"
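
    # The two tests above cover both accepted dataFilter values: "all" keeps
    # the ~4000 outlier and stretches the bins across [0, 5000), while
    # "exclude_outliers" appears to trim it, collapsing the histogram to the
    # [0, 1) bin.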

    def test_histogram_missing_measurement_data(self):
        # make sure there is at least one transaction
        specs = [
            (0, 1, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                # make sure to query a measurement that does not exist
                "field": [f"{alias}.bar"],
                "numBuckets": 5,
                "dataFilter": "exclude_outliers",
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (0, 1, [(f"{alias}.bar", 0)]),
                (1, 1, [(f"{alias}.bar", 0)]),
                (2, 2, [(f"{alias}.bar", 0)]),
                (3, 3, [(f"{alias}.bar", 0)]),
                (4, 4, [(f"{alias}.bar", 0)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_missing_measurement_data_with_explicit_bounds(self):
        # make sure there is at least one transaction
        specs = [
            (0, 1, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                # make sure to query a measurement that does not exist
                "field": [f"{alias}.bar"],
                "numBuckets": 5,
                "dataFilter": "exclude_outliers",
                "min": 10,
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (10, 11, [(f"{alias}.bar", 0)]),
                (11, 11, [(f"{alias}.bar", 0)]),
                (12, 12, [(f"{alias}.bar", 0)]),
                (13, 13, [(f"{alias}.bar", 0)]),
                (14, 14, [(f"{alias}.bar", 0)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_ignores_aggregate_conditions(self):
        # range is [0, 5), so it is divided into 5 buckets of width 1
        specs = [
            (0, 1, [("foo", 1)]),
            (1, 2, [("foo", 1)]),
            (2, 3, [("foo", 1)]),
            (3, 4, [("foo", 0)]),
            (4, 5, [("foo", 1)]),
        ]
        self.populate_events(specs)

        for array_column in ARRAY_COLUMNS:
            alias = get_array_column_alias(array_column)
            query = {
                "project": [self.project.id],
                "field": [f"{alias}.foo"],
                "numBuckets": 5,
                "query": "tpm():>0.001",
            }
            response = self.do_request(query)
            assert response.status_code == 200, f"failing for {array_column}"
            expected = [
                (0, 1, [(f"{alias}.foo", 1)]),
                (1, 2, [(f"{alias}.foo", 1)]),
                (2, 3, [(f"{alias}.foo", 1)]),
                (3, 4, [(f"{alias}.foo", 0)]),
                (4, 5, [(f"{alias}.foo", 1)]),
            ]
            assert response.data == self.as_response_data(expected), f"failing for {array_column}"

    def test_histogram_outlier_filtering_with_no_rows(self):
        query = {
            "project": [self.project.id],
            "field": ["transaction.duration"],
            "numBuckets": 5,
            "dataFilter": "exclude_outliers",
        }
        response = self.do_request(query)
        assert response.status_code == 200
        expected = [
            (0, 1, [("transaction.duration", 0)]),
        ]
        assert response.data == self.as_response_data(expected)
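

# The metrics-enhanced variant below mirrors the Snuba-backed tests above,
# but each spec is materialized as transaction metrics rather than full event
# payloads, so (as the inline comments note) bucket counts are approximate.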
class OrganizationEventsMetricsEnhancedPerformanceHistogramEndpointTest(
    MetricsEnhancedPerformanceTestCase
):
    def setUp(self):
        super().setUp()
        self.min_ago = iso_format(before_now(minutes=1))
        self.features = {}

    def populate_events(self, specs):
        start = before_now(minutes=5)
        for spec in specs:
            spec = HistogramSpec(*spec)
            for suffix_key, count in spec.fields:
                for i in range(count):
                    self.store_transaction_metric(
                        (spec.end + spec.start) / 2,
                        metric=suffix_key,
                        tags={"transaction": suffix_key, **spec.tags},
                        timestamp=start,
                        aggregation_option=AggregationOption.HIST,
                    )
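
    # Each stored metric value is the midpoint of the spec's range, e.g. a
    # (0, 1, [("transaction.duration", 5)]) spec stores the value 0.5 five
    # times, tagged with the metric name as the transaction.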

    def as_response_data(self, specs):
        data: dict[str, list[dict[str, int]]] = {}
        for spec in specs:
            spec = HistogramSpec(*spec)
            for measurement, count in sorted(spec.fields):
                if measurement not in data:
                    data[measurement] = []
                data[measurement].append({"bin": spec.start, "count": count})
        return data

    def do_request(self, query, features=None):
        if features is None:
            features = {
                "organizations:performance-view": True,
                "organizations:performance-use-metrics": True,
            }
        features.update(self.features)
        self.login_as(user=self.user)
        url = reverse(
            "sentry-api-0-organization-events-histogram",
            kwargs={"organization_slug": self.organization.slug},
        )
        with self.feature(features):
            return self.client.get(url, query, format="json")

    def test_no_projects(self):
        response = self.do_request({})

        assert response.status_code == 200, response.content
        assert response.data == {}

    def test_histogram_simple(self):
        specs = [
            (0, 1, [("transaction.duration", 5)]),
            (1, 2, [("transaction.duration", 10)]),
            (2, 3, [("transaction.duration", 1)]),
            (4, 5, [("transaction.duration", 15)]),
        ]
        self.populate_events(specs)

        query = {
            "project": [self.project.id],
            "field": ["transaction.duration"],
            "numBuckets": 5,
            "dataset": "metrics",
        }
        response = self.do_request(query)
        assert response.status_code == 200, response.content
        expected = [
            (0, 1, [("transaction.duration", 6)]),
            (1, 2, [("transaction.duration", 9)]),
            (2, 3, [("transaction.duration", 3)]),
            (3, 4, [("transaction.duration", 8)]),
            (4, 5, [("transaction.duration", 7)]),
        ]
        # Note: metrics data is approximate; these values are based on running
        # the test and asserting the results.
        expected_response = self.as_response_data(expected)
        expected_response["meta"] = {"isMetricsData": True}
        assert response.data == expected_response

    def test_multi_histogram(self):
        specs = [
            (0, 1, [("measurements.fcp", 5), ("measurements.lcp", 5)]),
            (1, 2, [("measurements.fcp", 5), ("measurements.lcp", 5)]),
        ]
        self.populate_events(specs)

        query = {
            "project": [self.project.id],
            "field": ["measurements.fcp", "measurements.lcp"],
            "numBuckets": 2,
            "dataset": "metrics",
        }
        response = self.do_request(query)
        assert response.status_code == 200, response.content
        expected = [
            (0, 1, [("measurements.fcp", 5), ("measurements.lcp", 5)]),
            (1, 2, [("measurements.fcp", 5), ("measurements.lcp", 5)]),
        ]
        # Note: metrics data is approximate; these values are based on running
        # the test and asserting the results.
        expected_response = self.as_response_data(expected)
        expected_response["meta"] = {"isMetricsData": True}
        assert response.data == expected_response

    def test_histogram_exclude_outliers_data_filter(self):
        specs = [
            (0, 0, [("transaction.duration", 4)], {"histogram_outlier": "inlier"}),
            (1, 1, [("transaction.duration", 4)], {"histogram_outlier": "inlier"}),
            (4000, 4001, [("transaction.duration", 1)], {"histogram_outlier": "outlier"}),
        ]
        self.populate_events(specs)

        query = {
            "project": [self.project.id],
            "field": ["transaction.duration"],
            "numBuckets": 5,
            "dataFilter": "exclude_outliers",
            "dataset": "metrics",
        }
        response = self.do_request(query)
        assert response.status_code == 200, response.content
        # Metrics approximation means both buckets got merged
        expected = [
            (0, 0, [("transaction.duration", 8)]),
            (1, 2, [("transaction.duration", 0)]),
        ]
        expected_response = self.as_response_data(expected)
        expected_response["meta"] = {"isMetricsData": True}
        assert response.data == expected_response


class OrganizationEventsMetricsEnhancedPerformanceHistogramEndpointTestWithMetricLayer(
    OrganizationEventsMetricsEnhancedPerformanceHistogramEndpointTest
):
    def setUp(self):
        super().setUp()
        self.features["organizations:use-metrics-layer"] = True