# test_organization_sessions.py
import datetime
from unittest.mock import patch
from uuid import uuid4

import pytest
from django.urls import reverse
from freezegun import freeze_time

from sentry.release_health.metrics import MetricsReleaseHealthBackend
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.cases import SessionMetricsTestCase
from sentry.utils.dates import to_timestamp
  11. def result_sorted(result):
  12. """sort the groups of the results array by the `by` object, ensuring a stable order"""
  13. def stable_dict(d):
  14. return tuple(sorted(d.items(), key=lambda t: t[0]))
  15. result["groups"].sort(key=lambda group: stable_dict(group["by"]))
  16. return result
# All mock data is anchored to "yesterday" so that the endpoint's relative
# statsPeriod windows always cover it, regardless of when the tests run.
ONE_DAY_AGO = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=1)
TWO_DAYS_AGO = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(days=2)

# Fixed "now" used with freeze_time: an arbitrary mid-day wall-clock moment.
MOCK_DATETIME = ONE_DAY_AGO.replace(hour=12, minute=27, second=28, microsecond=303000)
MOCK_DATETIME_PLUS_TEN_MINUTES = MOCK_DATETIME + datetime.timedelta(minutes=10)

# Timestamp format Snuba emits in API responses (second resolution, so the
# microsecond left over in MOCK_DATETIME_START_OF_DAY below is harmless).
SNUBA_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"

MOCK_DATETIME_START_OF_DAY = MOCK_DATETIME.replace(hour=0, minute=0, second=0)
class OrganizationSessionsEndpointTest(APITestCase, SnubaTestCase):
    """Tests for the `sentry-api-0-organization-sessions` endpoint.

    A fixed set of sessions spread over several projects and organizations
    is stored per test (see `setup_fixture`); each test then exercises one
    aspect of the endpoint's query interface: fields, groupBy, filters,
    intervals, and permission handling.
    """

    def setUp(self):
        super().setUp()
        self.setup_fixture()

    def setup_fixture(self):
        # All sessions are stored relative to MOCK_DATETIME so the tests can
        # freeze time at that moment and use relative statsPeriods.
        self.timestamp = to_timestamp(MOCK_DATETIME)
        self.received = self.timestamp
        self.session_started = self.timestamp // 3600 * 3600  # round to the hour

        self.organization1 = self.organization
        self.organization2 = self.create_organization()
        self.organization3 = self.create_organization()
        self.project1 = self.project
        self.project2 = self.create_project()
        self.project3 = self.create_project()
        # project4 lives in a different organization — used to verify that
        # cross-organization projects are inaccessible.
        self.project4 = self.create_project(organization=self.organization2)

        # user2 is a plain member of organization1 with no team memberships.
        self.user2 = self.create_user(is_superuser=False)
        self.create_member(
            user=self.user2, organization=self.organization1, role="member", teams=[]
        )
        # organization3 has a member but no projects at all.
        self.create_member(user=self.user, organization=self.organization3, role="admin", teams=[])

        self.create_environment(self.project2, name="development")

        # Base payload shared by every stored session; individual sessions
        # override fields via make_session(**kwargs).
        template = {
            "distinct_id": "00000000-0000-0000-0000-000000000000",
            "status": "exited",
            "seq": 0,
            "release": "foo@1.0.0",
            "environment": "production",
            "retention_days": 90,
            "duration": 123.4,
            "errors": 0,
            "started": self.session_started,
            "received": self.received,
        }

        def make_duration(kwargs):
            """Randomish but deterministic duration"""
            return float(len(str(kwargs)))

        def make_session(project, **kwargs):
            # Merge the shared template with per-session overrides and a
            # fresh session id.
            return dict(
                template,
                session_id=uuid4().hex,
                org_id=project.organization_id,
                project_id=project.id,
                duration=make_duration(kwargs),
                **kwargs,
            )

        # project1: four sessions at various time offsets, one on foo@1.1.0.
        self.store_session(make_session(self.project1, started=self.session_started + 12 * 60))
        self.store_session(
            make_session(self.project1, started=self.session_started + 24 * 60, release="foo@1.1.0")
        )
        self.store_session(make_session(self.project1, started=self.session_started - 60 * 60))
        self.store_session(make_session(self.project1, started=self.session_started - 12 * 60 * 60))
        # project2: one crashed session, one in the "development" environment.
        self.store_session(make_session(self.project2, status="crashed"))
        self.store_session(make_session(self.project2, environment="development"))
        # project3: three sessions, two errored, sharing one distinct user.
        self.store_session(make_session(self.project3, errors=1, release="foo@1.2.0"))
        self.store_session(
            make_session(
                self.project3,
                distinct_id="39887d89-13b2-4c84-8c23-5d13d2102664",
                started=self.session_started - 60 * 60,
            )
        )
        self.store_session(
            make_session(
                self.project3, distinct_id="39887d89-13b2-4c84-8c23-5d13d2102664", errors=1
            )
        )
        # project4 belongs to organization2, inaccessible to self.user.
        self.store_session(make_session(self.project4))

    def do_request(self, query, user=None, org=None):
        """GET the sessions endpoint as `user` (default self.user) on `org`."""
        self.login_as(user=user or self.user)
        url = reverse(
            "sentry-api-0-organization-sessions",
            kwargs={"organization_slug": (org or self.organization).slug},
        )
        return self.client.get(url, query, format="json")

    def test_empty_request(self):
        response = self.do_request({})

        assert response.status_code == 400, response.content
        assert response.data == {"detail": 'Request is missing a "field"'}

    def test_inaccessible_project(self):
        # project4 is in another organization.
        response = self.do_request({"project": [self.project4.id]})

        assert response.status_code == 403, response.content
        assert response.data == {"detail": "You do not have permission to perform this action."}

    def test_unknown_field(self):
        response = self.do_request({"field": ["summ(sessin)"]})

        assert response.status_code == 400, response.content
        assert response.data == {"detail": 'Invalid field: "summ(sessin)"'}

    def test_unknown_groupby(self):
        response = self.do_request({"field": ["sum(session)"], "groupBy": ["envriomnent"]})

        assert response.status_code == 400, response.content
        assert response.data == {"detail": 'Invalid groupBy: "envriomnent"'}

    def test_illegal_groupby(self):
        # "issue.id" is a valid discover field but not allowed here.
        response = self.do_request({"field": ["sum(session)"], "groupBy": ["issue.id"]})

        assert response.status_code == 400, response.content
        assert response.data == {"detail": 'Invalid groupBy: "issue.id"'}

    def test_invalid_query(self):
        response = self.do_request(
            {"statsPeriod": "1d", "field": ["sum(session)"], "query": ["foo:bar"]}
        )

        assert response.status_code == 400, response.content
        assert response.data == {"detail": 'Invalid query field: "foo"'}

        response = self.do_request(
            {
                "statsPeriod": "1d",
                "field": ["sum(session)"],
                "query": ["release:foo-bar@1.2.3 (123)"],
            }
        )

        assert response.status_code == 400, response.content
        # TODO: it would be good to provide a better error here,
        # since its not obvious where `message` comes from.
        assert response.data == {"detail": 'Invalid query field: "message"'}

    def test_illegal_query(self):
        response = self.do_request(
            {"statsPeriod": "1d", "field": ["sum(session)"], "query": ["issue.id:123"]}
        )

        assert response.status_code == 400, response.content
        assert response.data == {"detail": 'Invalid query field: "group_id"'}

    def test_too_many_points(self):
        # default statsPeriod is 90d
        response = self.do_request({"field": ["sum(session)"], "interval": "1h"})

        assert response.status_code == 400, response.content
        assert response.data == {
            "detail": "Your interval and date range would create too many results. "
            "Use a larger interval, or a smaller date range."
        }

    @freeze_time(MOCK_DATETIME)
    def test_timeseries_interval(self):
        response = self.do_request(
            {"project": [-1], "statsPeriod": "1d", "interval": "1d", "field": ["sum(session)"]}
        )

        start_of_day_snuba_format = MOCK_DATETIME_START_OF_DAY.strftime(SNUBA_TIME_FORMAT)

        assert response.status_code == 200, response.content
        assert result_sorted(response.data) == {
            "start": start_of_day_snuba_format,
            # End is rounded up to the next full minute after "now".
            "end": MOCK_DATETIME.replace(minute=28, second=0).strftime(SNUBA_TIME_FORMAT),
            "query": "",
            "intervals": [start_of_day_snuba_format],
            "groups": [{"by": {}, "series": {"sum(session)": [9]}, "totals": {"sum(session)": 9}}],
        }

        response = self.do_request(
            {"project": [-1], "statsPeriod": "1d", "interval": "6h", "field": ["sum(session)"]}
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data) == {
            # With a 6h interval the window is aligned to 6-hour boundaries.
            "start": TWO_DAYS_AGO.replace(hour=18, minute=0, second=0).strftime(SNUBA_TIME_FORMAT),
            "end": MOCK_DATETIME.replace(minute=28, second=0).strftime(SNUBA_TIME_FORMAT),
            "query": "",
            "intervals": [
                TWO_DAYS_AGO.replace(hour=18, minute=0, second=0).strftime(SNUBA_TIME_FORMAT),
                MOCK_DATETIME.replace(hour=0, minute=0, second=0).strftime(SNUBA_TIME_FORMAT),
                MOCK_DATETIME.replace(hour=6, minute=0, second=0).strftime(SNUBA_TIME_FORMAT),
                MOCK_DATETIME.replace(hour=12, minute=0, second=0).strftime(SNUBA_TIME_FORMAT),
            ],
            "groups": [
                {"by": {}, "series": {"sum(session)": [0, 1, 2, 6]}, "totals": {"sum(session)": 9}}
            ],
        }

    @freeze_time(MOCK_DATETIME)
    def test_user_all_accessible(self):
        # A teamless member still sees all projects via project=-1.
        response = self.do_request(
            {"project": [-1], "statsPeriod": "1d", "interval": "1d", "field": ["sum(session)"]},
            user=self.user2,
        )

        start_of_day_snuba_format = MOCK_DATETIME_START_OF_DAY.strftime(SNUBA_TIME_FORMAT)

        assert response.status_code == 200, response.content
        assert result_sorted(response.data) == {
            "start": start_of_day_snuba_format,
            "end": MOCK_DATETIME.replace(hour=12, minute=28, second=0).strftime(SNUBA_TIME_FORMAT),
            "query": "",
            "intervals": [start_of_day_snuba_format],
            "groups": [{"by": {}, "series": {"sum(session)": [9]}, "totals": {"sum(session)": 9}}],
        }

    def test_no_projects(self):
        # organization3 has no projects at all.
        response = self.do_request(
            {"project": [-1], "statsPeriod": "1d", "interval": "1d", "field": ["sum(session)"]},
            org=self.organization3,
        )

        assert response.status_code == 400, response.content
        assert response.data == {"detail": "No projects available"}

    @freeze_time(MOCK_DATETIME_PLUS_TEN_MINUTES)
    def test_minute_resolution(self):
        with self.feature("organizations:minute-resolution-sessions"):
            response = self.do_request(
                {
                    "project": [self.project1.id, self.project2.id],
                    "statsPeriod": "30m",
                    "interval": "10m",
                    "field": ["sum(session)"],
                }
            )
            assert response.status_code == 200, response.content
            assert result_sorted(response.data) == {
                "start": MOCK_DATETIME.replace(hour=12, minute=0, second=0).strftime(
                    SNUBA_TIME_FORMAT
                ),
                "end": MOCK_DATETIME.replace(hour=12, minute=38, second=0).strftime(
                    SNUBA_TIME_FORMAT
                ),
                "query": "",
                "intervals": [
                    *[
                        # NOTE(review): `min` shadows the builtin here;
                        # harmless inside this comprehension.
                        MOCK_DATETIME.replace(hour=12, minute=min, second=0).strftime(
                            SNUBA_TIME_FORMAT
                        )
                        for min in [0, 10, 20, 30]
                    ],
                ],
                "groups": [
                    {
                        "by": {},
                        "series": {"sum(session)": [2, 1, 1, 0]},
                        "totals": {"sum(session)": 4},
                    }
                ],
            }

    @freeze_time(MOCK_DATETIME_PLUS_TEN_MINUTES)
    def test_10s_resolution(self):
        with self.feature("organizations:minute-resolution-sessions"):
            response = self.do_request(
                {
                    "project": [self.project1.id],
                    "statsPeriod": "1m",
                    "interval": "10s",
                    "field": ["sum(session)"],
                }
            )
            assert response.status_code == 200, response.content

            from sentry.api.endpoints.organization_sessions import release_health

            if release_health.is_metrics_based():
                # With the metrics backend, we should get exactly what we asked for,
                # 6 intervals with 10 second length. However, because of rounding,
                # we get it rounded to the next minute (see https://github.com/getsentry/sentry/blob/d6c59c32307eee7162301c76b74af419055b9b39/src/sentry/snuba/sessions_v2.py#L388-L392)
                assert len(response.data["intervals"]) == 9
            else:
                # With the sessions backend, the entire period will be aligned
                # to one hour, and the resolution will still be one minute:
                assert len(response.data["intervals"]) == 38

    @freeze_time(MOCK_DATETIME)
    def test_filter_projects(self):
        response = self.do_request(
            {
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "project": [self.project2.id, self.project3.id],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {"by": {}, "series": {"sum(session)": [5]}, "totals": {"sum(session)": 5}}
        ]

    @freeze_time(MOCK_DATETIME)
    def test_filter_environment(self):
        # Environment restriction via the query string...
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "query": "environment:development",
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {"by": {}, "series": {"sum(session)": [1]}, "totals": {"sum(session)": 1}}
        ]

        # ...and via the dedicated `environment` request parameter.
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "environment": ["development"],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {"by": {}, "series": {"sum(session)": [1]}, "totals": {"sum(session)": 1}}
        ]

    @freeze_time(MOCK_DATETIME)
    def test_filter_release(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "query": "release:foo@1.1.0",
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {"by": {}, "series": {"sum(session)": [1]}, "totals": {"sum(session)": 1}}
        ]

        # Disjunction of two quoted releases.
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "query": 'release:"foo@1.1.0" or release:"foo@1.2.0"',
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {"by": {}, "series": {"sum(session)": [2]}, "totals": {"sum(session)": 2}}
        ]

        # A release with no sessions (foo@1.3.0) yields no group of its own.
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "query": 'release:"foo@1.1.0" or release:"foo@1.2.0" or release:"foo@1.3.0"',
                "groupBy": ["release"],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {
                "by": {"release": "foo@1.1.0"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
            {
                "by": {"release": "foo@1.2.0"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
        ]

    @freeze_time(MOCK_DATETIME)
    def test_filter_unknown_release(self):
        # Filtering on a release that was never stored must not error out.
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1h",
                "field": ["sum(session)"],
                "query": "release:foo@6.6.6",
                "groupBy": "session.status",
            }
        )

        assert response.status_code == 200, response.content

    @freeze_time(MOCK_DATETIME)
    def test_groupby_project(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "groupBy": ["project"],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {
                "by": {"project": self.project1.id},
                "series": {"sum(session)": [4]},
                "totals": {"sum(session)": 4},
            },
            {
                "by": {"project": self.project2.id},
                "series": {"sum(session)": [2]},
                "totals": {"sum(session)": 2},
            },
            {
                "by": {"project": self.project3.id},
                "series": {"sum(session)": [3]},
                "totals": {"sum(session)": 3},
            },
        ]

    @freeze_time(MOCK_DATETIME)
    def test_groupby_environment(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "groupBy": ["environment"],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {
                "by": {"environment": "development"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
            {
                "by": {"environment": "production"},
                "series": {"sum(session)": [8]},
                "totals": {"sum(session)": 8},
            },
        ]

    @freeze_time(MOCK_DATETIME)
    def test_groupby_release(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "groupBy": ["release"],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {
                "by": {"release": "foo@1.0.0"},
                "series": {"sum(session)": [7]},
                "totals": {"sum(session)": 7},
            },
            {
                "by": {"release": "foo@1.1.0"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
            {
                "by": {"release": "foo@1.2.0"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
        ]

    @freeze_time(MOCK_DATETIME)
    def test_groupby_status(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "groupBy": ["session.status"],
            }
        )

        assert response.status_code == 200, response.content
        # All four statuses are always listed, even with zero counts.
        assert result_sorted(response.data)["groups"] == [
            {
                "by": {"session.status": "abnormal"},
                "series": {"sum(session)": [0]},
                "totals": {"sum(session)": 0},
            },
            {
                "by": {"session.status": "crashed"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
            {
                "by": {"session.status": "errored"},
                "series": {"sum(session)": [2]},
                "totals": {"sum(session)": 2},
            },
            {
                "by": {"session.status": "healthy"},
                "series": {"sum(session)": [6]},
                "totals": {"sum(session)": 6},
            },
        ]

    @freeze_time(MOCK_DATETIME)
    def test_groupby_cross(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "groupBy": ["release", "environment"],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {
                "by": {"environment": "development", "release": "foo@1.0.0"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
            {
                "by": {"environment": "production", "release": "foo@1.0.0"},
                "series": {"sum(session)": [6]},
                "totals": {"sum(session)": 6},
            },
            {
                "by": {"environment": "production", "release": "foo@1.1.0"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
            {
                "by": {"environment": "production", "release": "foo@1.2.0"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 1},
            },
        ]

    @freeze_time(MOCK_DATETIME)
    def test_users_groupby(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["count_unique(user)"],
            }
        )

        assert response.status_code == 200, response.content
        # Only one non-nil distinct_id is stored in the fixture.
        assert result_sorted(response.data)["groups"] == [
            {"by": {}, "series": {"count_unique(user)": [1]}, "totals": {"count_unique(user)": 1}}
        ]

        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["count_unique(user)"],
                "groupBy": ["session.status"],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {
                "by": {"session.status": "abnormal"},
                "series": {"count_unique(user)": [0]},
                "totals": {"count_unique(user)": 0},
            },
            {
                "by": {"session.status": "crashed"},
                "series": {"count_unique(user)": [0]},
                "totals": {"count_unique(user)": 0},
            },
            {
                "by": {"session.status": "errored"},
                "series": {"count_unique(user)": [1]},
                "totals": {"count_unique(user)": 1},
            },
            {
                "by": {"session.status": "healthy"},
                "series": {"count_unique(user)": [0]},
                "totals": {"count_unique(user)": 0},
            },
        ]

    # Expected duration aggregates over the fixture's "healthy" sessions,
    # shared by the two percentile tests below.
    expected_duration_values = {
        "avg(session.duration)": 42375.0,
        "max(session.duration)": 80000.0,
        "p50(session.duration)": 33500.0,
        "p75(session.duration)": 53750.0,
        "p90(session.duration)": 71600.0,
        "p95(session.duration)": 75800.0,
        "p99(session.duration)": 79159.99999999999,
    }

    @freeze_time(MOCK_DATETIME)
    def test_duration_percentiles(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": [
                    "avg(session.duration)",
                    "p50(session.duration)",
                    "p75(session.duration)",
                    "p90(session.duration)",
                    "p95(session.duration)",
                    "p99(session.duration)",
                    "max(session.duration)",
                ],
            }
        )

        assert response.status_code == 200, response.content

        expected = self.expected_duration_values

        groups = result_sorted(response.data)["groups"]
        assert len(groups) == 1, groups
        group = groups[0]
        # Floats: compare with approx rather than exact equality.
        assert group["totals"] == pytest.approx(expected)
        for key, series in group["series"].items():
            assert series == pytest.approx([expected[key]])

    @freeze_time(MOCK_DATETIME)
    def test_duration_percentiles_groupby(self):
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": [
                    "avg(session.duration)",
                    "p50(session.duration)",
                    "p75(session.duration)",
                    "p90(session.duration)",
                    "p95(session.duration)",
                    "p99(session.duration)",
                    "max(session.duration)",
                ],
                "groupBy": "session.status",
            }
        )

        assert response.status_code == 200, response.content

        expected = self.expected_duration_values

        seen = set()  # Make sure all session statuses are listed
        for group in result_sorted(response.data)["groups"]:
            seen.add(group["by"].get("session.status"))

            if group["by"] == {"session.status": "healthy"}:
                assert group["totals"] == pytest.approx(expected)
                for key, series in group["series"].items():
                    assert series == pytest.approx([expected[key]])
            else:
                # Everything's none:
                assert group["totals"] == {key: None for key in expected}, group["by"]
                assert group["series"] == {key: [None] for key in expected}

        assert seen == {"abnormal", "crashed", "errored", "healthy"}

    @freeze_time(MOCK_DATETIME)
    def test_snuba_limit_exceeded(self):
        # 2 * 3 => only show two groups
        with patch("sentry.snuba.sessions_v2.SNUBA_LIMIT", 6), patch(
            "sentry.release_health.metrics_sessions_v2.SNUBA_LIMIT", 6
        ):
            response = self.do_request(
                {
                    "project": [-1],
                    "statsPeriod": "3d",
                    "interval": "1d",
                    "field": ["sum(session)", "count_unique(user)"],
                    "groupBy": ["project", "release", "environment"],
                }
            )

            assert response.status_code == 200, response.content
            assert result_sorted(response.data)["groups"] == [
                {
                    "by": {
                        "release": "foo@1.0.0",
                        "environment": "production",
                        "project": self.project1.id,
                    },
                    "totals": {"sum(session)": 3, "count_unique(user)": 0},
                    "series": {"sum(session)": [0, 0, 3], "count_unique(user)": [0, 0, 0]},
                },
                {
                    "by": {
                        "release": "foo@1.0.0",
                        "environment": "production",
                        "project": self.project3.id,
                    },
                    "totals": {"sum(session)": 2, "count_unique(user)": 1},
                    "series": {"sum(session)": [0, 0, 2], "count_unique(user)": [0, 0, 1]},
                },
            ]

    @freeze_time(MOCK_DATETIME)
    def test_snuba_limit_exceeded_groupby_status(self):
        """Get consistent result when grouping by status"""
        # 2 * 3 => only show two groups
        with patch("sentry.snuba.sessions_v2.SNUBA_LIMIT", 6), patch(
            "sentry.release_health.metrics_sessions_v2.SNUBA_LIMIT", 6
        ):
            response = self.do_request(
                {
                    "project": [-1],
                    "statsPeriod": "3d",
                    "interval": "1d",
                    "field": ["sum(session)", "count_unique(user)"],
                    "groupBy": ["project", "release", "environment", "session.status"],
                }
            )

            assert response.status_code == 200, response.content
            # The two surviving (project, release, environment) groups are
            # each expanded into all four session statuses.
            assert result_sorted(response.data)["groups"] == [
                {
                    "by": {
                        "project": self.project1.id,
                        "release": "foo@1.0.0",
                        "session.status": "abnormal",
                        "environment": "production",
                    },
                    "totals": {"sum(session)": 0, "count_unique(user)": 0},
                    "series": {"sum(session)": [0, 0, 0], "count_unique(user)": [0, 0, 0]},
                },
                {
                    "by": {
                        "project": self.project1.id,
                        "release": "foo@1.0.0",
                        "session.status": "crashed",
                        "environment": "production",
                    },
                    "totals": {"sum(session)": 0, "count_unique(user)": 0},
                    "series": {"sum(session)": [0, 0, 0], "count_unique(user)": [0, 0, 0]},
                },
                {
                    "by": {
                        "project": self.project1.id,
                        "release": "foo@1.0.0",
                        "environment": "production",
                        "session.status": "errored",
                    },
                    "totals": {"sum(session)": 0, "count_unique(user)": 0},
                    "series": {"sum(session)": [0, 0, 0], "count_unique(user)": [0, 0, 0]},
                },
                {
                    "by": {
                        "project": self.project1.id,
                        "session.status": "healthy",
                        "release": "foo@1.0.0",
                        "environment": "production",
                    },
                    "totals": {"sum(session)": 3, "count_unique(user)": 0},
                    "series": {"sum(session)": [0, 0, 3], "count_unique(user)": [0, 0, 0]},
                },
                {
                    "by": {
                        "session.status": "abnormal",
                        "release": "foo@1.0.0",
                        "project": self.project3.id,
                        "environment": "production",
                    },
                    "totals": {"sum(session)": 0, "count_unique(user)": 0},
                    "series": {"sum(session)": [0, 0, 0], "count_unique(user)": [0, 0, 0]},
                },
                {
                    "by": {
                        "release": "foo@1.0.0",
                        "project": self.project3.id,
                        "session.status": "crashed",
                        "environment": "production",
                    },
                    "totals": {"sum(session)": 0, "count_unique(user)": 0},
                    "series": {"sum(session)": [0, 0, 0], "count_unique(user)": [0, 0, 0]},
                },
                {
                    "by": {
                        "release": "foo@1.0.0",
                        "project": self.project3.id,
                        "environment": "production",
                        "session.status": "errored",
                    },
                    "totals": {"sum(session)": 1, "count_unique(user)": 1},
                    "series": {"sum(session)": [0, 0, 1], "count_unique(user)": [0, 0, 1]},
                },
                {
                    "by": {
                        "session.status": "healthy",
                        "release": "foo@1.0.0",
                        "project": self.project3.id,
                        "environment": "production",
                    },
                    "totals": {"sum(session)": 1, "count_unique(user)": 0},
                    "series": {"sum(session)": [0, 0, 1], "count_unique(user)": [0, 0, 0]},
                },
            ]

    @freeze_time(MOCK_DATETIME)
    def test_environment_filter_not_present_in_query(self):
        self.create_environment(name="abc")
        response = self.do_request(
            {
                "project": [-1],
                "statsPeriod": "1d",
                "interval": "1d",
                "field": ["sum(session)"],
                "environment": ["development", "abc"],
            }
        )

        assert response.status_code == 200, response.content
        assert result_sorted(response.data)["groups"] == [
            {"by": {}, "series": {"sum(session)": [1]}, "totals": {"sum(session)": 1}}
        ]
# Swap the endpoint's release-health backend for the metrics-based one so the
# whole parent test suite runs again against the metrics implementation.
@patch("sentry.api.endpoints.organization_sessions.release_health", MetricsReleaseHealthBackend())
class OrganizationSessionsEndpointMetricsTest(
    SessionMetricsTestCase, OrganizationSessionsEndpointTest
):
    """Repeat with metrics backend"""