# test_sessions_v2.py

import math
from datetime import datetime

import pytest
import pytz
from django.http import QueryDict
from freezegun import freeze_time

from sentry.release_health.base import AllowedResolution, SessionsQueryConfig
from sentry.snuba.sessions_v2 import (
    InvalidParams,
    QueryDefinition,
    get_constrained_date_range,
    get_timestamps,
    massage_sessions_result,
)
from sentry.utils.pytest.fixtures import django_db_all


def _make_query(qs, allow_minute_resolution=True, params=None):
    query_config = SessionsQueryConfig(
        (AllowedResolution.one_minute if allow_minute_resolution else AllowedResolution.one_hour),
        allow_session_status_query=False,
        restrict_date_range=True,
    )
    return QueryDefinition(QueryDict(qs), params or {}, query_config)


def result_sorted(result):
    """sort the groups of the results array by the `by` object, ensuring a stable order"""

    def stable_dict(d):
        return tuple(sorted(d.items(), key=lambda t: t[0]))

    result["groups"].sort(key=lambda group: stable_dict(group["by"]))
    return result
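

# A minimal sketch added for illustration (not part of the original suite): because
# `result_sorted` orders groups by their `by` dict, the expected results in the tests
# below can be written in a fixed, deterministic order.
def test_result_sorted_is_deterministic():
    result = {"groups": [{"by": {"release": "b"}}, {"by": {"release": "a"}}]}
    releases = [group["by"]["release"] for group in result_sorted(result)["groups"]]
    assert releases == ["a", "b"]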


@freeze_time("2018-12-11 03:21:00")
def test_round_range():
    start, end, interval = get_constrained_date_range({"statsPeriod": "2d"})
    assert start == datetime(2018, 12, 9, 4, tzinfo=pytz.utc)
    assert end == datetime(2018, 12, 11, 3, 22, tzinfo=pytz.utc)

    start, end, interval = get_constrained_date_range({"statsPeriod": "2d", "interval": "1d"})
    assert start == datetime(2018, 12, 10, tzinfo=pytz.utc)
    assert end == datetime(2018, 12, 11, 3, 22, tzinfo=pytz.utc)


def test_invalid_interval():
    with pytest.raises(InvalidParams):
        start, end, interval = get_constrained_date_range({"interval": "0d"})


def test_round_exact():
    start, end, interval = get_constrained_date_range(
        {"start": "2021-01-12T04:06:16", "end": "2021-01-17T08:26:13", "interval": "1d"},
    )
    assert start == datetime(2021, 1, 12, tzinfo=pytz.utc)
    assert end == datetime(2021, 1, 18, tzinfo=pytz.utc)


def test_inclusive_end():
    start, end, interval = get_constrained_date_range(
        {"start": "2021-02-24T00:00:00", "end": "2021-02-25T00:00:00", "interval": "1h"},
    )
    assert start == datetime(2021, 2, 24, tzinfo=pytz.utc)
    assert end == datetime(2021, 2, 25, 1, tzinfo=pytz.utc)


@freeze_time("2021-03-05T11:00:00.000Z")
def test_future_request():
    start, end, interval = get_constrained_date_range(
        {"start": "2021-03-05T12:00:00", "end": "2021-03-05T13:00:00", "interval": "1h"},
    )
    assert start == datetime(2021, 3, 5, 11, tzinfo=pytz.utc)
    assert end == datetime(2021, 3, 5, 11, 1, tzinfo=pytz.utc)


@freeze_time("2021-03-05T11:14:17.105Z")
def test_interval_restrictions():
    # making sure intervals are cleanly divisible
    with pytest.raises(InvalidParams, match="The interval has to be less than one day."):
        _make_query("statsPeriod=4d&interval=2d&field=sum(session)")

    with pytest.raises(
        InvalidParams, match="The interval should divide one day without a remainder."
    ):
        _make_query("statsPeriod=6h&interval=59m&field=sum(session)")

    with pytest.raises(
        InvalidParams, match="The interval should divide one day without a remainder."
    ):
        _make_query("statsPeriod=4d&interval=5h&field=sum(session)")

    _make_query("statsPeriod=6h&interval=90m&field=sum(session)")

    with pytest.raises(
        InvalidParams,
        match="The interval has to be a multiple of the minimum interval of one hour.",
    ):
        _make_query("statsPeriod=6h&interval=90m&field=sum(session)", False)

    with pytest.raises(
        InvalidParams,
        match="The interval has to be a multiple of the minimum interval of one minute.",
    ):
        _make_query("statsPeriod=1h&interval=90s&field=sum(session)")

    # restrictions for minute resolution time range
    with pytest.raises(
        InvalidParams,
        match="The time-range when using one-minute resolution intervals is restricted to 6 hours.",
    ):
        _make_query("statsPeriod=7h&interval=15m&field=sum(session)")

    with pytest.raises(
        InvalidParams,
        match="The time-range when using one-minute resolution intervals is restricted to the last 30 days.",
    ):
        _make_query(
            "start=2021-01-05T11:14:17&end=2021-01-05T12:14:17&interval=15m&field=sum(session)"
        )

    with pytest.raises(
        InvalidParams, match="Your interval and date range would create too many results."
    ):
        _make_query("statsPeriod=90d&interval=1h&field=sum(session)")
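

# A short companion sketch (added for illustration; it only reuses query strings the
# surrounding tests already accept): intervals that divide one day evenly and respect the
# resolution limits build a QueryDefinition without raising.
@pytest.mark.parametrize(
    "query_string",
    [
        "statsPeriod=6h&interval=90m&field=sum(session)",
        "statsPeriod=1d&interval=12h&field=sum(session)",
        "field=sum(session)&interval=2h&statsPeriod=2h",
    ],
)
def test_valid_interval_sketch(query_string):
    _make_query(query_string)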


@freeze_time("2020-12-18T11:14:17.105Z")
def test_timestamps():
    query = _make_query("statsPeriod=1d&interval=12h&field=sum(session)")
    expected_timestamps = ["2020-12-17T12:00:00Z", "2020-12-18T00:00:00Z"]
    actual_timestamps = get_timestamps(query)
    assert actual_timestamps == expected_timestamps


@freeze_time("2021-03-08T09:34:00.000Z")
def test_hourly_rounded_start():
    query = _make_query("statsPeriod=30m&interval=1m&field=sum(session)")
    actual_timestamps = get_timestamps(query)

    assert actual_timestamps[0] == "2021-03-08T09:00:00Z"
    assert actual_timestamps[-1] == "2021-03-08T09:34:00Z"
    assert len(actual_timestamps) == 35

    # in this case "45m" means from 08:49:00-09:34:00, but since we round start/end
    # to hours, we extend the start time to 08:00:00.
    query = _make_query("statsPeriod=45m&interval=1m&field=sum(session)")
    actual_timestamps = get_timestamps(query)

    assert actual_timestamps[0] == "2021-03-08T08:00:00Z"
    assert actual_timestamps[-1] == "2021-03-08T09:34:00Z"
    assert len(actual_timestamps) == 95


def test_rounded_end():
    query = _make_query(
        "field=sum(session)&interval=1h&start=2021-02-24T00:00:00Z&end=2021-02-25T00:00:00Z"
    )

    expected_timestamps = [
        "2021-02-24T00:00:00Z",
        "2021-02-24T01:00:00Z",
        "2021-02-24T02:00:00Z",
        "2021-02-24T03:00:00Z",
        "2021-02-24T04:00:00Z",
        "2021-02-24T05:00:00Z",
        "2021-02-24T06:00:00Z",
        "2021-02-24T07:00:00Z",
        "2021-02-24T08:00:00Z",
        "2021-02-24T09:00:00Z",
        "2021-02-24T10:00:00Z",
        "2021-02-24T11:00:00Z",
        "2021-02-24T12:00:00Z",
        "2021-02-24T13:00:00Z",
        "2021-02-24T14:00:00Z",
        "2021-02-24T15:00:00Z",
        "2021-02-24T16:00:00Z",
        "2021-02-24T17:00:00Z",
        "2021-02-24T18:00:00Z",
        "2021-02-24T19:00:00Z",
        "2021-02-24T20:00:00Z",
        "2021-02-24T21:00:00Z",
        "2021-02-24T22:00:00Z",
        "2021-02-24T23:00:00Z",
        "2021-02-25T00:00:00Z",
    ]

    actual_timestamps = get_timestamps(query)

    assert len(actual_timestamps) == 25
    assert actual_timestamps == expected_timestamps


def test_simple_query():
    query = _make_query("statsPeriod=1d&interval=12h&field=sum(session)")
    assert query.query_columns == ["sessions"]


def test_groupby_query():
    query = _make_query("statsPeriod=1d&interval=12h&field=sum(session)&groupBy=release")
    assert sorted(query.query_columns) == ["release", "sessions"]
    assert query.query_groupby == ["release"]


def test_virtual_groupby_query():
    query = _make_query("statsPeriod=1d&interval=12h&field=sum(session)&groupBy=session.status")
    assert sorted(query.query_columns) == [
        "sessions",
        "sessions_abnormal",
        "sessions_crashed",
        "sessions_errored",
    ]
    assert query.query_groupby == []

    query = _make_query(
        "statsPeriod=1d&interval=12h&field=count_unique(user)&groupBy=session.status"
    )
    assert sorted(query.query_columns) == [
        "users",
        "users_abnormal",
        "users_crashed",
        "users_errored",
    ]
    assert query.query_groupby == []
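

# A brief sketch added for illustration (an assumption, not in the original file): with
# multiple fields and the virtual session.status groupBy (the same query string used in
# test_massage_virtual_groupby_timeseries below), the requested columns cover the
# per-field status columns, and the Snuba groupby still stays empty.
def test_virtual_groupby_query_multiple_fields_sketch():
    query = _make_query(
        "statsPeriod=1d&interval=6h&field=sum(session)&field=count_unique(user)&groupBy=session.status"
    )
    assert set(query.query_columns) >= {
        "sessions",
        "sessions_abnormal",
        "sessions_crashed",
        "sessions_errored",
        "users",
        "users_abnormal",
        "users_crashed",
        "users_errored",
    }
    assert query.query_groupby == []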


@freeze_time("2022-05-04T09:00:00.000Z")
def _get_query_maker_params(project):
    # These parameters are computed in the API endpoint, before the
    # QueryDefinition is built. Since we're only testing the query
    # definition here, we can safely mock these.
    return {
        "start": datetime.now(),
        "end": datetime.now(),
        "organization_id": project.organization_id,
    }


@django_db_all
def test_filter_proj_slug_in_query(default_project):
    params = _get_query_maker_params(default_project)
    params["project_id"] = [default_project.id]
    query_def = _make_query(
        f"field=sum(session)&interval=2h&statsPeriod=2h&query=project%3A{default_project.slug}",
        params=params,
    )
    assert query_def.query == f"project:{default_project.slug}"
    assert query_def.params["project_id"] == [default_project.id]


@django_db_all
def test_filter_proj_slug_in_top_filter(default_project):
    params = _get_query_maker_params(default_project)
    params["project_id"] = [default_project.id]
    query_def = _make_query(
        f"field=sum(session)&interval=2h&statsPeriod=2h&project={default_project.id}",
        params=params,
    )
    assert query_def.query == ""
    assert query_def.params["project_id"] == [default_project.id]


@django_db_all
def test_filter_proj_slug_in_top_filter_and_query(default_project):
    params = _get_query_maker_params(default_project)
    params["project_id"] = [default_project.id]
    query_def = _make_query(
        f"field=sum(session)&interval=2h&statsPeriod=2h&project={default_project.id}&query=project%3A{default_project.slug}",
        params=params,
    )
    assert query_def.query == f"project:{default_project.slug}"
    assert query_def.params["project_id"] == [default_project.id]


@django_db_all
def test_proj_neither_in_top_filter_nor_query(default_project):
    params = _get_query_maker_params(default_project)
    query_def = _make_query(
        "field=sum(session)&interval=2h&statsPeriod=2h",
        params=params,
    )
    assert query_def.query == ""
    assert "project_id" not in query_def.params


@django_db_all
def test_filter_env_in_query(default_project):
    env = "prod"
    params = _get_query_maker_params(default_project)
    query_def = _make_query(
        f"field=sum(session)&interval=2h&statsPeriod=2h&query=environment%3A{env}",
        params=params,
    )
    assert query_def.query == f"environment:{env}"


@django_db_all
def test_filter_env_in_top_filter(default_project):
    env = "prod"
    params = _get_query_maker_params(default_project)
    params["environment"] = "prod"
    query_def = _make_query(
        f"field=sum(session)&interval=2h&statsPeriod=2h&environment={env}",
        params=params,
    )
    assert query_def.query == ""


@django_db_all
def test_filter_env_in_top_filter_and_query(default_project):
    env = "prod"
    params = _get_query_maker_params(default_project)
    params["environment"] = "prod"
    query_def = _make_query(
        f"field=sum(session)&interval=2h&statsPeriod=2h&environment={env}&query=environment%3A{env}",
        params=params,
    )
    assert query_def.query == f"environment:{env}"


@django_db_all
def test_env_neither_in_top_filter_nor_query(default_project):
    params = _get_query_maker_params(default_project)
    query_def = _make_query(
        "field=sum(session)&interval=2h&statsPeriod=2h",
        params=params,
    )
    assert query_def.query == ""


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_empty():
    query = _make_query("statsPeriod=1d&interval=1d&field=sum(session)")

    expected_result = {
        "start": "2020-12-18T00:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": ["2020-12-18T00:00:00Z"],
        "groups": [],
    }

    actual_result = result_sorted(massage_sessions_result(query, [], []))

    assert actual_result == expected_result
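

# A minimal sketch added for illustration (an assumption, not part of the original suite):
# with the same empty Snuba results as test_massage_empty, adding a groupBy still yields no
# groups, since groups are only created from returned rows.
@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_empty_with_groupby_sketch():
    query = _make_query("statsPeriod=1d&interval=1d&field=sum(session)&groupBy=release")
    actual_result = result_sorted(massage_sessions_result(query, [], []))
    assert actual_result["groups"] == []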


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_unbalanced_results():
    query = _make_query("statsPeriod=1d&interval=1d&field=sum(session)&groupBy=release")

    result_totals = [
        {"release": "test-example-release", "sessions": 1},
    ]

    expected_result = {
        "start": "2020-12-18T00:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": ["2020-12-18T00:00:00Z"],
        "groups": [
            {
                "by": {"release": "test-example-release"},
                "series": {"sum(session)": [0]},
                "totals": {"sum(session)": 1},
            }
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, []))

    assert actual_result == expected_result

    result_totals = []
    result_timeseries = [
        {
            "release": "test-example-release",
            "sessions": 1,
            "bucketed_started": "2020-12-18T00:00:00+00:00",
        },
    ]

    expected_result = {
        "start": "2020-12-18T00:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": ["2020-12-18T00:00:00Z"],
        "groups": [
            {
                "by": {"release": "test-example-release"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 0},
            }
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_simple_timeseries():
    """A timeseries is filled up when it only receives partial data"""

    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)")
    result_totals = [{"sessions": 4}]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {"sessions": 2, "bucketed_started": "2020-12-18T06:00:00+00:00"},
        {"sessions": 2, "bucketed_started": "2020-12-17T12:00:00+00:00"},
    ]

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {"by": {}, "series": {"sum(session)": [2, 0, 0, 2]}, "totals": {"sum(session)": 4}}
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result
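

# A minimal sketch added for illustration (not part of the original suite), combining
# test_massage_simple_timeseries with the empty-timeseries case from
# test_massage_unbalanced_results: when no timeseries rows come back at all, every interval
# is zero-filled while the totals are kept.
@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_zero_filled_timeseries_sketch():
    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)")
    actual_result = result_sorted(massage_sessions_result(query, [{"sessions": 4}], []))
    assert actual_result["groups"] == [
        {"by": {}, "series": {"sum(session)": [0, 0, 0, 0]}, "totals": {"sum(session)": 4}}
    ]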


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_unordered_timeseries():
    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)")
    result_totals = [{"sessions": 10}]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {"sessions": 3, "bucketed_started": "2020-12-18T00:00:00+00:00"},
        {"sessions": 2, "bucketed_started": "2020-12-17T18:00:00+00:00"},
        {"sessions": 4, "bucketed_started": "2020-12-18T06:00:00+00:00"},
        {"sessions": 1, "bucketed_started": "2020-12-17T12:00:00+00:00"},
    ]

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {"by": {}, "series": {"sum(session)": [1, 2, 3, 4]}, "totals": {"sum(session)": 10}}
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_no_timeseries():
    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)&groupby=projects")
    result_totals = [{"sessions": 4}]
    # snuba returns the datetimes as strings for now
    result_timeseries = None

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [{"by": {}, "totals": {"sum(session)": 4}}],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


def test_massage_exact_timeseries():
    query = _make_query(
        "start=2020-12-17T15:12:34Z&end=2020-12-18T11:14:17Z&interval=6h&field=sum(session)"
    )
    result_totals = [{"sessions": 4}]
    result_timeseries = [
        {"sessions": 2, "bucketed_started": "2020-12-18T06:00:00+00:00"},
        {"sessions": 2, "bucketed_started": "2020-12-17T12:00:00+00:00"},
    ]

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T12:00:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {"by": {}, "series": {"sum(session)": [2, 0, 0, 2]}, "totals": {"sum(session)": 4}}
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_groupby_timeseries():
    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)&groupBy=release")

    result_totals = [
        {"release": "test-example-release", "sessions": 4},
        {"release": "test-example-release-2", "sessions": 1},
    ]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {
            "release": "test-example-release",
            "sessions": 2,
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
        {
            "release": "test-example-release-2",
            "sessions": 1,
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
        {
            "release": "test-example-release",
            "sessions": 2,
            "bucketed_started": "2020-12-17T12:00:00+00:00",
        },
    ]

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {
                "by": {"release": "test-example-release"},
                "series": {"sum(session)": [2, 0, 0, 2]},
                "totals": {"sum(session)": 4},
            },
            {
                "by": {"release": "test-example-release-2"},
                "series": {"sum(session)": [0, 0, 0, 1]},
                "totals": {"sum(session)": 1},
            },
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T13:25:15.769Z")
def test_massage_virtual_groupby_timeseries():
    query = _make_query(
        "statsPeriod=1d&interval=6h&field=sum(session)&field=count_unique(user)&groupBy=session.status"
    )
    result_totals = [
        {
            "users": 1,
            "users_crashed": 1,
            "sessions": 31,
            "sessions_errored": 15,
            "users_errored": 1,
            "sessions_abnormal": 6,
            "sessions_crashed": 8,
            "users_abnormal": 0,
        }
    ]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {
            "sessions_errored": 1,
            "users": 1,
            "users_crashed": 1,
            "sessions_abnormal": 0,
            "sessions": 3,
            "users_errored": 1,
            "users_abnormal": 0,
            "sessions_crashed": 1,
            "bucketed_started": "2020-12-18T12:00:00+00:00",
        },
        {
            "sessions_errored": 0,
            "users": 1,
            "users_crashed": 0,
            "sessions_abnormal": 0,
            "sessions": 3,
            "users_errored": 0,
            "users_abnormal": 0,
            "sessions_crashed": 0,
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
        {
            "sessions_errored": 10,
            "users": 1,
            "users_crashed": 0,
            "sessions_abnormal": 2,
            "sessions": 15,
            "users_errored": 0,
            "users_abnormal": 0,
            "sessions_crashed": 4,
            "bucketed_started": "2020-12-18T00:00:00+00:00",
        },
        {
            "sessions_errored": 4,
            "users": 1,
            "users_crashed": 0,
            "sessions_abnormal": 4,
            "sessions": 10,
            "users_errored": 0,
            "users_abnormal": 0,
            "sessions_crashed": 3,
            "bucketed_started": "2020-12-17T18:00:00+00:00",
        },
    ]

    expected_result = {
        "start": "2020-12-17T18:00:00Z",
        "end": "2020-12-18T13:26:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
            "2020-12-18T12:00:00Z",
        ],
        "groups": [
            {
                "by": {"session.status": "abnormal"},
                "series": {"count_unique(user)": [0, 0, 0, 0], "sum(session)": [4, 2, 0, 0]},
                "totals": {"count_unique(user)": 0, "sum(session)": 6},
            },
            {
                "by": {"session.status": "crashed"},
                "series": {"count_unique(user)": [0, 0, 0, 1], "sum(session)": [3, 4, 0, 1]},
                "totals": {"count_unique(user)": 1, "sum(session)": 8},
            },
            {
                "by": {"session.status": "errored"},
                "series": {"count_unique(user)": [0, 0, 0, 0], "sum(session)": [0, 4, 0, 0]},
                "totals": {"count_unique(user)": 0, "sum(session)": 1},
            },
            {
                "by": {"session.status": "healthy"},
                "series": {"count_unique(user)": [1, 1, 1, 0], "sum(session)": [6, 5, 3, 2]},
                # while in one of the time slots, we have a healthy user, it is
                # the *same* user as the one experiencing a crash later on,
                # so in the *whole* time window, that one user is not counted as healthy,
                # so the `0` here is expected, as that's an example of the `count_unique` behavior.
                "totals": {"count_unique(user)": 0, "sum(session)": 16},
            },
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T13:25:15.769Z")
def test_clamping_in_massage_sessions_results_with_groupby_timeseries():
    query = _make_query(
        "statsPeriod=12h&interval=6h&field=sum(session)&field=count_unique(user)&groupBy=session.status"
    )
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {
            "sessions": 7,
            "sessions_errored": 3,
            "sessions_crashed": 2,
            "sessions_abnormal": 2,
            "users": 7,
            "users_errored": 3,
            "users_crashed": 2,
            "users_abnormal": 2,
            "bucketed_started": "2020-12-18T12:00:00+00:00",
        },
        {
            "sessions": 5,
            "sessions_errored": 10,
            "sessions_crashed": 0,
            "sessions_abnormal": 0,
            "users": 5,
            "users_errored": 10,
            "users_crashed": 0,
            "users_abnormal": 0,
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
    ]

    expected_result = {
        "start": "2020-12-18T06:00:00Z",
        "end": "2020-12-18T13:26:00Z",
        "query": "",
        "intervals": [
            "2020-12-18T06:00:00Z",
            "2020-12-18T12:00:00Z",
        ],
        "groups": [
            {
                "by": {"session.status": "abnormal"},
                "series": {"count_unique(user)": [0, 2], "sum(session)": [0, 2]},
                "totals": {"count_unique(user)": 0, "sum(session)": 0},
            },
            {
                "by": {"session.status": "crashed"},
                "series": {"count_unique(user)": [0, 2], "sum(session)": [0, 2]},
                "totals": {"count_unique(user)": 0, "sum(session)": 0},
            },
            {
                "by": {"session.status": "errored"},
                "series": {"count_unique(user)": [10, 0], "sum(session)": [10, 0]},
                "totals": {"count_unique(user)": 0, "sum(session)": 0},
            },
            {
                "by": {"session.status": "healthy"},
                "series": {"count_unique(user)": [0, 4], "sum(session)": [0, 4]},
                "totals": {"count_unique(user)": 0, "sum(session)": 0},
            },
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, [], result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
def test_nan_duration():
    query = _make_query(
        "statsPeriod=1d&interval=6h&field=avg(session.duration)&field=p50(session.duration)"
    )

    result_totals = [
        {
            "duration_avg": math.nan,
            "duration_quantiles": [math.inf, math.inf, math.inf, math.inf, math.inf, math.inf],
        },
    ]
    result_timeseries = [
        {
            "duration_avg": math.inf,
            "duration_quantiles": [math.inf, math.inf, math.inf, math.inf, math.inf, math.inf],
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
        {
            "duration_avg": math.nan,
            "duration_quantiles": [math.nan, math.nan, math.nan, math.nan, math.nan, math.nan],
            "bucketed_started": "2020-12-17T12:00:00+00:00",
        },
    ]

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {
                "by": {},
                "series": {
                    "avg(session.duration)": [None, None, None, None],
                    "p50(session.duration)": [None, None, None, None],
                },
                "totals": {"avg(session.duration)": None, "p50(session.duration)": None},
            },
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result