# test_sessions_v2.py

import math
from datetime import datetime

import pytest
import pytz
from django.http import QueryDict
from freezegun import freeze_time

# from sentry.testutils import TestCase
from sentry.snuba.sessions_v2 import (
    InvalidParams,
    QueryDefinition,
    _get_timestamps,
    get_constrained_date_range,
    massage_sessions_result,
)


def _make_query(qs, allow_minute_resolution=True):
    return QueryDefinition(QueryDict(qs), {}, allow_minute_resolution)
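

# The tests below feed _make_query URL-style query strings, e.g.:
#   _make_query("statsPeriod=1d&interval=12h&field=sum(session)")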


def result_sorted(result):
    """sort the groups of the results array by the `by` object, ensuring a stable order"""

    def stable_dict(d):
        return tuple(sorted(d.items(), key=lambda t: t[0]))

    result["groups"].sort(key=lambda group: stable_dict(group["by"]))
    return result


@freeze_time("2018-12-11 03:21:00")
def test_round_range():
    start, end, interval = get_constrained_date_range({"statsPeriod": "2d"})
    assert start == datetime(2018, 12, 9, 4, tzinfo=pytz.utc)
    assert end == datetime(2018, 12, 11, 3, 22, tzinfo=pytz.utc)

    start, end, interval = get_constrained_date_range({"statsPeriod": "2d", "interval": "1d"})
    assert start == datetime(2018, 12, 10, tzinfo=pytz.utc)
    assert end == datetime(2018, 12, 11, 3, 22, tzinfo=pytz.utc)


def test_invalid_interval():
    with pytest.raises(InvalidParams):
        start, end, interval = get_constrained_date_range({"interval": "0d"})


def test_round_exact():
    start, end, interval = get_constrained_date_range(
        {"start": "2021-01-12T04:06:16", "end": "2021-01-17T08:26:13", "interval": "1d"},
    )
    assert start == datetime(2021, 1, 12, tzinfo=pytz.utc)
    assert end == datetime(2021, 1, 18, tzinfo=pytz.utc)


def test_inclusive_end():
    start, end, interval = get_constrained_date_range(
        {"start": "2021-02-24T00:00:00", "end": "2021-02-25T00:00:00", "interval": "1h"},
    )
    assert start == datetime(2021, 2, 24, tzinfo=pytz.utc)
    assert end == datetime(2021, 2, 25, 1, tzinfo=pytz.utc)


@freeze_time("2021-03-05T11:14:17.105Z")
def test_interval_restrictions():
    # making sure intervals are cleanly divisible
    with pytest.raises(InvalidParams, match="The interval has to be less than one day."):
        _make_query("statsPeriod=4d&interval=2d&field=sum(session)")

    with pytest.raises(
        InvalidParams, match="The interval should divide one day without a remainder."
    ):
        _make_query("statsPeriod=6h&interval=59m&field=sum(session)")

    with pytest.raises(
        InvalidParams, match="The interval should divide one day without a remainder."
    ):
        _make_query("statsPeriod=4d&interval=5h&field=sum(session)")
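
    # A 90-minute interval divides one day evenly (16 * 90m = 24h), so this is
    # expected to pass while one-minute resolution is allowed; the same query
    # is rejected below once minute resolution is disabled.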
    _make_query("statsPeriod=6h&interval=90m&field=sum(session)")

    with pytest.raises(
        InvalidParams,
        match="The interval has to be a multiple of the minimum interval of one hour.",
    ):
        _make_query("statsPeriod=6h&interval=90m&field=sum(session)", False)

    with pytest.raises(
        InvalidParams,
        match="The interval has to be a multiple of the minimum interval of one minute.",
    ):
        _make_query("statsPeriod=1h&interval=90s&field=sum(session)")

    # restrictions for minute resolution time range
    with pytest.raises(
        InvalidParams,
        match="The time-range when using one-minute resolution intervals is restricted to 6 hours.",
    ):
        _make_query("statsPeriod=7h&interval=15m&field=sum(session)")

    with pytest.raises(
        InvalidParams,
        match="The time-range when using one-minute resolution intervals is restricted to the last 30 days.",
    ):
        _make_query(
            "start=2021-01-05T11:14:17&end=2021-01-05T12:14:17&interval=15m&field=sum(session)"
        )

    with pytest.raises(
        InvalidParams, match="Your interval and date range would create too many results."
    ):
        _make_query("statsPeriod=90d&interval=1h&field=sum(session)")


@freeze_time("2020-12-18T11:14:17.105Z")
def test_timestamps():
    query = _make_query("statsPeriod=1d&interval=12h&field=sum(session)")

    expected_timestamps = ["2020-12-17T12:00:00Z", "2020-12-18T00:00:00Z"]
    actual_timestamps = _get_timestamps(query)
    assert actual_timestamps == expected_timestamps


@freeze_time("2021-03-08T09:34:00.000Z")
def test_hourly_rounded_start():
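    # "30m" here means 09:04:00-09:34:00, but since start/end are rounded to
    # full hours, the start is extended to 09:00:00, giving 35 one-minute buckets.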
    query = _make_query("statsPeriod=30m&interval=1m&field=sum(session)")
    actual_timestamps = _get_timestamps(query)

    assert actual_timestamps[0] == "2021-03-08T09:00:00Z"
    assert actual_timestamps[-1] == "2021-03-08T09:34:00Z"
    assert len(actual_timestamps) == 35

    # in this case "45m" means from 08:49:00-09:34:00, but since we round start/end
    # to hours, we extend the start time to 08:00:00.
    query = _make_query("statsPeriod=45m&interval=1m&field=sum(session)")
    actual_timestamps = _get_timestamps(query)

    assert actual_timestamps[0] == "2021-03-08T08:00:00Z"
    assert actual_timestamps[-1] == "2021-03-08T09:34:00Z"
    assert len(actual_timestamps) == 95


def test_rounded_end():
    query = _make_query(
        "field=sum(session)&interval=1h&start=2021-02-24T00:00:00Z&end=2021-02-25T00:00:00Z"
    )

    expected_timestamps = [
        "2021-02-24T00:00:00Z",
        "2021-02-24T01:00:00Z",
        "2021-02-24T02:00:00Z",
        "2021-02-24T03:00:00Z",
        "2021-02-24T04:00:00Z",
        "2021-02-24T05:00:00Z",
        "2021-02-24T06:00:00Z",
        "2021-02-24T07:00:00Z",
        "2021-02-24T08:00:00Z",
        "2021-02-24T09:00:00Z",
        "2021-02-24T10:00:00Z",
        "2021-02-24T11:00:00Z",
        "2021-02-24T12:00:00Z",
        "2021-02-24T13:00:00Z",
        "2021-02-24T14:00:00Z",
        "2021-02-24T15:00:00Z",
        "2021-02-24T16:00:00Z",
        "2021-02-24T17:00:00Z",
        "2021-02-24T18:00:00Z",
        "2021-02-24T19:00:00Z",
        "2021-02-24T20:00:00Z",
        "2021-02-24T21:00:00Z",
        "2021-02-24T22:00:00Z",
        "2021-02-24T23:00:00Z",
        "2021-02-25T00:00:00Z",
    ]

    actual_timestamps = _get_timestamps(query)

    assert len(actual_timestamps) == 25
    assert actual_timestamps == expected_timestamps


def test_simple_query():
    query = _make_query("statsPeriod=1d&interval=12h&field=sum(session)")

    assert query.query_columns == ["sessions"]


def test_groupby_query():
    query = _make_query("statsPeriod=1d&interval=12h&field=sum(session)&groupBy=release")

    assert sorted(query.query_columns) == ["release", "sessions"]
    assert query.query_groupby == ["release"]


def test_virtual_groupby_query():
    query = _make_query("statsPeriod=1d&interval=12h&field=sum(session)&groupBy=session.status")
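    # `session.status` acts as a virtual groupby: as the assertions below show,
    # the query expands into per-status columns rather than a real groupby.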
    assert sorted(query.query_columns) == [
        "sessions",
        "sessions_abnormal",
        "sessions_crashed",
        "sessions_errored",
    ]
    assert query.query_groupby == []

    query = _make_query(
        "statsPeriod=1d&interval=12h&field=count_unique(user)&groupBy=session.status"
    )

    assert sorted(query.query_columns) == [
        "users",
        "users_abnormal",
        "users_crashed",
        "users_errored",
    ]
    assert query.query_groupby == []


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_empty():
    query = _make_query("statsPeriod=1d&interval=1d&field=sum(session)")

    result_totals = []
    result_timeseries = []

    expected_result = {
        "start": "2020-12-18T00:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": ["2020-12-18T00:00:00Z"],
        "groups": [],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_unbalanced_results():
    query = _make_query("statsPeriod=1d&interval=1d&field=sum(session)&groupBy=release")

    result_totals = [
        {"release": "test-example-release", "sessions": 1},
    ]
    result_timeseries = []

    expected_result = {
        "start": "2020-12-18T00:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": ["2020-12-18T00:00:00Z"],
        "groups": [
            {
                "by": {"release": "test-example-release"},
                "series": {"sum(session)": [0]},
                "totals": {"sum(session)": 1},
            }
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result

    result_totals = []
    result_timeseries = [
        {
            "release": "test-example-release",
            "sessions": 1,
            "bucketed_started": "2020-12-18T00:00:00+00:00",
        },
    ]

    expected_result = {
        "start": "2020-12-18T00:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": ["2020-12-18T00:00:00Z"],
        "groups": [
            {
                "by": {"release": "test-example-release"},
                "series": {"sum(session)": [1]},
                "totals": {"sum(session)": 0},
            }
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_simple_timeseries():
    """A timeseries is filled up when it only receives partial data"""

    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)")
    result_totals = [{"sessions": 4}]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {"sessions": 2, "bucketed_started": "2020-12-17T12:00:00+00:00"},
        {"sessions": 2, "bucketed_started": "2020-12-18T06:00:00+00:00"},
    ]
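
    # Only two of the four expected intervals have data; the massaged series is
    # expected to zero-fill the missing buckets.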
    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {"by": {}, "series": {"sum(session)": [2, 0, 0, 2]}, "totals": {"sum(session)": 4}}
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


def test_massage_exact_timeseries():
    query = _make_query(
        "start=2020-12-17T15:12:34Z&end=2020-12-18T11:14:17Z&interval=6h&field=sum(session)"
    )
    result_totals = [{"sessions": 4}]
    result_timeseries = [
        {"sessions": 2, "bucketed_started": "2020-12-17T12:00:00+00:00"},
        {"sessions": 2, "bucketed_started": "2020-12-18T06:00:00+00:00"},
    ]

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T12:00:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {"by": {}, "series": {"sum(session)": [2, 0, 0, 2]}, "totals": {"sum(session)": 4}}
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
def test_massage_groupby_timeseries():
    query = _make_query("statsPeriod=1d&interval=6h&field=sum(session)&groupBy=release")

    result_totals = [
        {"release": "test-example-release", "sessions": 4},
        {"release": "test-example-release-2", "sessions": 1},
    ]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {
            "release": "test-example-release",
            "sessions": 2,
            "bucketed_started": "2020-12-17T12:00:00+00:00",
        },
        {
            "release": "test-example-release",
            "sessions": 2,
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
        {
            "release": "test-example-release-2",
            "sessions": 1,
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
    ]

    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {
                "by": {"release": "test-example-release"},
                "series": {"sum(session)": [2, 0, 0, 2]},
                "totals": {"sum(session)": 4},
            },
            {
                "by": {"release": "test-example-release-2"},
                "series": {"sum(session)": [0, 0, 0, 1]},
                "totals": {"sum(session)": 1},
            },
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T13:25:15.769Z")
def test_massage_virtual_groupby_timeseries():
    query = _make_query(
        "statsPeriod=1d&interval=6h&field=sum(session)&field=count_unique(user)&groupBy=session.status"
    )

    result_totals = [
        {
            "users": 1,
            "users_crashed": 1,
            "sessions": 31,
            "sessions_errored": 15,
            "users_errored": 1,
            "sessions_abnormal": 6,
            "sessions_crashed": 8,
            "users_abnormal": 0,
        }
    ]
    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {
            "sessions_errored": 4,
            "users": 1,
            "users_crashed": 0,
            "sessions_abnormal": 4,
            "sessions": 10,
            "users_errored": 0,
            "users_abnormal": 0,
            "sessions_crashed": 3,
            "bucketed_started": "2020-12-17T18:00:00+00:00",
        },
        {
            "sessions_errored": 10,
            "users": 1,
            "users_crashed": 0,
            "sessions_abnormal": 2,
            "sessions": 15,
            "users_errored": 0,
            "users_abnormal": 0,
            "sessions_crashed": 4,
            "bucketed_started": "2020-12-18T00:00:00+00:00",
        },
        {
            "sessions_errored": 1,
            "users": 1,
            "users_crashed": 1,
            "sessions_abnormal": 0,
            "sessions": 3,
            "users_errored": 1,
            "users_abnormal": 0,
            "sessions_crashed": 1,
            "bucketed_started": "2020-12-18T12:00:00+00:00",
        },
        {
            "sessions_errored": 0,
            "users": 1,
            "users_crashed": 0,
            "sessions_abnormal": 0,
            "sessions": 3,
            "users_errored": 0,
            "users_abnormal": 0,
            "sessions_crashed": 0,
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
    ]

    expected_result = {
        "start": "2020-12-17T18:00:00Z",
        "end": "2020-12-18T13:26:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
            "2020-12-18T12:00:00Z",
        ],
        "groups": [
            {
                "by": {"session.status": "abnormal"},
                "series": {"count_unique(user)": [0, 0, 0, 0], "sum(session)": [4, 2, 0, 0]},
                "totals": {"count_unique(user)": 0, "sum(session)": 6},
            },
            {
                "by": {"session.status": "crashed"},
                "series": {"count_unique(user)": [0, 0, 0, 1], "sum(session)": [3, 4, 0, 1]},
                "totals": {"count_unique(user)": 1, "sum(session)": 8},
            },
            {
                "by": {"session.status": "errored"},
                "series": {"count_unique(user)": [0, 0, 0, 0], "sum(session)": [0, 4, 0, 0]},
                "totals": {"count_unique(user)": 0, "sum(session)": 1},
            },
            {
                "by": {"session.status": "healthy"},
                "series": {"count_unique(user)": [1, 1, 1, 0], "sum(session)": [6, 5, 3, 2]},
                # While some time slots do have a healthy user, it is the *same*
                # user as the one experiencing a crash later on. Over the *whole*
                # time window that user is therefore not counted as healthy, so
                # the `0` here is expected; that's how `count_unique` behaves.
                "totals": {"count_unique(user)": 0, "sum(session)": 16},
            },
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T13:25:15.769Z")
def test_clamping_in_massage_sessions_results_with_groupby_timeseries():
    query = _make_query(
        "statsPeriod=12h&interval=6h&field=sum(session)&field=count_unique(user)&groupBy=session.status"
    )

    # snuba returns the datetimes as strings for now
    result_timeseries = [
        {
            "sessions": 5,
            "sessions_errored": 10,
            "sessions_crashed": 0,
            "sessions_abnormal": 0,
            "users": 5,
            "users_errored": 10,
            "users_crashed": 0,
            "users_abnormal": 0,
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
        {
            "sessions": 7,
            "sessions_errored": 3,
            "sessions_crashed": 2,
            "sessions_abnormal": 2,
            "users": 7,
            "users_errored": 3,
            "users_crashed": 2,
            "users_abnormal": 2,
            "bucketed_started": "2020-12-18T12:00:00+00:00",
        },
    ]
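
    # No totals are passed to massage_sessions_result below, so every `totals`
    # value falls back to 0. As the expected series suggest, derived healthy and
    # errored counts that would go negative (e.g. 5 sessions vs. 10 errored) are
    # clamped to 0.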
    expected_result = {
        "start": "2020-12-18T06:00:00Z",
        "end": "2020-12-18T13:26:00Z",
        "query": "",
        "intervals": [
            "2020-12-18T06:00:00Z",
            "2020-12-18T12:00:00Z",
        ],
        "groups": [
            {
                "by": {"session.status": "abnormal"},
                "series": {"count_unique(user)": [0, 2], "sum(session)": [0, 2]},
                "totals": {"count_unique(user)": 0, "sum(session)": 0},
            },
            {
                "by": {"session.status": "crashed"},
                "series": {"count_unique(user)": [0, 2], "sum(session)": [0, 2]},
                "totals": {"count_unique(user)": 0, "sum(session)": 0},
            },
            {
                "by": {"session.status": "errored"},
                "series": {"count_unique(user)": [10, 0], "sum(session)": [10, 0]},
                "totals": {"count_unique(user)": 0, "sum(session)": 0},
            },
            {
                "by": {"session.status": "healthy"},
                "series": {"count_unique(user)": [0, 4], "sum(session)": [0, 4]},
                "totals": {"count_unique(user)": 0, "sum(session)": 0},
            },
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, [], result_timeseries))

    assert actual_result == expected_result


@freeze_time("2020-12-18T11:14:17.105Z")
def test_nan_duration():
    query = _make_query(
        "statsPeriod=1d&interval=6h&field=avg(session.duration)&field=p50(session.duration)"
    )

    result_totals = [
        {
            "duration_avg": math.nan,
            "duration_quantiles": [math.inf, math.inf, math.inf, math.inf, math.inf, math.inf],
        },
    ]
    result_timeseries = [
        {
            "duration_avg": math.nan,
            "duration_quantiles": [math.nan, math.nan, math.nan, math.nan, math.nan, math.nan],
            "bucketed_started": "2020-12-17T12:00:00+00:00",
        },
        {
            "duration_avg": math.inf,
            "duration_quantiles": [math.inf, math.inf, math.inf, math.inf, math.inf, math.inf],
            "bucketed_started": "2020-12-18T06:00:00+00:00",
        },
    ]
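
    # Non-finite values (NaN / Inf) are expected to come through as None in the
    # massaged result.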
    expected_result = {
        "start": "2020-12-17T12:00:00Z",
        "end": "2020-12-18T11:15:00Z",
        "query": "",
        "intervals": [
            "2020-12-17T12:00:00Z",
            "2020-12-17T18:00:00Z",
            "2020-12-18T00:00:00Z",
            "2020-12-18T06:00:00Z",
        ],
        "groups": [
            {
                "by": {},
                "series": {
                    "avg(session.duration)": [None, None, None, None],
                    "p50(session.duration)": [None, None, None, None],
                },
                "totals": {"avg(session.duration)": None, "p50(session.duration)": None},
            },
        ],
    }

    actual_result = result_sorted(massage_sessions_result(query, result_totals, result_timeseries))

    assert actual_result == expected_result