# test_all_indexers.py

  1. """
  2. Generic testsuite that runs against all productionized indexer backends.
  3. Tests static string indexer, caching string indexer in combination, plugs in
  4. various backends to see if their external behavior makes sense, and that e.g.
  5. the mock indexer actually behaves the same as the postgres indexer.
  6. """
  7. from collections.abc import Mapping
  8. import pytest
  9. from sentry.sentry_metrics.indexer.base import FetchType, FetchTypeExt, Metadata
  10. from sentry.sentry_metrics.indexer.cache import (
  11. BULK_RECORD_CACHE_NAMESPACE,
  12. CachingIndexer,
  13. StringIndexerCache,
  14. )
  15. from sentry.sentry_metrics.indexer.mock import RawSimpleIndexer
  16. from sentry.sentry_metrics.indexer.postgres.postgres_v2 import PGStringIndexerV2
  17. from sentry.sentry_metrics.indexer.strings import SHARED_STRINGS, StaticStringIndexer
  18. from sentry.sentry_metrics.use_case_id_registry import UseCaseID
  19. from sentry.testutils.helpers.options import override_options
  20. BACKENDS = [
  21. RawSimpleIndexer,
  22. pytest.param(PGStringIndexerV2, marks=pytest.mark.django_db),
  23. ]
  24. USE_CASE_IDS = [UseCaseID.SESSIONS, UseCaseID.TRANSACTIONS]
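
# Every test below runs once per backend; tests that take the use_case_id
# fixture additionally run once per use case, so a single test body covers
# the full BACKENDS x USE_CASE_IDS matrix (e.g. PGStringIndexerV2 under both
# SESSIONS and TRANSACTIONS).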


@pytest.fixture(params=BACKENDS)
def indexer_cls(request):
    return request.param


@pytest.fixture
def indexer(indexer_cls):
    return indexer_cls()


@pytest.fixture
def indexer_cache():
    indexer_cache = StringIndexerCache(
        cache_name="default",
        partition_key="test",
    )
    yield indexer_cache
    indexer_cache.cache.clear()


@pytest.fixture(params=USE_CASE_IDS)
def use_case_id(request):
    return request.param


@pytest.fixture
def writes_limiter_option_name(use_case_id):
    if use_case_id is UseCaseID.SESSIONS:
        return "sentry-metrics.writes-limiter.limits.releasehealth"
    return "sentry-metrics.writes-limiter.limits.performance"


def assert_fetch_type_for_tag_string_set(
    meta: Mapping[str, Metadata], fetch_type: FetchType, str_set: set[str]
):
    assert all(meta[string].fetch_type == fetch_type for string in str_set)
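
# For orientation: the `meta` mapping passed to the helper above is one
# per-org slice of results.get_fetch_metadata(), shaped roughly like
# (a sketch, inferred from how the tests below use it):
#     {"release": Metadata(id=..., fetch_type=FetchType.HARDCODED), ...}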


def test_static_and_non_static_strings_release_health(indexer, use_case_id):
    static_indexer = StaticStringIndexer(indexer)
    strings = {
        use_case_id: {
            2: {"release", "1.0.0"},
            3: {"production", "environment", "release", "2.0.0"},
        }
    }
    results = static_indexer.bulk_record(strings=strings)

    v1 = indexer.resolve(use_case_id, 2, "1.0.0")
    v2 = indexer.resolve(use_case_id, 3, "2.0.0")

    assert results[use_case_id][2]["release"] == SHARED_STRINGS["release"]
    assert results[use_case_id][3]["production"] == SHARED_STRINGS["production"]
    assert results[use_case_id][3]["environment"] == SHARED_STRINGS["environment"]
    assert results[use_case_id][3]["release"] == SHARED_STRINGS["release"]

    assert results[use_case_id][2]["1.0.0"] == v1
    assert results[use_case_id][3]["2.0.0"] == v2

    meta = results.get_fetch_metadata()
    assert_fetch_type_for_tag_string_set(meta[use_case_id][2], FetchType.HARDCODED, {"release"})
    assert_fetch_type_for_tag_string_set(
        meta[use_case_id][3], FetchType.HARDCODED, {"release", "production", "environment"}
    )
    assert_fetch_type_for_tag_string_set(meta[use_case_id][2], FetchType.FIRST_SEEN, {"1.0.0"})
    assert_fetch_type_for_tag_string_set(meta[use_case_id][3], FetchType.FIRST_SEEN, {"2.0.0"})
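
# SHARED_STRINGS pre-assigns ids to well-known strings such as "release" and
# "production"; StaticStringIndexer answers those without touching the backend,
# which is why they surface as FetchType.HARDCODED above while org-specific
# values like "1.0.0" are FIRST_SEEN. (The hardcoded ids are assumed to live
# in a reserved range, so they cannot collide with backend-assigned ids.)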


def test_static_and_non_static_strings_generic_metrics(indexer):
    static_indexer = StaticStringIndexer(indexer)
    strings = {
        UseCaseID.TRANSACTIONS: {
            1: {"production", "environment", "BBB", "CCC"},
            2: {"AAA", "release", "1.0.0"},
            3: {"production", "environment", "release", "AAA", "BBB"},
            4: {"EEE"},
        },
        UseCaseID.SPANS: {
            3: {"production", "environment", "BBB", "CCC"},
            4: {"AAA", "release", "1.0.0"},
            5: {"production", "environment", "release", "AAA", "BBB"},
            6: {"EEE"},
        },
    }

    static_string_params = [
        (UseCaseID.TRANSACTIONS, 1, "production"),
        (UseCaseID.TRANSACTIONS, 1, "environment"),
        (UseCaseID.TRANSACTIONS, 2, "release"),
        (UseCaseID.TRANSACTIONS, 3, "production"),
        (UseCaseID.TRANSACTIONS, 3, "environment"),
        (UseCaseID.TRANSACTIONS, 3, "release"),
        (UseCaseID.SPANS, 3, "production"),
        (UseCaseID.SPANS, 3, "environment"),
        (UseCaseID.SPANS, 4, "release"),
        (UseCaseID.SPANS, 5, "production"),
        (UseCaseID.SPANS, 5, "environment"),
        (UseCaseID.SPANS, 5, "release"),
    ]
    first_seen_strings_params = [
        (UseCaseID.TRANSACTIONS, 1, "BBB"),
        (UseCaseID.TRANSACTIONS, 1, "CCC"),
        (UseCaseID.TRANSACTIONS, 2, "AAA"),
        (UseCaseID.TRANSACTIONS, 2, "1.0.0"),
        (UseCaseID.TRANSACTIONS, 3, "AAA"),
        (UseCaseID.TRANSACTIONS, 3, "BBB"),
        (UseCaseID.TRANSACTIONS, 4, "EEE"),
        (UseCaseID.SPANS, 3, "BBB"),
        (UseCaseID.SPANS, 3, "CCC"),
        (UseCaseID.SPANS, 4, "AAA"),
        (UseCaseID.SPANS, 4, "1.0.0"),
        (UseCaseID.SPANS, 5, "AAA"),
        (UseCaseID.SPANS, 5, "BBB"),
        (UseCaseID.SPANS, 6, "EEE"),
    ]
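
    # The spans writes-limiter options are emptied below, which disables those
    # quotas entirely so every string in the SPANS payload can be indexed.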
    with override_options(
        {
            "sentry-metrics.writes-limiter.limits.spans.global": [],
            "sentry-metrics.writes-limiter.limits.spans.per-org": [],
        },
    ):
        results = static_indexer.bulk_record(strings=strings)

    first_seen_strings = {}
    for params in first_seen_strings_params:
        first_seen_strings[params] = static_indexer.resolve(*params)

    for use_case_id, org_id, string in static_string_params:
        assert results[use_case_id][org_id][string] == SHARED_STRINGS[string]

    for (use_case_id, org_id, string), id in first_seen_strings.items():
        assert results[use_case_id][org_id][string] == id

    meta = results.get_fetch_metadata()
    assert_fetch_type_for_tag_string_set(
        meta[UseCaseID.TRANSACTIONS][1], FetchType.HARDCODED, {"production", "environment"}
    )
    assert_fetch_type_for_tag_string_set(
        meta[UseCaseID.TRANSACTIONS][2],
        FetchType.HARDCODED,
        {"release"},
    )
    assert_fetch_type_for_tag_string_set(
        meta[UseCaseID.TRANSACTIONS][3],
        FetchType.HARDCODED,
        {"release", "production", "environment"},
    )
    assert_fetch_type_for_tag_string_set(
        meta[UseCaseID.TRANSACTIONS][3],
        FetchType.HARDCODED,
        set(),
    )
    assert_fetch_type_for_tag_string_set(
        meta[UseCaseID.TRANSACTIONS][1], FetchType.FIRST_SEEN, {"BBB", "CCC"}
    )
    assert_fetch_type_for_tag_string_set(
        meta[UseCaseID.TRANSACTIONS][2],
        FetchType.FIRST_SEEN,
        {"AAA", "1.0.0"},
    )
    assert_fetch_type_for_tag_string_set(
        meta[UseCaseID.TRANSACTIONS][3],
        FetchType.FIRST_SEEN,
        {"AAA", "BBB"},
    )
    assert_fetch_type_for_tag_string_set(
        meta[UseCaseID.TRANSACTIONS][4],
        FetchType.FIRST_SEEN,
        {"EEE"},
    )


def test_indexer(indexer, indexer_cache, use_case_id):
    with override_options(
        {
            "sentry-metrics.indexer.read-new-cache-namespace": False,
            "sentry-metrics.indexer.write-new-cache-namespace": False,
        }
    ):
        org1_id = 1
        org2_id = 2
        strings = {"hello", "hey", "hi"}

        raw_indexer = indexer
        indexer = CachingIndexer(indexer_cache, indexer)

        use_case_strings = {use_case_id: {org1_id: strings, org2_id: {"sup"}}}

        # create a record with diff org_id but same string that we test against
        indexer.record(use_case_id, 999, "hey")
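
        # "br" below is assumed to match BULK_RECORD_CACHE_NAMESPACE (imported
        # above), the namespace CachingIndexer writes bulk_record entries under;
        # at this point nothing should be cached for org1's strings.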
        assert list(
            indexer_cache.get_many(
                "br",
                [f"{use_case_id.value}:{org1_id}:{string}" for string in strings],
            ).values()
        ) == [None, None, None]

        results = indexer.bulk_record(use_case_strings).results

        org1_string_ids = {
            raw_indexer.resolve(use_case_id, org1_id, "hello"),
            raw_indexer.resolve(use_case_id, org1_id, "hey"),
            raw_indexer.resolve(use_case_id, org1_id, "hi"),
        }
        assert None not in org1_string_ids
        assert len(org1_string_ids) == 3  # no overlapping ids

        org2_string_id = raw_indexer.resolve(use_case_id, org2_id, "sup")
        assert org2_string_id not in org1_string_ids

        # verify org1 results and cache values
        for id_value in results[use_case_id].results[org1_id].values():
            assert id_value in org1_string_ids
        for cache_value in indexer_cache.get_many(
            "br", [f"{use_case_id.value}:{org1_id}:{string}" for string in strings]
        ).values():
            assert cache_value in org1_string_ids

        # verify org2 results and cache values
        assert results[use_case_id][org2_id]["sup"] == org2_string_id
        assert indexer_cache.get("br", f"{use_case_id.value}:{org2_id}:sup") == org2_string_id

        # we should have no results for org_id 999
        assert not results[use_case_id].results.get(999)


def test_resolve_and_reverse_resolve(indexer, indexer_cache, use_case_id):
    """
    Test `resolve` and `reverse_resolve` methods
    """
    with override_options(
        {
            "sentry-metrics.indexer.read-new-cache-namespace": False,
            "sentry-metrics.indexer.write-new-cache-namespace": False,
        }
    ):
        org1_id = 1
        strings = {"hello", "hey", "hi"}

        indexer = CachingIndexer(indexer_cache, indexer)

        org_strings = {org1_id: strings}
        indexer.bulk_record({use_case_id: org_strings})

        # test resolve and reverse_resolve
        id = indexer.resolve(use_case_id=use_case_id, org_id=org1_id, string="hello")
        assert id is not None
        assert indexer.reverse_resolve(use_case_id=use_case_id, org_id=org1_id, id=id) == "hello"

        # test record on a string that already exists
        indexer.record(use_case_id=use_case_id, org_id=org1_id, string="hello")
        assert indexer.resolve(use_case_id=use_case_id, org_id=org1_id, string="hello") == id

        # test invalid values
        assert indexer.resolve(use_case_id=use_case_id, org_id=org1_id, string="beep") is None
        assert indexer.reverse_resolve(use_case_id=use_case_id, org_id=org1_id, id=1234) is None


def test_already_created_plus_written_results(indexer, indexer_cache, use_case_id) -> None:
    """
    Test that we correctly combine db read results with db write results
    for the same organization.
    """
    with override_options(
        {
            "sentry-metrics.indexer.read-new-cache-namespace": False,
            "sentry-metrics.indexer.write-new-cache-namespace": False,
        }
    ):
        org_id = 1234
        raw_indexer = indexer
        indexer = CachingIndexer(indexer_cache, indexer)

        v0 = raw_indexer.record(use_case_id, org_id, "v1.2.0:xyz")
        v1 = raw_indexer.record(use_case_id, org_id, "v1.2.1:xyz")
        v2 = raw_indexer.record(use_case_id, org_id, "v1.2.2:xyz")
        expected_mapping = {"v1.2.0:xyz": v0, "v1.2.1:xyz": v1, "v1.2.2:xyz": v2}

        results = indexer.bulk_record(
            {use_case_id: {org_id: {"v1.2.0:xyz", "v1.2.1:xyz", "v1.2.2:xyz"}}}
        )
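
        # The three strings were recorded behind the cache's back, so this
        # first bulk_record reads them from the DB and populates the cache;
        # the CACHE_HIT assertions at the end rely on that.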
        assert len(results[use_case_id][org_id]) == len(expected_mapping) == 3
        for string, id in results[use_case_id][org_id].items():
            assert expected_mapping[string] == id

        results = indexer.bulk_record(
            {use_case_id: {org_id: {"v1.2.0:xyz", "v1.2.1:xyz", "v1.2.2:xyz", "v1.2.3:xyz"}}},
        )

        v3 = raw_indexer.resolve(use_case_id, org_id, "v1.2.3:xyz")
        expected_mapping["v1.2.3:xyz"] = v3

        assert len(results[use_case_id][org_id]) == len(expected_mapping) == 4
        for string, id in results[use_case_id][org_id].items():
            assert expected_mapping[string] == id

        fetch_meta = results.get_fetch_metadata()
        assert_fetch_type_for_tag_string_set(
            fetch_meta[use_case_id][org_id],
            FetchType.CACHE_HIT,
            {"v1.2.0:xyz", "v1.2.1:xyz", "v1.2.2:xyz"},
        )
        assert_fetch_type_for_tag_string_set(
            fetch_meta[use_case_id][org_id], FetchType.FIRST_SEEN, {"v1.2.3:xyz"}
        )


def test_invalid_timestamp_in_indexer_cache(indexer, indexer_cache) -> None:
    """
    Test that the caching indexer incurs a cache miss if the data has been
    sitting in the cache for too long.
    """
    with override_options(
        {
            "sentry-metrics.indexer.read-new-cache-namespace": True,
            "sentry-metrics.indexer.write-new-cache-namespace": True,
        }
    ):
        indexer = CachingIndexer(indexer_cache, indexer)
        use_case_id = UseCaseID.SPANS
        org_id = 1
        s = "str"

        res = indexer.bulk_record({use_case_id: {org_id: {s}}})
        id = res.get_mapped_results()[use_case_id][org_id][s]
        assert res.get_fetch_metadata()[use_case_id][org_id][s].fetch_type == FetchType.FIRST_SEEN

        indexer.cache.cache.set(
            indexer.cache._make_namespaced_cache_key(
                BULK_RECORD_CACHE_NAMESPACE, f"{use_case_id.value}:{org_id}:{s}"
            ),
            indexer.cache._make_cache_val(id, 0),
            version=indexer_cache.version,
        )
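
        # _make_cache_val(id, 0) stores the value with timestamp 0, i.e. one
        # that looks arbitrarily stale; _validate_result should reject it, so
        # the next bulk_record falls back to a DB read and re-caches a fresh
        # value (hence DB_READ, then CACHE_HIT below).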
        assert (
            indexer.bulk_record({use_case_id: {org_id: {s}}})
            .get_fetch_metadata()[use_case_id][org_id][s]
            .fetch_type
            == FetchType.DB_READ
        )
        assert indexer.cache._validate_result(
            indexer.cache.cache.get(
                indexer.cache._make_namespaced_cache_key(
                    BULK_RECORD_CACHE_NAMESPACE, f"{use_case_id.value}:{org_id}:{s}"
                ),
                version=indexer_cache.version,
            )
        )
        assert (
            indexer.bulk_record({use_case_id: {org_id: {s}}})
            .get_fetch_metadata()[use_case_id][org_id][s]
            .fetch_type
            == FetchType.CACHE_HIT
        )


def test_already_cached_plus_read_results(indexer, indexer_cache, use_case_id) -> None:
    """
    Test that we correctly combine cached results with read results
    for the same organization.
    """
    with override_options(
        {
            "sentry-metrics.indexer.read-new-cache-namespace": False,
            "sentry-metrics.indexer.write-new-cache-namespace": False,
        }
    ):
        org_id = 8
        cached = {
            f"{use_case_id.value}:{org_id}:beep": 10,
            f"{use_case_id.value}:{org_id}:boop": 11,
        }
        indexer_cache.set_many("br", cached)
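
        # ids 10 and 11 are fabricated: they exist only in the cache, never in
        # the backend, which is what the resolve() checks below rely on.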
        raw_indexer = indexer
        indexer = CachingIndexer(indexer_cache, indexer)

        results = indexer.bulk_record({use_case_id: {org_id: {"beep", "boop"}}})
        assert len(results[use_case_id][org_id]) == 2
        assert results[use_case_id][org_id]["beep"] == 10
        assert results[use_case_id][org_id]["boop"] == 11

        # confirm we did not write to the db if results were already cached
        assert not raw_indexer.resolve(use_case_id, org_id, "beep")
        assert not raw_indexer.resolve(use_case_id, org_id, "boop")

        bam = raw_indexer.record(use_case_id, org_id, "bam")
        assert bam is not None

        results = indexer.bulk_record({use_case_id: {org_id: {"beep", "boop", "bam"}}})
        assert len(results[use_case_id][org_id]) == 3
        assert results[use_case_id][org_id]["beep"] == 10
        assert results[use_case_id][org_id]["boop"] == 11
        assert results[use_case_id][org_id]["bam"] == bam

        fetch_meta = results.get_fetch_metadata()
        assert_fetch_type_for_tag_string_set(
            fetch_meta[use_case_id][org_id], FetchType.CACHE_HIT, {"beep", "boop"}
        )
        assert_fetch_type_for_tag_string_set(
            fetch_meta[use_case_id][org_id], FetchType.DB_READ, {"bam"}
        )


def test_read_when_bulk_record(indexer, use_case_id):
    with override_options(
        {
            "sentry-metrics.indexer.read-new-cache-namespace": False,
            "sentry-metrics.indexer.write-new-cache-namespace": False,
        }
    ):
        strings = {
            use_case_id: {
                1: {"a"},
                2: {"b", "c"},
                3: {"d", "e", "f"},
                4: {"g", "h", "i", "j"},
                5: {"k", "l", "m", "n", "o"},
            }
        }
        indexer.bulk_record(strings)
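        # The first call writes every string as FIRST_SEEN; with no cache in
        # front of the raw backend, the repeat call below must fetch them all
        # back as DB reads.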
        results = indexer.bulk_record(strings)
        assert all(
            str_meta_data.fetch_type is FetchType.DB_READ
            for key_result in results.results.values()
            for metadata in key_result.meta.values()
            for str_meta_data in metadata.values()
        )


def test_rate_limited(indexer, use_case_id, writes_limiter_option_name):
    """
    Assert that per-org and global rate limits are applied at all.

    Since we don't have control over ordering in sets/dicts, we have no
    control over which string gets rate-limited. That makes assertions
    quite awkward and imprecise.
    """
    if isinstance(indexer, RawSimpleIndexer):
        pytest.skip("mock indexer does not support rate limiting")

    org_strings = {1: {"a", "b", "c"}, 2: {"e", "f"}, 3: {"g"}}

    with override_options(
        {
            f"{writes_limiter_option_name}.per-org": [
                {"window_seconds": 10, "granularity_seconds": 10, "limit": 1}
            ],
        }
    ):
        results = indexer.bulk_record({use_case_id: org_strings})

    assert len(results[use_case_id][1]) == 3
    assert len(results[use_case_id][2]) == 2
    assert len(results[use_case_id][3]) == 1
    assert results[use_case_id][3]["g"] is not None

    rate_limited_strings = set()
    for org_id in 1, 2, 3:
        for k, v in results[use_case_id][org_id].items():
            if v is None:
                rate_limited_strings.add((org_id, k))

    assert len(rate_limited_strings) == 3
    assert (3, "g") not in rate_limited_strings

    for org_id, string in rate_limited_strings:
        assert results.get_fetch_metadata()[use_case_id][org_id][string] == Metadata(
            id=None,
            fetch_type=FetchType.RATE_LIMITED,
            fetch_type_ext=FetchTypeExt(is_global=False),
        )
    org_strings = {1: {"x", "y", "z"}}

    # attempt to index even more strings, and assert that we can't get any indexed
    with override_options(
        {
            f"{writes_limiter_option_name}.per-org": [
                {"window_seconds": 10, "granularity_seconds": 10, "limit": 1}
            ],
            "sentry-metrics.indexer.read-new-cache-namespace": False,
        }
    ):
        results = indexer.bulk_record({use_case_id: org_strings})

    assert results[use_case_id][1] == {"x": None, "y": None, "z": None}
    for letter in "xyz":
        assert results.get_fetch_metadata()[use_case_id][1][letter] == Metadata(
            id=None,
            fetch_type=FetchType.RATE_LIMITED,
            fetch_type_ext=FetchTypeExt(is_global=False),
        )

    org_strings2 = {1: rate_limited_strings}

    # assert that if we reconfigure limits, the quota resets
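    # (presumably because the writes limiter keys its sliding windows by quota
    # configuration, so a new config starts from a clean slate; an assumption
    # about the limiter's keying, consistent with the assertions below)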
    with override_options(
        {
            f"{writes_limiter_option_name}.global": [
                {"window_seconds": 10, "granularity_seconds": 10, "limit": 2}
            ],
            "sentry-metrics.indexer.read-new-cache-namespace": False,
        }
    ):
        results = indexer.bulk_record({use_case_id: org_strings2})

    rate_limited_strings2 = set()
    for k, v in results[use_case_id][1].items():
        if v is None:
            rate_limited_strings2.add(k)

    assert len(rate_limited_strings2) == 1
    assert len(rate_limited_strings - rate_limited_strings2) == 2


def test_bulk_reverse_resolve(indexer):
    """
    Tests that bulk reverse resolve returns the corresponding strings
    when given a combination of shared and non-shared ids.
    """
    with override_options(
        {
            "sentry-metrics.indexer.read-new-cache-namespace": False,
            "sentry-metrics.indexer.write-new-cache-namespace": False,
        }
    ):
        org_id = 7
        use_case_id = UseCaseID.SESSIONS  # any use case would do

        static_indexer = StaticStringIndexer(indexer)
        a = indexer.record(use_case_id, org_id, "aaa")
        b = indexer.record(use_case_id, org_id, "bbb")
        c = indexer.record(use_case_id, org_id, "ccc")

        production = SHARED_STRINGS["production"]
        release = SHARED_STRINGS["release"]
        environment = SHARED_STRINGS["environment"]

        unknown1 = 6666
        unknown2 = 6667

        indexes = [a, production, b, unknown1, release, environment, c, unknown2]

        # we expect the indexer to resolve the known indexes to their original
        # strings and to omit unknown indexes from the result
        expected_result = {
            a: "aaa",
            b: "bbb",
            c: "ccc",
            production: "production",
            release: "release",
            environment: "environment",
        }
        actual_result = static_indexer.bulk_reverse_resolve(use_case_id, org_id, indexes)
        assert actual_result == expected_result