test_tasks.py

from datetime import timedelta
from unittest.mock import patch

import pytest
from django.utils import timezone
from freezegun import freeze_time

from sentry.dynamic_sampling import generate_rules
from sentry.dynamic_sampling.rules.helpers.prioritize_transactions import (
    get_transactions_resampling_rates,
)
from sentry.dynamic_sampling.tasks import prioritise_projects, prioritise_transactions
from sentry.snuba.metrics import TransactionMRI
from sentry.testutils import BaseMetricsLayerTestCase, SnubaTestCase, TestCase
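
# Freeze the clock at midnight of the previous day so metrics stored with
# `minutes_before_now` fall inside a stable, deterministic query window.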
MOCK_DATETIME = (timezone.now() - timedelta(days=1)).replace(
    hour=0, minute=0, second=0, microsecond=0
)


@freeze_time(MOCK_DATETIME)
class TestPrioritiseProjectsTask(BaseMetricsLayerTestCase, TestCase, SnubaTestCase):
    @property
    def now(self):
        return MOCK_DATETIME

    def create_project_and_add_metrics(self, name, count, org):
        # Create a project in the given org
        proj = self.create_project(name=name, organization=org)
        # Disable all other biases so the generated rules contain only the uniform rule
        proj.update_option(
            "sentry:dynamic_sampling_biases",
            [
                {"id": "boostEnvironments", "active": False},
                {"id": "ignoreHealthChecks", "active": False},
                {"id": "boostLatestRelease", "active": False},
                {"id": "boostKeyTransactions", "active": False},
            ],
        )
        # Store performance metrics for this project
        self.store_performance_metric(
            name=TransactionMRI.COUNT_PER_ROOT_PROJECT.value,
            tags={"transaction": "foo_transaction"},
            minutes_before_now=30,
            value=count,
            project_id=proj.id,
            org_id=org.id,
        )
        return proj
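
    # `quotas.get_blended_sample_rate` is normally computed by the quotas backend;
    # patching it pins the org-wide base sample rate to a known value.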
    @patch("sentry.dynamic_sampling.rules.base.quotas.get_blended_sample_rate")
    def test_prioritise_projects_simple(self, get_blended_sample_rate):
        get_blended_sample_rate.return_value = 0.25
        # Create an org
        test_org = self.create_organization(name="sample-org")

        # Create 4 projects with different transaction counts
        proj_a = self.create_project_and_add_metrics("a", 9, test_org)
        proj_b = self.create_project_and_add_metrics("b", 7, test_org)
        proj_c = self.create_project_and_add_metrics("c", 3, test_org)
        proj_d = self.create_project_and_add_metrics("d", 1, test_org)

        with self.options({"dynamic-sampling.prioritise_projects.sample_rate": 1.0}):
            with self.tasks():
                prioritise_projects()

        # We expect only the uniform rule; this also checks that `generate_rules`
        # can handle the long floats that come back through Redis.
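        # The expected rates below are consistent with an even split of the org's
        # budget: 20 total transactions * 0.25 blended rate = 5 sampled transactions.
        # proj_d (count 1) is capped at rate 1.0, and the remaining budget of 4 is
        # shared evenly (4/3 each) by the other three projects, giving rates of
        # (4/3)/9, (4/3)/7 and (4/3)/3 respectively. (This reading of the
        # rebalancing model is inferred from the asserted values, not from docs.)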
        assert generate_rules(proj_a)[0]["samplingValue"] == {
            "type": "sampleRate",
            "value": pytest.approx(0.14814814814814817),
        }
        assert generate_rules(proj_b)[0]["samplingValue"] == {
            "type": "sampleRate",
            "value": pytest.approx(0.1904761904761905),
        }
        assert generate_rules(proj_c)[0]["samplingValue"] == {
            "type": "sampleRate",
            "value": pytest.approx(0.4444444444444444),
        }
        assert generate_rules(proj_d)[0]["samplingValue"] == {"type": "sampleRate", "value": 1.0}


@freeze_time(MOCK_DATETIME)
class TestPrioritiseTransactionsTask(BaseMetricsLayerTestCase, TestCase, SnubaTestCase):
    @property
    def now(self):
        return MOCK_DATETIME

    def setUp(self):
        super().setUp()
        self.orgs_info = []
        num_orgs = 3
        num_proj_per_org = 3
        for org_idx in range(num_orgs):
            org = self.create_organization(f"test-org{org_idx}")
            org_info = {"org_id": org.id, "project_ids": []}
            self.orgs_info.append(org_info)
            for proj_idx in range(num_proj_per_org):
                p = self.create_project(organization=org)
                org_info["project_ids"].append(p.id)
                # Create 5 transaction types per project
                for name in ["ts1", "ts2", "tm3", "tl4", "tl5"]:
                    # Derive a count that is unique per (org, project) pair;
                    # the stride must be num_proj_per_org, not num_orgs, to
                    # avoid collisions (they happen to be equal here)
                    idx = org_idx * num_proj_per_org + proj_idx
                    num_transactions = self.get_count_for_transaction(idx, name)
                    self.store_performance_metric(
                        name=TransactionMRI.COUNT_PER_ROOT_PROJECT.value,
                        tags={"transaction": name},
                        minutes_before_now=30,
                        value=num_transactions,
                        project_id=p.id,
                        org_id=org.id,
                    )
        self.org_ids = [org["org_id"] for org in self.orgs_info]

    def get_count_for_transaction(self, idx: int, name: str):
        """
        Return a known count derived from the transaction name plus an index
        based on the org/project order, so every (project, transaction) pair
        gets a distinct value.
        """
        counts = {
            "ts1": 1,
            "ts2": 100,
            "tm3": 1000,
            "tl4": 2000,
            "tl5": 3000,
        }
        return idx + counts[name]

    @patch("sentry.dynamic_sampling.rules.base.quotas.get_blended_sample_rate")
    def test_prioritise_transactions_simple(self, get_blended_sample_rate):
        """
        Create orgs, projects & transactions, then check that the task writes
        rebalancing data to Redis.
        """
        get_blended_sample_rate.return_value = 0.25
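        # A load_rate of 1.0 should make the task process every org rather than a
        # sampled subset (assumed meaning of the option), and the feature flag
        # gates the transaction-bias behaviour under test.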
        with self.options({"dynamic-sampling.prioritise_transactions.load_rate": 1.0}):
            with self.feature({"organizations:ds-prioritise-by-transaction-bias": True}):
                with self.tasks():
                    prioritise_transactions()

        # Now Redis should contain rebalancing data for our projects
        for org in self.orgs_info:
            org_id = org["org_id"]
            for proj_id in org["project_ids"]:
                tran_rate, global_rate = get_transactions_resampling_rates(
                    org_id=org_id, proj_id=proj_id, default_rate=0.1
                )
                for transaction_name in ["ts1", "ts2", "tm3", "tl4", "tl5"]:
                    # Check that some rate was calculated for each transaction
                    assert transaction_name in tran_rate
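
        # Note: `global_rate` (the second value returned) is presumably the
        # project-wide rate stored alongside the per-transaction rates; it is
        # not asserted here.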