
typing(metrics_extraction): Do not allow untyped functions (#62205)

mypy does deeper type-checking when a function is fully typed. This
change enforces stronger typing in several extraction-related modules
by adding the rule `disallow_untyped_defs = true` for them.

It also fixes various typing errors that could not be seen before this
rule was enabled.
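
For illustration, a minimal sketch (not taken from this commit) of the
behaviour described above: by default mypy skips the body of a function
that has no annotations, so fully typing the signature is what turns on
the deeper checks.

    def untyped(value):
        return value + 1  # a str argument only fails at runtime; mypy skips this body

    def typed(value: int) -> int:
        return value + 1  # body and every call site are now type-checked

    # typed("1")  # mypy: Argument 1 to "typed" has incompatible type "str"; expected "int"
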
Armen Zambrano G 1 year ago
Parent
Commit
881f719816

+ 10 - 0
pyproject.toml

@@ -732,3 +732,13 @@ disable_error_code = [
     "var-annotated",
 ]
 # end: sentry modules with typing issues
+
+# beginning: stronger typing
+[[tool.mypy.overrides]]
+module = [
+    "src.sentry.relay.config.metric_extraction",
+    "src.sentry.snuba.metrics.extraction",
+    "tests.sentry.relay.config.test_metric_extraction",
+]
+disallow_untyped_defs = true
+# end: stronger typing
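
With this override in place, an unannotated def in any of the listed
modules is rejected outright. Roughly (hypothetical helper name, not
code from these modules):

    def _spec_hashes(metrics_specs):  # mypy: Function is missing a type annotation  [no-untyped-def]
        return [hashed_spec[0] for hashed_spec in metrics_specs]

    def _spec_hashes_typed(metrics_specs: list[tuple[str, str]]) -> list[str]:
        return [hashed_spec[0] for hashed_spec in metrics_specs]  # accepted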

+ 1 - 1
src/sentry/relay/config/metric_extraction.py

@@ -339,7 +339,7 @@ def convert_widget_query_to_metric(
 
 def _can_widget_use_stateful_extraction(
     widget_query: DashboardWidgetQuery, metrics_specs: Sequence[HashedMetricSpec]
-):
+) -> bool:
     if not metrics_specs:
         return False
     spec_hashes = [hashed_spec[0] for hashed_spec in metrics_specs]
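
A hedged illustration of what the `-> bool` annotation buys (made-up
names, not this module's code): once the return type is declared, mypy
verifies every return path in the body.

    def _is_enabled(flag: int) -> bool:
        if flag > 0:
            return True
        return None  # mypy: Incompatible return value type (got "None", expected "bool")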

+ 1 - 1
src/sentry/snuba/metrics/extraction.py

@@ -1274,7 +1274,7 @@ class OnDemandMetricSpec:
         return QueryParsingResult(conditions=extended_conditions)
 
     @staticmethod
-    def _aggregate_conditions(parsed_field) -> Optional[RuleCondition]:
+    def _aggregate_conditions(parsed_field: FieldParsingResult) -> Optional[RuleCondition]:
         # We have to handle the special case for the "count_if" function, however it may be better to build some
         # better abstracted code to handle third-party rule conditions injection.
         if parsed_field.function == "count_if":

+ 49 - 44
tests/sentry/snuba/test_extraction.py

@@ -88,7 +88,7 @@ from sentry.testutils.pytest.fixtures import django_db_all
         ),  # error.handled is an error search term
     ],
 )
-def test_should_use_on_demand(agg, query, result):
+def test_should_use_on_demand(agg, query, result) -> None:
     assert should_use_on_demand_metrics(Dataset.PerformanceMetrics, agg, query) is result
 
 
@@ -119,13 +119,16 @@ def test_should_use_on_demand(agg, query, result):
         ),
     ],
 )
-def test_should_use_on_demand_with_mri(agg, query, result):
+def test_should_use_on_demand_with_mri(agg, query, result) -> None:
     assert should_use_on_demand_metrics(Dataset.PerformanceMetrics, agg, query) is result
 
 
-def create_spec_if_needed(dataset, agg, query):
-    if should_use_on_demand_metrics(dataset, agg, query):
-        return OnDemandMetricSpec(agg, query)
+def create_spec_if_needed(dataset, agg, query) -> OnDemandMetricSpec | None:
+    return (
+        OnDemandMetricSpec(agg, query)
+        if should_use_on_demand_metrics(dataset, agg, query)
+        else None
+    )
 
 
 class TestCreatesOndemandMetricSpec:
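
As a hedged aside on the `create_spec_if_needed` helper above (an
illustrative caller reusing this test module's imports, not a line from
the file): declaring the return type as `OnDemandMetricSpec | None`
forces callers to narrow away the `None` case before using the spec.

    spec = create_spec_if_needed(Dataset.PerformanceMetrics, "count()", "transaction.duration:>1s")
    if spec is not None:  # narrowing required now that the Optional return is declared
        assert spec.condition == {"name": "event.duration", "op": "gt", "value": 1000.0}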
@@ -175,7 +178,7 @@ class TestCreatesOndemandMetricSpec:
             ("count()", "transaction.source:route"),
         ],
     )
-    def test_creates_on_demand_spec(self, aggregate, query):
+    def test_creates_on_demand_spec(self, aggregate, query) -> None:
         assert create_spec_if_needed(self.dataset, aggregate, query)
 
     @pytest.mark.parametrize(
@@ -209,7 +212,7 @@ class TestCreatesOndemandMetricSpec:
             ("failure_rate()", ""),
         ],
     )
-    def test_does_not_create_on_demand_spec(self, aggregate, query):
+    def test_does_not_create_on_demand_spec(self, aggregate, query) -> None:
         assert not create_spec_if_needed(self.dataset, aggregate, query)
 
 
@@ -217,7 +220,7 @@ class TestCreatesOndemandMetricSpec:
     "percentile",
     [0.5, 0.75, 0.9, 0.95, 0.99],
 )
-def test_spec_equivalence_with_percentiles(percentile):
+def test_spec_equivalence_with_percentiles(percentile) -> None:
     fixed_percentile = f"p{int(percentile * 100)}"
 
     spec_1 = OnDemandMetricSpec(f"{fixed_percentile}(measurements.fp)", "transaction.duration:>1s")
@@ -231,7 +234,7 @@ def test_spec_equivalence_with_percentiles(percentile):
     assert spec_1.condition == spec_2.condition
 
 
-def test_spec_simple_query_count():
+def test_spec_simple_query_count() -> None:
     spec = OnDemandMetricSpec("count()", "transaction.duration:>1s")
 
     assert spec._metric_type == "c"
@@ -240,7 +243,7 @@ def test_spec_simple_query_count():
     assert spec.condition == {"name": "event.duration", "op": "gt", "value": 1000.0}
 
 
-def test_spec_simple_query_distribution():
+def test_spec_simple_query_distribution() -> None:
     spec = OnDemandMetricSpec("p75(measurements.fp)", "transaction.duration:>1s")
 
     assert spec._metric_type == "d"
@@ -249,7 +252,7 @@ def test_spec_simple_query_distribution():
     assert spec.condition == {"name": "event.duration", "op": "gt", "value": 1000.0}
 
 
-def test_spec_simple_query_with_environment():
+def test_spec_simple_query_with_environment() -> None:
     spec = OnDemandMetricSpec("count()", "transaction.duration:>1s", "production")
 
     assert spec._metric_type == "c"
@@ -264,7 +267,7 @@ def test_spec_simple_query_with_environment():
     }
 
 
-def test_spec_simple_query_with_environment_only():
+def test_spec_simple_query_with_environment_only() -> None:
     # We use apdex, since it's the only metric which is on demand also without a query.
     spec = OnDemandMetricSpec("apdex(0.8)", "", "production")
 
@@ -274,7 +277,7 @@ def test_spec_simple_query_with_environment_only():
     assert spec.condition == {"name": "event.environment", "op": "eq", "value": "production"}
 
 
-def test_spec_context_mapping():
+def test_spec_context_mapping() -> None:
     spec = OnDemandMetricSpec("count()", "device:SM-A226B")
 
     assert spec._metric_type == "c"
@@ -287,7 +290,7 @@ def test_spec_context_mapping():
     }
 
 
-def test_spec_query_or_precedence_with_environment():
+def test_spec_query_or_precedence_with_environment() -> None:
     spec_1 = OnDemandMetricSpec(
         "count()", "(transaction.duration:>1s OR http.status_code:200)", "dev"
     )
@@ -316,7 +319,7 @@ def test_spec_query_or_precedence_with_environment():
     assert spec_1.condition == spec_2.condition
 
 
-def test_spec_count_if_query_with_environment():
+def test_spec_count_if_query_with_environment() -> None:
     spec = OnDemandMetricSpec(
         "count_if(transaction.duration,equals,300)",
         "(http.method:GET AND endpoint:/hello)",
@@ -342,7 +345,7 @@ def test_spec_count_if_query_with_environment():
     }
 
 
-def test_spec_complex_query_with_environment():
+def test_spec_complex_query_with_environment() -> None:
     spec = OnDemandMetricSpec(
         "count()",
         "transaction.duration:>1s AND http.status_code:200 OR os.browser:Chrome",
@@ -379,7 +382,7 @@ def test_spec_complex_query_with_environment():
     }
 
 
-def test_spec_or_condition():
+def test_spec_or_condition() -> None:
     spec = OnDemandMetricSpec("count()", "transaction.duration:>=100 OR transaction.duration:<1000")
 
     assert spec.condition == {
@@ -391,7 +394,7 @@ def test_spec_or_condition():
     }
 
 
-def test_spec_and_condition():
+def test_spec_and_condition() -> None:
     spec = OnDemandMetricSpec("count()", "release:foo transaction.duration:<10s")
 
     assert spec.condition == {
@@ -403,7 +406,7 @@ def test_spec_and_condition():
     }
 
 
-def test_spec_nested_condition():
+def test_spec_nested_condition() -> None:
     spec = OnDemandMetricSpec("count()", "(release:a OR transaction.op:b) transaction.duration:>1s")
 
     assert spec.condition == {
@@ -421,7 +424,7 @@ def test_spec_nested_condition():
     }
 
 
-def test_spec_boolean_precedence():
+def test_spec_boolean_precedence() -> None:
     spec = OnDemandMetricSpec("count()", "release:a OR transaction.op:b transaction.duration:>1s")
 
     assert spec.condition == {
@@ -439,7 +442,7 @@ def test_spec_boolean_precedence():
     }
 
 
-def test_spec_wildcard():
+def test_spec_wildcard() -> None:
     spec = OnDemandMetricSpec("count()", "release.version:1.*")
 
     assert spec.condition == {
@@ -449,7 +452,7 @@ def test_spec_wildcard():
     }
 
 
-def test_spec_count_if():
+def test_spec_count_if() -> None:
     spec = OnDemandMetricSpec("count_if(transaction.duration,equals,300)", "")
 
     assert spec._metric_type == "c"
@@ -462,7 +465,7 @@ def test_spec_count_if():
     }
 
 
-def test_spec_count_if_with_query():
+def test_spec_count_if_with_query() -> None:
     spec = OnDemandMetricSpec(
         "count_if(transaction.duration,equals,300)", "release:a OR transaction.op:b"
     )
@@ -482,7 +485,7 @@ def test_spec_count_if_with_query():
     }
 
 
-def test_spec_in_operator():
+def test_spec_in_operator() -> None:
     in_spec = OnDemandMetricSpec("count()", "transaction.duration:[1,2,3]")
     not_in_spec = OnDemandMetricSpec("count()", "!transaction.duration:[1,2,3]")
 
@@ -493,7 +496,7 @@ def test_spec_in_operator():
     }
 
 
-def test_spec_with_custom_measurement():
+def test_spec_with_custom_measurement() -> None:
     spec = OnDemandMetricSpec("avg(measurements.memoryUsed)", "measurements.memoryUsed:>100")
 
     assert spec._metric_type == "d"
@@ -506,7 +509,7 @@ def test_spec_with_custom_measurement():
     }
 
 
-def test_spec_with_has():
+def test_spec_with_has() -> None:
     spec = OnDemandMetricSpec(
         "avg(measurements.lcp)", "has:measurements.lcp AND !has:measurements.memoryUsage"
     )
@@ -526,7 +529,7 @@ def test_spec_with_has():
     }
 
 
-def test_spec_with_message():
+def test_spec_with_message() -> None:
     spec = OnDemandMetricSpec(
         "avg(measurements.lcp)", 'message:"issues" AND !message:"alerts" AND "api"'
     )
@@ -547,7 +550,7 @@ def test_spec_with_message():
     }
 
 
-def test_spec_with_unknown_error_status():
+def test_spec_with_unknown_error_status() -> None:
     spec = OnDemandMetricSpec(
         "avg(measurements.lcp)", "transaction.status:unknown_error OR transaction.status:unknown"
     )
@@ -564,7 +567,7 @@ def test_spec_with_unknown_error_status():
     }
 
 
-def test_spec_ignore_fields():
+def test_spec_ignore_fields() -> None:
     with_ignored_field = OnDemandMetricSpec("count()", "transaction.duration:>=1 project:sentry")
     without_ignored_field = OnDemandMetricSpec("count()", "transaction.duration:>=1")
 
@@ -572,7 +575,7 @@ def test_spec_ignore_fields():
 
 
 @django_db_all
-def test_spec_failure_count(default_project):
+def test_spec_failure_count(default_project) -> None:
     spec = OnDemandMetricSpec("failure_count()", "transaction.duration:>1s")
 
     assert spec._metric_type == "c"
@@ -583,7 +586,7 @@ def test_spec_failure_count(default_project):
 
 
 @django_db_all
-def test_spec_failure_rate(default_project):
+def test_spec_failure_rate(default_project) -> None:
     spec = OnDemandMetricSpec("failure_rate()", "transaction.duration:>1s")
 
     assert spec._metric_type == "c"
@@ -595,7 +598,7 @@ def test_spec_failure_rate(default_project):
 
 @django_db_all
 @patch("sentry.snuba.metrics.extraction._get_satisfactory_threshold_and_metric")
-def test_spec_apdex(_get_satisfactory_threshold_and_metric, default_project):
+def test_spec_apdex(_get_satisfactory_threshold_and_metric, default_project) -> None:
     _get_satisfactory_threshold_and_metric.return_value = 100, "transaction.duration"
 
     spec = OnDemandMetricSpec("apdex(10)", "release:a")
@@ -609,7 +612,7 @@ def test_spec_apdex(_get_satisfactory_threshold_and_metric, default_project):
 
 @django_db_all
 @patch("sentry.snuba.metrics.extraction._get_satisfactory_threshold_and_metric")
-def test_spec_apdex_decimal(_get_satisfactory_threshold_and_metric, default_project):
+def test_spec_apdex_decimal(_get_satisfactory_threshold_and_metric, default_project) -> None:
     _get_satisfactory_threshold_and_metric.return_value = 100, "transaction.duration"
 
     spec = OnDemandMetricSpec("apdex(0.8)", "release:a")
@@ -622,7 +625,7 @@ def test_spec_apdex_decimal(_get_satisfactory_threshold_and_metric, default_proj
 
 
 @django_db_all
-def test_spec_epm(default_project):
+def test_spec_epm(default_project) -> None:
     spec = OnDemandMetricSpec("epm()", "transaction.duration:>1s")
 
     assert spec._metric_type == "c"
@@ -633,7 +636,7 @@ def test_spec_epm(default_project):
 
 
 @django_db_all
-def test_spec_eps(default_project):
+def test_spec_eps(default_project) -> None:
     spec = OnDemandMetricSpec("eps()", "transaction.duration:>1s")
 
     assert spec._metric_type == "c"
@@ -643,7 +646,7 @@ def test_spec_eps(default_project):
     assert spec.tags_conditions(default_project) == []
 
 
-def test_cleanup_equivalent_specs():
+def test_cleanup_equivalent_specs() -> None:
     simple_spec = OnDemandMetricSpec("count()", "transaction.duration:>0")
     event_type_spec = OnDemandMetricSpec(
         "count()", "transaction.duration:>0 event.type:transaction"
@@ -671,7 +674,7 @@ def test_cleanup_equivalent_specs():
         "(isNightlyBuild:true OR environment:production)",
     ],
 )
-def test_cleanup_with_environment_injection(query):
+def test_cleanup_with_environment_injection(query) -> None:
     # We are simulating the transformation that the frontend performs in the query, since they add the
     # AND (`event.type:transaction`) at the end.
     field = "count()"
@@ -696,7 +699,9 @@ def test_cleanup_with_environment_injection(query):
 
 @django_db_all
 @patch("sentry.snuba.metrics.extraction._get_satisfactory_threshold_and_metric")
-def test_spec_apdex_without_condition(_get_satisfactory_threshold_and_metric, default_project):
+def test_spec_apdex_without_condition(
+    _get_satisfactory_threshold_and_metric, default_project
+) -> None:
     _get_satisfactory_threshold_and_metric.return_value = 100, "transaction.duration"
 
     spec = OnDemandMetricSpec("apdex(10)", "")
@@ -708,7 +713,7 @@ def test_spec_apdex_without_condition(_get_satisfactory_threshold_and_metric, de
     assert spec.tags_conditions(default_project) == apdex_tag_spec(default_project, ["10"])
 
 
-def test_spec_custom_tag():
+def test_spec_custom_tag() -> None:
     custom_tag_spec = OnDemandMetricSpec("count()", "foo:bar")
 
     assert custom_tag_spec.condition == {"name": "event.tags.foo", "op": "eq", "value": "bar"}
@@ -722,7 +727,7 @@ def test_spec_custom_tag():
         "(release:a OR (transaction.op:b and browser.version:1)) transaction.duration:>1s",
     ],
 )
-def test_query_tokens_to_string(query):
+def test_query_tokens_to_string(query) -> None:
     tokens = parse_search_query(query)
     new_query = query_tokens_to_string(tokens)
     new_tokens = parse_search_query(new_query)
@@ -743,7 +748,7 @@ def test_query_tokens_to_string(query):
         ),
     ],
 )
-def test_to_standard_metrics_query(dirty, clean):
+def test_to_standard_metrics_query(dirty, clean) -> None:
     cleaned_up_query = to_standard_metrics_query(dirty)
     cleaned_up_tokens = parse_search_query(cleaned_up_query)
     clean_tokens = parse_search_query(clean)
@@ -767,7 +772,7 @@ def test_to_standard_metrics_query(dirty, clean):
         ),
     ],
 )
-def test_search_query_converter(query, expected):
+def test_search_query_converter(query, expected) -> None:
     tokens = parse_search_query(query)
     converter = SearchQueryConverter(tokens)
     condition = converter.convert()
@@ -792,7 +797,7 @@ def test_search_query_converter(query, expected):
         (" (AND) And (And) Or release:initial or (and) or", "release:initial"),
     ],
 )
-def test_cleanup_query(dirty, clean):
+def test_cleanup_query(dirty, clean) -> None:
     dirty_tokens = parse_search_query(dirty)
     clean_tokens = parse_search_query(clean)
     actual_clean = cleanup_search_query(dirty_tokens)
@@ -800,7 +805,7 @@ def test_cleanup_query(dirty, clean):
     assert actual_clean == clean_tokens
 
 
-def test_cleanup_query_with_empty_parens():
+def test_cleanup_query_with_empty_parens() -> None:
     """
     Separate test with empty parens because we can't parse a string with empty parens correctly
     """

+ 6 - 7
tests/sentry/tasks/test_on_demand_metrics.py

@@ -29,17 +29,17 @@ OnDemandExtractionState = DashboardWidgetQueryOnDemand.OnDemandExtractionState
 
 
 @pytest.fixture
-def owner():
+def owner() -> None:
     return Factories.create_user()
 
 
 @pytest.fixture
-def organization(owner):
+def organization(owner) -> None:
     return Factories.create_organization(owner=owner)
 
 
 @pytest.fixture
-def project(organization):
+def project(organization) -> None:
     return Factories.create_project(organization=organization)
 
 
@@ -205,7 +205,7 @@ def test_schedule_on_demand_check(
     expected_discover_queries_run,
     expected_cache_set,
     project,
-):
+) -> None:
     cache.clear()
     options = {
         "on_demand_metrics.check_widgets.enable": option_enable,
@@ -318,8 +318,7 @@ def test_process_widget_specs(
     expected_discover_queries_run,
     expected_low_cardinality,
     project,
-):
-
+) -> None:
     raw_snql_query.return_value = (
         _SNQL_DATA_HIGH_CARDINALITY if set_high_cardinality else _SNQL_DATA_LOW_CARDINALITY
     )
@@ -400,7 +399,7 @@ def assert_on_demand_model(
     has_features: bool,
     expected_applicable: bool,
     expected_hashes: Sequence[str],
-):
+) -> None:
     if not expected_applicable:
         assert model.extraction_state == OnDemandExtractionState.DISABLED_NOT_APPLICABLE
         assert model.spec_hashes == []