Browse Source

ref: fix typing for sentry.utils.function_cache (#79518)

Using a ParamSpec's `P.args` without its matching `P.kwargs` is undefined
(the typing spec requires the two to appear together), so the previous
annotation `Callable[[S], P.args]` was invalid. This switches to a
TypeVarTuple to represent the variable positional arguments instead.

<!-- Describe your PR here. -->
anthony sottile 4 months ago
parent
commit
f89ddc2cd5
2 changed files with 21 additions and 16 deletions
  1. 1 0
      pyproject.toml
  2. 20 16
      src/sentry/utils/function_cache.py

+ 1 - 0
pyproject.toml

@@ -511,6 +511,7 @@ module = [
     "sentry.utils.env",
     "sentry.utils.env",
     "sentry.utils.event",
     "sentry.utils.event",
     "sentry.utils.files",
     "sentry.utils.files",
+    "sentry.utils.function_cache",
     "sentry.utils.geo",
     "sentry.utils.geo",
     "sentry.utils.imports",
     "sentry.utils.imports",
     "sentry.utils.iterators",
     "sentry.utils.iterators",

+ 20 - 16
src/sentry/utils/function_cache.py

@@ -1,9 +1,11 @@
+from __future__ import annotations
+
 import uuid
 import uuid
 from collections.abc import Callable
 from collections.abc import Callable
 from datetime import timedelta
 from datetime import timedelta
 from decimal import Decimal
 from decimal import Decimal
 from functools import partial
 from functools import partial
-from typing import Any, ParamSpec, TypeVar
+from typing import TypeVar, TypeVarTuple
 
 
 from django.core.cache import cache
 from django.core.cache import cache
 from django.db import models
 from django.db import models
@@ -11,12 +13,12 @@ from django.db.models.signals import post_delete, post_save
 
 
 from sentry.utils.hashlib import md5_text
 from sentry.utils.hashlib import md5_text
 
 
-P = ParamSpec("P")
+Ts = TypeVarTuple("Ts")
 R = TypeVar("R")
 R = TypeVar("R")
 S = TypeVar("S", bound=models.Model)
 S = TypeVar("S", bound=models.Model)
 
 
 
 
-def arg_to_hashable(arg: Any):
+def arg_to_hashable(arg: object) -> object:
     if isinstance(arg, (int, float, str, Decimal, uuid.UUID)):
     if isinstance(arg, (int, float, str, Decimal, uuid.UUID)):
         return arg
         return arg
     elif isinstance(arg, models.Model):
     elif isinstance(arg, models.Model):
@@ -27,28 +29,33 @@ def arg_to_hashable(arg: Any):
         )
         )
 
 
 
 
-def cache_key_for_cached_func(cached_func: Callable[P, R], *args):
+def cache_key_for_cached_func(cached_func: Callable[[*Ts], R], *args: *Ts) -> str:
     base_cache_key = f"query_cache:{md5_text(cached_func.__qualname__).hexdigest()}"
     base_cache_key = f"query_cache:{md5_text(cached_func.__qualname__).hexdigest()}"
     vals_to_hash = [arg_to_hashable(arg) for arg in args]
     vals_to_hash = [arg_to_hashable(arg) for arg in args]
     return f"{base_cache_key}:{md5_text(*vals_to_hash).hexdigest()}"
     return f"{base_cache_key}:{md5_text(*vals_to_hash).hexdigest()}"
 
 
 
 
 def clear_cache_for_cached_func(
 def clear_cache_for_cached_func(
-    cached_func: Callable[P, R], arg_getter, recalculate: bool, instance: S, *args, **kwargs
-):
-    args = arg_getter(instance)
-    cache_key = cache_key_for_cached_func(cached_func, *args)
+    cached_func: Callable[[*Ts], R],
+    arg_getter: Callable[[S], tuple[*Ts]],
+    recalculate: bool,
+    instance: S,
+    *args: object,
+    **kwargs: object,
+) -> None:
+    func_args = arg_getter(instance)
+    cache_key = cache_key_for_cached_func(cached_func, *func_args)
     if recalculate:
     if recalculate:
-        cache.set(cache_key, cached_func(*args))
+        cache.set(cache_key, cached_func(*func_args))
     else:
     else:
         cache.delete(cache_key)
         cache.delete(cache_key)
 
 
 
 
 def cache_func_for_models(
 def cache_func_for_models(
-    cache_invalidators: list[tuple[type[S], Callable[[S], P.args]]],
+    cache_invalidators: list[tuple[type[S], Callable[[S], tuple[*Ts]]]],
     cache_ttl: None | timedelta = None,
     cache_ttl: None | timedelta = None,
     recalculate: bool = True,
     recalculate: bool = True,
-):
+) -> Callable[[Callable[[*Ts], R]], Callable[[*Ts], R]]:
     """
     """
     Decorator that caches the result of a function, and actively invalidates the result when related models are
     Decorator that caches the result of a function, and actively invalidates the result when related models are
     created/updated/deleted. To use this, decorate a function with this decorator and pass a list of `cache_invalidators`
     created/updated/deleted. To use this, decorate a function with this decorator and pass a list of `cache_invalidators`
@@ -67,11 +74,8 @@ def cache_func_for_models(
     if cache_ttl is None:
     if cache_ttl is None:
         cache_ttl = timedelta(days=7)
         cache_ttl = timedelta(days=7)
 
 
-    def cached_query_func(func_to_cache: Callable[P, R]):
-        def inner(*args: P.args, **kwargs: P.kwargs) -> R:
-            if kwargs:
-                raise ValueError("Can't cache values using kwargs")
-
+    def cached_query_func(func_to_cache: Callable[[*Ts], R]) -> Callable[[*Ts], R]:
+        def inner(*args: *Ts) -> R:
             cache_key = cache_key_for_cached_func(func_to_cache, *args)
             cache_key = cache_key_for_cached_func(func_to_cache, *args)
             cached_val = cache.get(cache_key, None)
             cached_val = cache.get(cache_key, None)
             if cached_val is None:
             if cached_val is None: