Browse Source

fix(py3): Bump redis to 3.3.11 and redis-py-cluster to latest master (#20717)

* chore(py3): Bump redis to 3.3.11

We're seeing issues with Celery in OSS. The hope is that upgrading Celery to the
latest version will pick up upstream fixes for these issues.

The latest Celery requires redis==3.3.11. Bumping redis first so that we can isolate changes.
Dan Fuller 4 years ago
parent
commit
c5c69f5bb1

+ 4 - 2
requirements-base.txt

@@ -44,8 +44,10 @@ python-u2flib-server>=5.0.0,<6.0.0
 PyYAML>=5.3,<5.4
 qrcode>=6.1.0,<6.2.0
 rb>=1.7.0,<2.0.0
-redis-py-cluster==1.3.6
-redis==2.10.6
+# redis-py-cluster==2.1.0 isn't released yet, so just pinning to the current latest commit
+# in the repo.
+redis-py-cluster @ https://github.com/Grokzen/redis-py-cluster/archive/d3ef787e033e0bbf97d0b3d17fc46cb5ae36d660.zip#egg=redis-py-cluster
+redis==3.3.11
 requests-oauthlib==1.2.0
 requests[security]>=2.20.0,<2.21.0
 # [start] jsonschema format validators

+ 1 - 1
src/sentry/buffer/redis.py

@@ -191,7 +191,7 @@ class RedisBuffer(Buffer):
             pipe.hset(key, "s", "1")
 
         pipe.expire(key, self.key_expire)
-        pipe.zadd(pending_key, time(), key)
+        pipe.zadd(pending_key, {key: time()})
         pipe.execute()
 
         metrics.incr(

+ 3 - 3
src/sentry/utils/redis.py

@@ -13,7 +13,7 @@ from pkg_resources import resource_string
 from redis.client import Script, StrictRedis
 from redis.connection import ConnectionPool, Encoder
 from redis.exceptions import ConnectionError, BusyLoadingError
-from rediscluster import StrictRedisCluster
+from rediscluster import RedisCluster
 
 from sentry import options
 from sentry.exceptions import InvalidConfiguration
@@ -78,7 +78,7 @@ class _RBCluster(object):
         return "Redis Blaster Cluster"
 
 
-class RetryingStrictRedisCluster(StrictRedisCluster):
+class RetryingRedisCluster(RedisCluster):
     """
     Execute a command with cluster reinitialization retry logic.
 
@@ -118,7 +118,7 @@ class _RedisCluster(object):
         # make TCP connections on boot. Wrap the client in a lazy proxy object.
         def cluster_factory():
             if config.get("is_redis_cluster", False):
-                return RetryingStrictRedisCluster(
+                return RetryingRedisCluster(
                     startup_nodes=hosts,
                     decode_responses=True,
                     skip_full_coverage_check=True,

+ 5 - 8
tests/sentry/buffer/redis/tests.py

@@ -26,8 +26,7 @@ class RedisBufferTest(TestCase):
     def test_process_pending_one_batch(self, process_incr):
         self.buf.incr_batch_size = 5
         with self.buf.cluster.map() as client:
-            client.zadd("b:p", 1, "foo")
-            client.zadd("b:p", 2, "bar")
+            client.zadd("b:p", {"foo": 1, "bar": 2})
         self.buf.process_pending()
         assert len(process_incr.apply_async.mock_calls) == 1
         process_incr.apply_async.assert_any_call(kwargs={"batch_keys": ["foo", "bar"]})
@@ -39,9 +38,7 @@ class RedisBufferTest(TestCase):
     def test_process_pending_multiple_batches(self, process_incr):
         self.buf.incr_batch_size = 2
         with self.buf.cluster.map() as client:
-            client.zadd("b:p", 1, "foo")
-            client.zadd("b:p", 2, "bar")
-            client.zadd("b:p", 3, "baz")
+            client.zadd("b:p", {"foo": 1, "bar": 2, "baz": 3})
         self.buf.process_pending()
         assert len(process_incr.apply_async.mock_calls) == 2
         process_incr.apply_async.assert_any_call(kwargs={"batch_keys": ["foo", "bar"]})
@@ -126,9 +123,9 @@ class RedisBufferTest(TestCase):
     def test_process_pending_partitions_none(self, process_pending, process_incr):
         self.buf.pending_partitions = 2
         with self.buf.cluster.map() as client:
-            client.zadd("b:p:0", 1, "foo")
-            client.zadd("b:p:1", 1, "bar")
-            client.zadd("b:p", 1, "baz")
+            client.zadd("b:p:0", {"foo": 1})
+            client.zadd("b:p:1", {"bar": 1})
+            client.zadd("b:p", {"baz": 1})
 
         # On first pass, we are expecting to do:
         # * process the buffer that doesn't have a partition (b:p)

+ 1 - 1
tests/sentry/utils/locking/backends/test_redis.py

@@ -29,7 +29,7 @@ class RedisLockBackendTestCase(TestCase):
         assert duration - 2 < float(client.ttl(full_key)) <= duration
 
         self.backend.release(key)
-        assert client.exists(full_key) is False
+        assert not client.exists(full_key)
 
     def test_acquire_fail_on_conflict(self):
         key = "lock"

+ 3 - 3
tests/sentry/utils/test_redis.py

@@ -40,9 +40,9 @@ class ClusterManagerTestCase(TestCase):
         with pytest.raises(KeyError):
             manager.get("invalid")
 
-    @mock.patch("sentry.utils.redis.RetryingStrictRedisCluster")
+    @mock.patch("sentry.utils.redis.RetryingRedisCluster")
     @mock.patch("sentry.utils.redis.StrictRedis")
-    def test_specific_cluster(self, StrictRedis, RetryingStrictRedisCluster):
+    def test_specific_cluster(self, StrictRedis, RetryingRedisCluster):
         manager = make_manager(cluster_type=_RedisCluster)
 
         # We wrap the cluster in a Simple Lazy Object, force creation of the
@@ -51,7 +51,7 @@ class ClusterManagerTestCase(TestCase):
         # cluster foo is fine since it's a single node
         assert manager.get("foo")._setupfunc() is StrictRedis.return_value
         # baz works becasue it's explicitly is_redis_cluster
-        assert manager.get("baz")._setupfunc() is RetryingStrictRedisCluster.return_value
+        assert manager.get("baz")._setupfunc() is RetryingRedisCluster.return_value
 
         # bar is not a valid redis or redis cluster definition
         # becasue it is two hosts, without explicitly saying is_redis_cluster