Browse Source

Revert "nodestore: support bulk cleanup through Riak TTLs"

This reverts commit 93a6971a8ef29c92bae802529bdf4f133dda3b2e.
Matt Robenolt 6 years ago
parent
commit
203cc2bfbc

+ 1 - 16
src/sentry/db/deletion.py

@@ -8,19 +8,16 @@ from django.db import connections, router
 from django.utils import timezone
 
 from sentry.utils import db
-from sentry.db.models.fields.node import NodeField
 
 
 class BulkDeleteQuery(object):
-    def __init__(self, model, project_id=None, dtfield=None, days=None,
-                 order_by=None, skip_nodestore=False):
+    def __init__(self, model, project_id=None, dtfield=None, days=None, order_by=None):
         self.model = model
         self.project_id = int(project_id) if project_id else None
         self.dtfield = dtfield
         self.days = int(days) if days is not None else None
         self.order_by = order_by
         self.using = router.db_for_write(model)
-        self.skip_nodestore = skip_nodestore
 
     def execute_postgres(self, chunk_size=10000):
         quote_name = connections[self.using].ops.quote_name
@@ -100,24 +97,12 @@ class BulkDeleteQuery(object):
         return qs
 
     def _continuous_generic_query(self, query, chunk_size):
-        # Detect which fields, if any, should be ignored
-        # from nodestore deletion.
-        node_fields = []
-        if self.skip_nodestore:
-            for f in self.model._meta.fields:
-                if isinstance(f, NodeField):
-                    node_fields.append(f.name)
         # XXX: we step through because the deletion collector will pull all
         # relations into memory
         exists = True
         while exists:
             exists = False
             for item in query[:chunk_size].iterator():
-                # Setting nodestore ids to None will prevent
-                # deletion. This is used in the case when nodestore
-                # has its own cleanup.
-                for f in node_fields:
-                    getattr(item, f).id = None
                 item.delete()
                 exists = True
 

+ 4 - 5
src/sentry/nodestore/riak/backend.py

@@ -46,8 +46,7 @@ class RiakNodeStorage(NodeStorage):
         max_retries=3,
         multiget_pool_size=5,
         tcp_keepalive=True,
-        protocol=None,
-        automatic_expiry=False
+        protocol=None
     ):
         # protocol being defined is useless, but is needed for backwards
         # compatibility and leveraged as an opportunity to yell at the user
@@ -64,7 +63,6 @@ class RiakNodeStorage(NodeStorage):
             cooldown=cooldown,
             tcp_keepalive=tcp_keepalive,
         )
-        self.automatic_expiry = automatic_expiry
 
     def set(self, id, data):
         self.conn.put(self.bucket, id, json_dumps(data), returnbody='false')
@@ -97,5 +95,6 @@ class RiakNodeStorage(NodeStorage):
         return results
 
     def cleanup(self, cutoff_timestamp):
-        if not self.automatic_expiry:
-            raise NotImplementedError
+        # TODO(dcramer): we should either index timestamps or have this run
+        # a map/reduce (probably the latter)
+        raise NotImplementedError

+ 0 - 4
src/sentry/runner/commands/cleanup.py

@@ -192,8 +192,6 @@ def cleanup(days, project, concurrency, silent, model, router, timed):
         (models.Group, 'last_seen', 'last_seen'),
     )
 
-    skip_nodestore = False
-
     if not silent:
         click.echo('Removing expired values for LostPasswordHash')
 
@@ -237,7 +235,6 @@ def cleanup(days, project, concurrency, silent, model, router, timed):
         cutoff = timezone.now() - timedelta(days=days)
         try:
             nodestore.cleanup(cutoff)
-            skip_nodestore = True
         except NotImplementedError:
             click.echo(
                 "NodeStore backend does not support cleanup operation", err=True)
@@ -267,7 +264,6 @@ def cleanup(days, project, concurrency, silent, model, router, timed):
                 days=days,
                 project_id=project_id,
                 order_by=order_by,
-                skip_nodestore=skip_nodestore,
             ).execute(chunk_size=chunk_size)
 
     for model, dtfield, order_by in DELETES: