
ref(async-csv): Remove billing references (#17518)

Clear out any references to billing from existing async export code (+ tests and migration)
Leander Rodrigues · 5 years ago · parent commit 78cbcbbc6c

+ 1 - 1
migrations_lockfile.txt

@@ -10,7 +10,7 @@ auth: 0008_alter_user_username_max_length
 contenttypes: 0002_remove_content_type_name
 jira_ac: 0001_initial
 nodestore: 0001_initial
-sentry: 0049_auto_20200304_0254
+sentry: 0050_auto_20200306_2346
 sessions: 0001_initial
 sites: 0002_alter_domain_unique
 social_auth: 0001_initial

+ 15 - 26
src/sentry/constants.py

@@ -437,46 +437,35 @@ class SentryAppInstallationStatus(object):
 
 
 class ExportQueryType(object):
-    DISCOVER_V2 = 0
-    BILLING_REPORT = 1
-    ISSUE_BY_TAG = 2
-    DISCOVER_V2_STR = "Discover"
-    BILLING_REPORT_STR = "Billing Report"
-    ISSUE_BY_TAG_STR = "Issues-by-Tag"
+    ISSUES_BY_TAG = 0
+    DISCOVER = 1
+    ISSUES_BY_TAG_STR = "Issues-by-Tag"
+    DISCOVER_STR = "Discover"
 
     @classmethod
     def as_choices(cls):
-        return (
-            (cls.DISCOVER_V2, cls.DISCOVER_V2_STR),
-            (cls.BILLING_REPORT, cls.BILLING_REPORT_STR),
-            (cls.ISSUE_BY_TAG, cls.ISSUE_BY_TAG_STR),
-        )
+        return ((cls.ISSUES_BY_TAG, cls.ISSUES_BY_TAG_STR), (cls.DISCOVER, cls.DISCOVER_STR))
 
     @classmethod
     def as_str_choices(cls):
         return (
-            (cls.DISCOVER_V2_STR, cls.DISCOVER_V2_STR),
-            (cls.BILLING_REPORT_STR, cls.BILLING_REPORT_STR),
-            (cls.ISSUE_BY_TAG_STR, cls.ISSUE_BY_TAG_STR),
+            (cls.ISSUES_BY_TAG_STR, cls.ISSUES_BY_TAG_STR),
+            (cls.DISCOVER_STR, cls.DISCOVER_STR),
         )
 
     @classmethod
     def as_str(cls, integer):
-        if integer == cls.DISCOVER_V2:
-            return cls.DISCOVER_V2_STR
-        elif integer == cls.BILLING_REPORT:
-            return cls.BILLING_REPORT_STR
-        elif integer == cls.ISSUE_BY_TAG:
-            return cls.ISSUE_BY_TAG_STR
+        if integer == cls.ISSUES_BY_TAG:
+            return cls.ISSUES_BY_TAG_STR
+        elif integer == cls.DISCOVER:
+            return cls.DISCOVER_STR
 
     @classmethod
     def from_str(cls, string):
-        if string == cls.DISCOVER_V2_STR:
-            return cls.DISCOVER_V2
-        elif string == cls.BILLING_REPORT_STR:
-            return cls.BILLING_REPORT
-        elif string == cls.ISSUE_BY_TAG_STR:
-            return cls.ISSUE_BY_TAG
+        if string == cls.ISSUES_BY_TAG_STR:
+            return cls.ISSUES_BY_TAG
+        elif string == cls.DISCOVER_STR:
+            return cls.DISCOVER
 
 
 StatsPeriod = namedtuple("StatsPeriod", ("segments", "interval"))
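The renamed constants keep the same int-to-string round trip the export code relies on. A minimal sketch of that mapping, assuming only the class as shown in the hunk above:

    from sentry.constants import ExportQueryType

    # Integers are what ExportedData.query_type stores; the strings are what the
    # API and frontend exchange.
    assert ExportQueryType.as_str(ExportQueryType.ISSUES_BY_TAG) == "Issues-by-Tag"
    assert ExportQueryType.from_str("Discover") == ExportQueryType.DISCOVER

    # as_choices() presumably feeds Django's choices= kwarg in the migration below:
    # ((0, "Issues-by-Tag"), (1, "Discover"))
    print(ExportQueryType.as_choices())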

+ 38 - 0
src/sentry/migrations/0050_auto_20200306_2346.py

@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.28 on 2020-03-06 23:46
+from __future__ import unicode_literals
+
+from django.db import migrations
+import sentry.db.models.fields.bounded
+
+
+class Migration(migrations.Migration):
+    # This flag is used to mark that a migration shouldn't be automatically run in
+    # production. We set this to True for operations that we think are risky and want
+    # someone from ops to run manually and monitor.
+    # General advice is that if in doubt, mark your migration as `is_dangerous`.
+    # Some things you should always mark as dangerous:
+    # - Large data migrations. Typically we want these to be run manually by ops so that
+    #   they can be monitored. Since data migrations will now hold a transaction open
+    #   this is even more important.
+    # - Adding columns to highly active tables, even ones that are NULL.
+    is_dangerous = False
+
+    # This flag is used to decide whether to run this migration in a transaction or not.
+    # By default we prefer to run in a transaction, but for migrations where you want
+    # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
+    # want to create an index concurrently when adding one to an existing table.
+    atomic = True
+
+
+    dependencies = [
+        ('sentry', '0049_auto_20200304_0254'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='exporteddata',
+            name='query_type',
+            field=sentry.db.models.fields.bounded.BoundedPositiveIntegerField(choices=[(0, b'Issues-by-Tag'), (1, b'Discover')]),
+        ),
+    ]
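For context, the field this migration alters presumably reads its choices straight from the constants above. A hedged sketch of the ExportedData field definition (everything except query_type and the choices is an assumption, not taken from this commit):

    from sentry.constants import ExportQueryType
    from sentry.db.models.fields.bounded import BoundedPositiveIntegerField

    # Assumed shape of the field on the ExportedData model:
    query_type = BoundedPositiveIntegerField(choices=ExportQueryType.as_choices())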

+ 1 - 1
src/sentry/static/sentry/app/components/dataExport.tsx

@@ -11,7 +11,7 @@ import withOrganization from 'app/utils/withOrganization';
 
 type DataExportPayload = {
   // Coordinate with ExportQueryType string (src/sentry/constants.py)
-  queryType: 'Discover' | 'Billing Report' | 'Issues-by-Tag';
+  queryType: 'Issues-by-Tag' | 'Discover';
   queryInfo: any; // TODO(ts): Formalize different possible payloads
 };
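Server-side, the queryType string from this payload is presumably mapped back to the stored integer via ExportQueryType.from_str. A rough Python sketch of that translation (the helper name and argument are illustrative, not taken from this commit):

    from sentry.constants import ExportQueryType

    def resolve_query_type(query_type_str):
        # "Issues-by-Tag" -> 0, "Discover" -> 1; anything else is rejected.
        query_type = ExportQueryType.from_str(query_type_str)
        if query_type is None:
            raise ValueError("Unknown query type: %r" % (query_type_str,))
        return query_type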
 

+ 12 - 18
src/sentry/tasks/data_export.py

@@ -30,18 +30,17 @@ def assemble_download(data_export_id):
         logger.info("dataexport.start", extra={"data_export_id": data_export_id})
         data_export = ExportedData.objects.get(id=data_export_id)
     except ExportedData.DoesNotExist as error:
-        return capture_exception(error)
+        capture_exception(error)
+        return
 
     # Create a temporary file
     try:
         with tempfile.TemporaryFile() as tf:
             # Process the query based on its type
-            if data_export.query_type == ExportQueryType.DISCOVER_V2:
-                file_name = process_discover_v2(data_export, tf)
-            elif data_export.query_type == ExportQueryType.BILLING_REPORT:
-                file_name = process_billing_report(data_export, tf)
-            elif data_export.query_type == ExportQueryType.ISSUE_BY_TAG:
+            if data_export.query_type == ExportQueryType.ISSUES_BY_TAG:
                 file_name = process_issue_by_tag(data_export, tf)
+            elif data_export.query_type == ExportQueryType.DISCOVER:
+                file_name = process_discover(data_export, tf)
             # Create a new File object and attach it to the ExportedData
             tf.seek(0)
             try:
@@ -72,16 +71,6 @@ def assemble_download(data_export_id):
         return data_export.email_failure(message="Internal processing failure")
 
 
-def process_discover_v2(data_export, file):
-    # TODO(Leander): Implement processing for Discover V2
-    raise NotImplementedError("Discover V2 processing has not been implemented yet")
-
-
-def process_billing_report(data_export, file):
-    # TODO(Leander): Implement processing for Billing Reports
-    raise NotImplementedError("Billing report processing has not been implemented yet")
-
-
 def process_issue_by_tag(data_export, file, limit=None):
     """
     Convert the tag query to a CSV, writing it to the provided file.
@@ -134,9 +123,9 @@ def process_issue_by_tag(data_export, file, limit=None):
         callbacks = []
         fields = ["value", "times_seen", "last_seen", "first_seen"]
 
-    # Example file name: ISSUE_BY_TAG-project10-user__721.csv
+    # Example file name: Issues-by-Tag-project10-user__721.csv
     file_details = six.text_type("{}-{}__{}").format(project.slug, key, data_export.id)
-    file_name = get_file_name(ExportQueryType.ISSUE_BY_TAG_STR, file_details)
+    file_name = get_file_name(ExportQueryType.ISSUES_BY_TAG_STR, file_details)
 
     # Iterate through all the GroupTagValues
     writer = create_writer(file, fields)
@@ -166,6 +155,11 @@ def process_issue_by_tag(data_export, file, limit=None):
     return file_name
 
 
+def process_discover(data_export, file):
+    # TODO(Leander): Implement processing for Discover
+    raise NotImplementedError("Discover processing has not been implemented yet")
+
+
 def create_writer(file, fields):
     writer = csv.DictWriter(file, fields)
     writer.writeheader()
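assemble_download above dispatches on query_type with an if/elif chain. For illustration only (the committed code is exactly as shown in the hunk), an equivalent table-driven dispatch over the two remaining query types could look like this:

    from sentry.constants import ExportQueryType

    # Hypothetical alternative, not part of this commit:
    PROCESSORS = {
        ExportQueryType.ISSUES_BY_TAG: process_issue_by_tag,
        ExportQueryType.DISCOVER: process_discover,
    }

    def process_export(data_export, tf):
        processor = PROCESSORS.get(data_export.query_type)
        if processor is None:
            raise ValueError("Unsupported query_type: %r" % (data_export.query_type,))
        return processor(data_export, tf)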

+ 1 - 1
tests/sentry/api/endpoints/test_data_export.py

@@ -11,7 +11,7 @@ from sentry.testutils import APITestCase
 class DataExportTest(APITestCase):
     endpoint = "sentry-api-0-organization-data-export"
     method = "post"
-    payload = {"query_type": "Discover", "query_info": {"env": "test"}}
+    payload = {"query_type": "Issues-by-Tag", "query_info": {"env": "test"}}
 
     def setUp(self):
         self.user = self.create_user("user1@example.com")

+ 1 - 1
tests/sentry/models/test_exporteddata.py

@@ -23,7 +23,7 @@ class ExportedDataTest(TestCase):
         self.user = self.create_user()
         self.organization = self.create_organization()
         self.data_export = ExportedData.objects.create(
-            user=self.user, organization=self.organization, query_type=2, query_info={"env": "test"}
+            user=self.user, organization=self.organization, query_type=0, query_info={"env": "test"}
         )
         self.file1 = File.objects.create(
             name="tempfile-data-export", type="export.csv", headers={"Content-Type": "text/csv"}

+ 4 - 4
tests/sentry/tasks/test_data_export.py

@@ -37,7 +37,7 @@ class AssembleDownloadTest(TestCase, SnubaTestCase):
         de1 = ExportedData.objects.create(
             user=self.user,
             organization=self.org,
-            query_type=2,
+            query_type=0,
             query_info={
                 "project_id": self.project.id,
                 "group_id": self.event.group_id,
@@ -58,7 +58,7 @@ class AssembleDownloadTest(TestCase, SnubaTestCase):
         de2 = ExportedData.objects.create(
             user=self.user,
             organization=self.org,
-            query_type=2,
+            query_type=0,
             query_info={
                 "project_id": self.project.id,
                 "group_id": self.event.group_id,
@@ -81,7 +81,7 @@ class AssembleDownloadTest(TestCase, SnubaTestCase):
         de1 = ExportedData.objects.create(
             user=self.user,
             organization=self.org,
-            query_type=2,
+            query_type=0,
             query_info={"project_id": -1, "group_id": self.event.group_id, "key": "user"},
         )
         with self.tasks():
@@ -92,7 +92,7 @@ class AssembleDownloadTest(TestCase, SnubaTestCase):
         de2 = ExportedData.objects.create(
             user=self.user,
             organization=self.org,
-            query_type=2,
+            query_type=0,
             query_info={"project_id": self.project.id, "group_id": -1, "key": "user"},
         )
         with self.tasks():
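These tests now hard-code query_type=0, which maps to ExportQueryType.ISSUES_BY_TAG after this change. A purely illustrative variant of the same setup using the constant instead of the literal (a suggestion, not part of this commit):

    from sentry.constants import ExportQueryType

    de = ExportedData.objects.create(
        user=self.user,
        organization=self.org,
        query_type=ExportQueryType.ISSUES_BY_TAG,  # == 0 after this change
        query_info={"project_id": self.project.id, "group_id": -1, "key": "user"},
    )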