
ref(ddm): Switch timing to distribution and update units (#60267)

Riccardo Busetti · 1 year ago
commit 33252fe830
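
For reference, the shape of the change: values that are really counts or sizes move from the timing helper to distribution, with an explicit unit where one applies. A minimal sketch of the before/after pattern, assuming the sentry.utils.metrics facade used throughout the diff (metric names and values here are hypothetical):

    from sentry.utils import metrics

    projects = ["proj-a", "proj-b"]        # hypothetical values for illustration
    payload = b"example payload bytes"

    # Before: counts and byte sizes went through the timing helper, which
    # frames every value as a duration.
    metrics.timing("example.projects_fetched", len(projects))
    metrics.timing("example.payload_size", len(payload))

    # After: the same values are recorded as distributions; byte sizes get an
    # explicit unit, plain counts stay unitless.
    metrics.distribution("example.projects_fetched", len(projects))
    metrics.distribution("example.payload_size", len(payload), unit="byte")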

+ 6 - 6
src/sentry/api/endpoints/relay/project_configs.py

@@ -210,9 +210,9 @@ class RelayProjectConfigsEndpoint(Endpoint):
                 for org_id in orgs:
                     OrganizationOption.objects.get_all_values(org_id)
 
-        metrics.timing("relay_project_configs.projects_requested", len(project_ids))
-        metrics.timing("relay_project_configs.projects_fetched", len(projects))
-        metrics.timing("relay_project_configs.orgs_fetched", len(orgs))
+        metrics.distribution("relay_project_configs.projects_requested", len(project_ids))
+        metrics.distribution("relay_project_configs.projects_fetched", len(projects))
+        metrics.distribution("relay_project_configs.orgs_fetched", len(orgs))
 
         configs: MutableMapping[str, ProjectConfig] = {}
         for public_key in public_keys:
@@ -280,9 +280,9 @@ class RelayProjectConfigsEndpoint(Endpoint):
             for key in ProjectKey.objects.filter(project_id__in=project_ids):
                 project_keys.setdefault(key.project_id, []).append(key)
 
-        metrics.timing("relay_project_configs.projects_requested", len(project_ids))
-        metrics.timing("relay_project_configs.projects_fetched", len(projects))
-        metrics.timing("relay_project_configs.orgs_fetched", len(orgs))
+        metrics.distribution("relay_project_configs.projects_requested", len(project_ids))
+        metrics.distribution("relay_project_configs.projects_fetched", len(projects))
+        metrics.distribution("relay_project_configs.orgs_fetched", len(orgs))
 
         configs: MutableMapping[str, ProjectConfig] = {}
         for project_id in project_ids:

+ 4 - 2
src/sentry/attachments/base.py

@@ -135,8 +135,10 @@ class BaseAttachmentCache:
     def set_unchunked_data(self, key, id, data, timeout=None, metrics_tags=None):
         key = ATTACHMENT_UNCHUNKED_DATA_KEY.format(key=key, id=id)
         compressed = zlib.compress(data)
-        metrics.timing("attachments.blob-size.raw", len(data), tags=metrics_tags)
-        metrics.timing("attachments.blob-size.compressed", len(compressed), tags=metrics_tags)
+        metrics.distribution("attachments.blob-size.raw", len(data), tags=metrics_tags, unit="byte")
+        metrics.distribution(
+            "attachments.blob-size.compressed", len(compressed), tags=metrics_tags, unit="byte"
+        )
         metrics.incr("attachments.received", tags=metrics_tags, skip_internal=False)
         self.inner.set(key, compressed, timeout, raw=True)
 

+ 1 - 1
src/sentry/buffer/redis.py

@@ -307,7 +307,7 @@ class RedisBuffer(Buffer):
             if not pending_buffer.empty():
                 process_incr.apply_async(kwargs={"batch_keys": pending_buffer.flush()})
 
-            metrics.timing("buffer.pending-size", keycount)
+            metrics.distribution("buffer.pending-size", keycount)
         finally:
             client.delete(lock_key)
 

+ 4 - 2
src/sentry/consumers/synchronized.py

@@ -162,21 +162,23 @@ class SynchronizedConsumer(Consumer[TStrategyPayload]):
                 remote_offsets[commit.group][commit.partition] = commit.offset
 
             if commit.orig_message_ts is not None:
-                metrics.timing(
+                metrics.distribution(
                     "commit_log_msg_latency",
                     (now - commit.orig_message_ts) * 1000,
                     tags={
                         "partition": str(commit.partition.index),
                         "group": commit.group,
                     },
+                    unit="millisecond",
                 )
-            metrics.timing(
+            metrics.distribution(
                 "commit_log_latency",
                 (now - datetime.timestamp(message.timestamp)) * 1000,
                 tags={
                     "partition": str(commit.partition.index),
                     "group": commit.group,
                 },
+                unit="millisecond",
             )
 
         self.__commit_log_consumer.close()
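
The latency metrics above take a delta in seconds and scale it to milliseconds before recording, declaring the unit explicitly. A minimal standalone sketch of that conversion, assuming the same metrics facade (metric name, tags, and timestamps are hypothetical):

    from datetime import datetime, timezone

    from sentry.utils import metrics

    # Hypothetical original message timestamp and current time, both as epoch seconds.
    message_ts = datetime(2023, 11, 20, 12, 0, 0, tzinfo=timezone.utc).timestamp()
    now = datetime.now(timezone.utc).timestamp()

    # The delta is in seconds, so multiply by 1000 and pass unit="millisecond"
    # so the stored values match the declared unit.
    metrics.distribution(
        "example.commit_log_latency",
        (now - message_ts) * 1000,
        tags={"partition": "0", "group": "example-group"},
        unit="millisecond",
    )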

+ 4 - 2
src/sentry/data_export/tasks.py

@@ -201,8 +201,10 @@ def assemble_download(
                     countdown=3,
                 )
             else:
-                metrics.timing("dataexport.row_count", next_offset, sample_rate=1.0)
-                metrics.timing("dataexport.file_size", bytes_written, sample_rate=1.0)
+                metrics.distribution("dataexport.row_count", next_offset, sample_rate=1.0)
+                metrics.distribution(
+                    "dataexport.file_size", bytes_written, sample_rate=1.0, unit="byte"
+                )
                 merge_export_blobs.delay(data_export_id)
 
 

+ 4 - 4
src/sentry/datascrubbing.py

@@ -92,16 +92,16 @@ def get_all_pii_configs(project):
 
 def scrub_data(project, event):
     for config in get_all_pii_configs(project):
-        metrics.timing(
+        metrics.distribution(
             "datascrubbing.config.num_applications", len(config.get("applications") or ())
         )
         total_rules = 0
         for selector, rules in (config.get("applications") or {}).items():
-            metrics.timing("datascrubbing.config.selectors.size", len(selector))
-            metrics.timing("datascrubbing.config.rules_per_selector.size", len(rules))
+            metrics.distribution("datascrubbing.config.selectors.size", len(selector))
+            metrics.distribution("datascrubbing.config.rules_per_selector.size", len(rules))
             total_rules += len(rules)
 
-        metrics.timing("datascrubbing.config.rules.size", total_rules)
+        metrics.distribution("datascrubbing.config.rules.size", total_rules)
 
         event = pii_strip_event(config, event)
 

+ 4 - 2
src/sentry/event_manager.py

@@ -732,7 +732,9 @@ class EventManager:
             job["received_timestamp"] - job["recorded_timestamp"],
             tags=metric_tags,
         )
-        metrics.timing("events.size.data.post_save", job["event"].size, tags=metric_tags)
+        metrics.distribution(
+            "events.size.data.post_save", job["event"].size, tags=metric_tags, unit="byte"
+        )
         metrics.incr(
             "events.post_save.normalize.errors",
             amount=len(job["data"].get("errors") or ()),
@@ -2597,7 +2599,7 @@ def _calculate_span_grouping(jobs: Sequence[Job], projects: ProjectsMapping) ->
                 groupings = event.get_span_groupings()
             groupings.write_to_event(event.data)
 
-            metrics.timing("save_event.transaction.span_count", len(groupings.results))
+            metrics.distribution("save_event.transaction.span_count", len(groupings.results))
             unique_default_hashes = set(groupings.results.values())
             metrics.incr(
                 "save_event.transaction.span_group_count.default",

+ 1 - 1
src/sentry/eventstream/kafka/protocol.py

@@ -94,7 +94,7 @@ def get_task_kwargs_for_message(value: bytes) -> Optional[dict[str, Any]]:
     dispatched.
     """
 
-    metrics.timing("eventstream.events.size.data", len(value))
+    metrics.distribution("eventstream.events.size.data", len(value), unit="byte")
     payload = json.loads(value, use_rapid_json=True)
 
     try:

+ 2 - 2
src/sentry/filestore/gcs.py

@@ -57,7 +57,7 @@ def try_repeated(func):
         try:
             result = func()
             metrics_tags.update({"success": "1"})
-            metrics.timing(metrics_key, idx, tags=metrics_tags)
+            metrics.distribution(metrics_key, idx, tags=metrics_tags)
             return result
         except (
             DataCorruption,
@@ -69,7 +69,7 @@ def try_repeated(func):
         ) as e:
             if idx >= GCS_RETRIES:
                 metrics_tags.update({"success": "0", "exception_class": e.__class__.__name__})
-                metrics.timing(metrics_key, idx, tags=metrics_tags)
+                metrics.distribution(metrics_key, idx, tags=metrics_tags)
                 raise
         idx += 1
 

+ 1 - 1
src/sentry/ingest/transaction_clusterer/rules.py

@@ -102,7 +102,7 @@ class ProjectOptionRuleStore:
         converted_rules = [list(tup) for tup in self._sort(rules)]
 
         # Track the number of rules per project.
-        metrics.timing(self._tracker, len(converted_rules))
+        metrics.distribution(self._tracker, len(converted_rules))
 
         project.update_option(self._storage, converted_rules)
 

Some files were not shown because too many files changed in this diff