Browse Source

meta(linting): Fixing flake8-logging issues (partial) (#61370)

Fixes
[flake8-logging](https://github.com/adamchainz/flake8-logging#rules)
issues. Split out from #60850 to make reviewing more manageable.
Bartek Ogryczak 1 year ago
parent
commit
0132f2bf5b

+ 6 - 10
src/sentry/issues/attributes.py

@@ -213,7 +213,7 @@ def post_save_log_group_attributes_changed(instance, sender, created, *args, **k
                     )
                     send_snapshot_values(None, instance, False)
     except Exception:
-        logger.error("failed to log group attributes after group post_save", exc_info=True)
+        logger.exception("failed to log group attributes after group post_save")
 
 
 @issue_deleted.connect(weak=False)
@@ -222,7 +222,7 @@ def on_issue_deleted_log_deleted(group, user, delete_type, **kwargs):
         _log_group_attributes_changed(Operation.DELETED, "group", "all")
         send_snapshot_values(None, group, True)
     except Exception:
-        logger.error("failed to log group attributes after group delete", exc_info=True)
+        logger.exception("failed to log group attributes after group delete")
 
 
 @issue_assigned.connect(weak=False)
@@ -231,9 +231,7 @@ def on_issue_assigned_log_group_assignee_attributes_changed(project, group, user
         _log_group_attributes_changed(Operation.UPDATED, "group_assignee", "all")
         send_snapshot_values(None, group, False)
     except Exception:
-        logger.error(
-            "failed to log group attributes after group_assignee assignment", exc_info=True
-        )
+        logger.exception("failed to log group attributes after group_assignee assignment")
 
 
 @issue_unassigned.connect(weak=False)
@@ -242,9 +240,7 @@ def on_issue_unassigned_log_group_assignee_attributes_changed(project, group, us
         _log_group_attributes_changed(Operation.DELETED, "group_assignee", "all")
         send_snapshot_values(None, group, False)
     except Exception:
-        logger.error(
-            "failed to log group attributes after group_assignee unassignment", exc_info=True
-        )
+        logger.exception("failed to log group attributes after group_assignee unassignment")
 
 
 @receiver(
@@ -257,7 +253,7 @@ def post_save_log_group_owner_changed(instance, sender, created, update_fields,
         )
         send_snapshot_values(instance.group_id, None, False)
     except Exception:
-        logger.error("failed to log group attributes after group_owner updated", exc_info=True)
+        logger.exception("failed to log group attributes after group_owner updated")
 
 
 @receiver(
@@ -268,4 +264,4 @@ def post_delete_log_group_owner_changed(instance, sender, *args, **kwargs):
         _log_group_attributes_changed(Operation.DELETED, "group_owner", "all")
         send_snapshot_values(instance.group_id, None, False)
     except Exception:
-        logger.error("failed to log group attributes after group_owner delete", exc_info=True)
+        logger.exception("failed to log group attributes after group_owner delete")

+ 3 - 2
src/sentry/issues/escalating.py

@@ -341,7 +341,6 @@ def _generate_entity_dataset_query(
     end_date: datetime,
     category: GroupCategory | None = None,
 ) -> Query:
-
     """This simply generates a query based on the passed parameters"""
     group_id_col = Column("group_id")
     proj_id_col = Column("project_id")
@@ -527,7 +526,9 @@ def manage_issue_states(
                     jsonschema.validate(snooze_details, INBOX_REASON_DETAILS)
 
                 except jsonschema.ValidationError:
-                    logging.error("Expired snooze_details invalid jsonschema", extra=snooze_details)
+                    logging.exception(
+                        "Expired snooze_details invalid jsonschema", extra=snooze_details
+                    )
 
                 data.update({"expired_snooze": snooze_details})
 

+ 4 - 1
src/sentry/issues/escalating_group_forecast.py

@@ -100,7 +100,10 @@ class EscalatingGroupForecast:
             # This should not happen, but exists as a check
             forecast_today_index = -1
             logger.error(
-                f"Forecast list index is out of range. Index: {forecast_today_index}. Date now: {date_now}. Forecast date added: {date_added}."
+                "Forecast list index is out of range. Index: %s. Date now: %s. Forecast date added: %s.",
+                forecast_today_index,
+                date_now,
+                date_added,
             )
         return escalating_forecast.forecast[forecast_today_index]
 

+ 1 - 1
src/sentry/lang/javascript/errormapping.py

@@ -131,7 +131,7 @@ def rewrite_exception(data):
                     rv = True
                     break
             except Exception as e:
-                logger.error('Failed to run processor "%s": %s', processor.vendor, e, exc_info=True)
+                logger.exception('Failed to run processor "%s": %s', processor.vendor, e)
                 data.setdefault("_metrics", {})["flag.processing.error"] = True
 
     if meta.raw():

+ 1 - 1
src/sentry/lang/native/error.py

@@ -106,7 +106,7 @@ def write_error(e, data):
         errors = data.setdefault("errors", [])
         errors.append(e.get_data())
     else:
-        logger.debug("Failed to symbolicate with native backend", exc_info=True)
+        logger.debug("Failed to symbolicate with native backend")
 
     if not e.is_user_fixable:
         data.setdefault("_metrics", {})["flag.processing.error"] = True

+ 1 - 2
src/sentry/lang/native/sources.py

@@ -517,7 +517,7 @@ def get_sources_for_project(project):
             # Source configs should be validated when they are saved. If this
             # did not happen, this indicates a bug. Record this, but do not stop
             # processing at this point.
-            logger.error("Invalid symbolicator source config", exc_info=True)
+            logger.exception("Invalid symbolicator source config")
 
     def resolve_alias(source):
         for key in source.get("sources") or ():
@@ -628,7 +628,6 @@ def redact_internal_sources_from_module(module):
     for candidate in module.get("candidates", []):
         source_id = candidate["source"]
         if is_internal_source_id(source_id):
-
             # Only keep location for sentry:project.
             if source_id != "sentry:project":
                 candidate.pop("location", None)

+ 1 - 1
src/sentry/lang/native/symbolicator.py

@@ -343,7 +343,7 @@ class SymbolicatorSession:
                 #
                 # This can happen for any network failure.
                 if attempts > MAX_ATTEMPTS:
-                    logger.error("Failed to contact symbolicator", exc_info=True)
+                    logger.exception("Failed to contact symbolicator")
                     raise
 
                 time.sleep(wait)

+ 1 - 1
src/sentry/loader/browsersdkversion.py

@@ -81,7 +81,7 @@ def get_browser_sdk_version(project_key):
     try:
         return match_selected_version_to_browser_sdk_version(selected_version)
     except Exception:
-        logger.error("error occurred while trying to read js sdk information from the registry")
+        logger.exception("error occurred while trying to read js sdk information from the registry")
         return Version(settings.JS_SDK_LOADER_SDK_VERSION)
 
 

+ 1 - 1
src/sentry/monitors/models.py

@@ -331,7 +331,7 @@ class Monitor(Model):
             jsonschema.validate(self.config, MONITOR_CONFIG)
             return self.config
         except jsonschema.ValidationError:
-            logging.exception(f"Monitor: {self.id} invalid config: {self.config}", exc_info=True)
+            logging.exception("Monitor: %s invalid config: %s", self.id, self.config)
 
     def get_alert_rule(self):
         alert_rule_id = self.config.get("alert_rule_id")

+ 1 - 1
src/sentry/nodestore/django/backend.py

@@ -33,7 +33,7 @@ class DjangoNodeStorage(NodeStorage):
 
             return None
         except Exception as e:
-            logger.exception(e)
+            logger.exception(str(e))
             return {}
 
     def _get_bytes(self, id):

Some files were not shown because too many files changed in this diff