
ref(lint): Fix typos (#26199)

Marcos Gaeta · 3 years ago · parent commit e76b626b01

+ 1 - 1
src/sentry/auth/superuser.py

@@ -255,7 +255,7 @@ class Superuser:
             "exp": self.expires.strftime("%s"),
             "idl": (current_datetime + IDLE_MAX_AGE).strftime("%s"),
             "tok": self.token,
-            # XXX(dcramer): do we really need the uid safety m echanism
+            # XXX(dcramer): do we really need the uid safety mechanism
             "uid": self.uid,
         }
 

+ 16 - 18
src/sentry/demo/data_population.py

@@ -140,7 +140,7 @@ def distribution_v5(hour: int) -> int:
     return 1
 
 
-distrubtion_fns = [
+distribution_fns = [
     distribution_v1,
     distribution_v2,
     distribution_v3,
@@ -299,7 +299,7 @@ def clean_event(event_json):
 
 def fix_spans(event_json, old_span_id):
     """
-    This function does the folowing:
+    This function does the following:
     1. Give spans fresh span_ids & update the parent span ids accordingly
     2. Update span offsets and durations based on transaction duration and some randomness
     """
@@ -320,7 +320,7 @@ def fix_spans(event_json, old_span_id):
                 # set the new parent
                 span["parent_span_id"] = new_parent_id
 
-                # generate a new id and set the replacement mappping
+                # generate a new id and set the replacement mapping
                 new_id = uuid4().hex[:16]
                 update_id_map[span["span_id"]] = new_id
 
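For context, the remapping this hunk belongs to can be sketched in isolation. Here `spans` is a hypothetical list of span dicts, and the two-pass structure is an assumption for clarity rather than the file's actual control flow:

import uuid

def remap_span_ids(spans):
    # first pass: give every span a fresh 16-char hex id,
    # recording the old -> new replacement mapping
    update_id_map = {}
    for span in spans:
        new_id = uuid.uuid4().hex[:16]
        update_id_map[span["span_id"]] = new_id
        span["span_id"] = new_id
    # second pass: repoint parent ids at the new ids, leaving
    # references to spans outside this list untouched
    for span in spans:
        parent = span.get("parent_span_id")
        if parent in update_id_map:
            span["parent_span_id"] = update_id_map[parent]
    return spans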
@@ -377,22 +377,20 @@ def fix_spans(event_json, old_span_id):
             if i == last_index:
                 duration = remaining_time
             else:
-                # the max duration should give some breathging room to the remaining spans
+                # the max duration should give some breathing room to the remaining spans
                 max_duration = remaining_time - (avg_span_length / 4.0) * (last_index - i)
                 # pick a random length for the span that's at most 2x the average span length
                 duration = min(max_duration, random.uniform(0, 2 * avg_span_length))
             span["data"]["duration"] = duration
             span["start_timestamp"] = event_json["start_timestamp"] + span_offset
             span.setdefault("timestamp", span["start_timestamp"] + duration)
-            # calcualate the next span offset
+            # calculate the next span offset
             span_offset = duration + span_offset
             id_list.append(span["span_id"])
 
 
 def fix_measurements(event_json):
-    """
-    Convert measurment data from durations into timestamps
-    """
+    """ Convert measurement data from durations into timestamps. """
     measurements = event_json.get("measurements")
 
     if measurements:
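The duration logic in this hunk reserves a quarter of the average span length for each span still to come, then draws a random length capped at twice the average. The same allocation as a standalone sketch (a hypothetical helper, simplified from the hunk):

import random

def allocate_durations(total_time, n_spans):
    """Split total_time across n_spans; the last span takes the remainder."""
    avg_span_length = total_time / n_spans
    remaining_time = total_time
    durations = []
    for i in range(n_spans):
        if i == n_spans - 1:
            duration = remaining_time
        else:
            # breathing room: a quarter of the average for each later span
            max_duration = remaining_time - (avg_span_length / 4.0) * (n_spans - 1 - i)
            duration = min(max_duration, random.uniform(0, 2 * avg_span_length))
        durations.append(duration)
        remaining_time -= duration
    return durations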
@@ -410,7 +408,7 @@ def update_context(event, trace=None):
     # delete device since we aren't mocking it (yet)
     if "device" in context:
         del context["device"]
-    # generate ranndom browser and os
+    # generate random browser and os
     context.update(**gen_base_context())
     # add our trace info
     base_trace = context.get("trace", {})
@@ -482,7 +480,7 @@ class DataPopulation:
         beta = config["DURATION_BETA"]
         return MIN_FRONTEND_DURATION / 1000.0 + random.gammavariate(alpha, beta) / (1 + day_weight)
 
-    def fix_breadrumbs(self, event_json):
+    def fix_breadcrumbs(self, event_json):
         """
         Fixes the timestamps on breadcrumbs to match the current time
         Evenly spaces out all breadcrumbs starting at BREADCRUMB_LOOKBACK_TIME ago
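Even spacing over a lookback window, as the docstring describes, might look like this. A sketch only: the constant's value and the breadcrumb shape are assumptions, not taken from the diff:

from datetime import datetime, timedelta, timezone

BREADCRUMB_LOOKBACK_TIME = timedelta(minutes=5)  # assumed value

def space_breadcrumbs(breadcrumbs):
    """Evenly space breadcrumb timestamps over the lookback window, ending now."""
    now = datetime.now(timezone.utc)
    step = BREADCRUMB_LOOKBACK_TIME / max(len(breadcrumbs), 1)
    start = now - BREADCRUMB_LOOKBACK_TIME
    for i, crumb in enumerate(breadcrumbs):
        crumb["timestamp"] = (start + i * step).isoformat()
    return breadcrumbs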
@@ -503,7 +501,7 @@ class DataPopulation:
     def fix_timestamps(self, event_json):
         """
         Convert a time zone aware datetime timestamps to a POSIX timestamp
-        for an evnet
+        for an event.
         """
         event_json["timestamp"] = to_timestamp(event_json["timestamp"])
         start_timestamp = event_json.get("start_timestamp")
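For the conversion itself, the standard library's `datetime.timestamp()` performs the timezone-aware-to-POSIX step; the diff's `to_timestamp` is a Sentry utility, so this stdlib version is only an equivalent sketch:

from datetime import datetime, timezone

aware = datetime(2021, 5, 25, 12, 0, tzinfo=timezone.utc)
posix = aware.timestamp()  # 1621944000.0 -- seconds since the Unix epoch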
@@ -512,7 +510,7 @@ class DataPopulation:
 
     def fix_error_event(self, event_json):
         self.fix_timestamps(event_json)
-        self.fix_breadrumbs(event_json)
+        self.fix_breadcrumbs(event_json)
 
     def fix_transaction_event(self, event_json, old_span_id):
         self.fix_timestamps(event_json)
@@ -535,13 +533,13 @@ class DataPopulation:
         """
         Creates an envelope payload for a session and posts it to Relay
         """
-        formated_time = time.isoformat()
+        formatted_time = time.isoformat()
         envelope_headers = "{}"
         item_headers = json.dumps({"type": "session"})
         data = {
             "sid": sid,
             "did": str(user_id),
-            "started": formated_time,
+            "started": formatted_time,
             "duration": random.randrange(2, 60),
             "attrs": {
                 "release": release,
@@ -732,13 +730,13 @@ class DataPopulation:
                 member = random.choice(org_members)
                 GroupAssignee.objects.assign(group, member.user)
 
-    def iter_timestamps(self, disribution_fn_num: int, starting_release: int = 0):
+    def iter_timestamps(self, distribution_fn_num: int, starting_release: int = 0):
         """
         Yields a series of ordered timestamps and the day in a tuple
         """
 
-        # disribution_fn_num starts at 1 instead of 0
-        distribution_fn = distrubtion_fns[disribution_fn_num - 1]
+        # distribution_fn_num starts at 1 instead of 0
+        distribution_fn = distribution_fns[distribution_fn_num - 1]
 
         config = self.get_config()
         MAX_DAYS = config["MAX_DAYS"]
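The 1-based lookup fixed here is easy to get wrong; in isolation it is just the following (with stand-in distribution functions, not the module's own):

distribution_fns = [
    lambda hour: 1,         # stand-in for distribution_v1
    lambda hour: hour % 4,  # stand-in for distribution_v2
]

def pick_distribution(distribution_fn_num):
    # distribution_fn_num starts at 1 instead of 0
    return distribution_fns[distribution_fn_num - 1]

assert pick_distribution(1)(hour=10) == 1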
@@ -950,7 +948,7 @@ class DataPopulation:
         This function populates a set of two related events with the same trace id:
         - Front-end transaction
         - Back-end transaction
-        Occurrance times and durations are randomized
+        Occurrence times and durations are randomized
         """
         react_transaction = get_event_from_file("scen2/react_transaction.json")
         python_transaction = get_event_from_file("scen2/python_transaction.json")

+ 1 - 1
src/sentry/demo/demo_start.py

@@ -80,7 +80,7 @@ class DemoStartView(BaseView):
         auth.login(request, user)
         resp = self.redirect(get_redirect_url(request, org))
 
-        # set a cookie of whether the user accepteed tracking so we know
+        # set a cookie of whether the user accepted tracking so we know
         # whether to initialize analytics when accepted_tracking=1
         # 0 means don't show the footer to accept cookies (user already declined)
         # no value means we show the footer to accept cookies (user has neither accepted nor declined)
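Reading the three states back out, per the comment's contract (a sketch; cookies are shown as a plain dict rather than a Django request):

def tracking_state(cookies):
    value = cookies.get("accepted_tracking")
    if value == "1":
        return "accepted"   # initialize analytics
    if value == "0":
        return "declined"   # user already declined; hide the consent footer
    return "undecided"      # show the footer so the user can accept or decline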

+ 1 - 1
src/sentry/demo/tasks.py

@@ -100,7 +100,7 @@ def build_up_org_buffer():
     num_to_populate = ORG_BUFFER_SIZE - num_orgs
     logger.info("build_up_org_buffer.check", extra={"num_to_populate": num_to_populate})
 
-    # synchronnously build up our org buffer if under sized
+    # synchronously build up our org buffer if under sized
     if num_to_populate > 0:
         create_demo_org()
         build_up_org_buffer.apply_async()
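Stripped of the Celery re-enqueue, the comment's intent is a fill-until-target loop (a hypothetical standalone sketch):

def fill_org_buffer(num_orgs, buffer_size, create_demo_org):
    # keep creating demo orgs until the buffer reaches its target size
    while num_orgs < buffer_size:
        create_demo_org()
        num_orgs += 1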

+ 1 - 1
src/sentry/event_manager.py

@@ -1168,7 +1168,7 @@ def _find_existing_group_id(
         # tombstone may get ignored entirely if there is another hash *before*
         # that happens to have a group_id. This bug may not have been noticed
         # for a long time because most events only ever have 1-2 hashes. It
-        # will definetly get more noticeable with hierarchical grouping and
+        # will definitely get more noticeable with hierarchical grouping and
         # it's not clear what good behavior would look like. Do people want to
         # be able to tombstone `hierarchical_hashes[4]` while still having a
         # group attached to `hierarchical_hashes[0]`? Maybe.
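A toy model of the shadowing this comment describes (not Sentry's actual lookup): the first hash carrying a group id ends the scan, so a tombstone on a later hash is never consulted.

def first_group_id(hashes):
    # each item is a hypothetical (group_id, tombstoned) pair
    for group_id, tombstoned in hashes:
        if group_id is not None:
            return group_id   # an earlier match wins...
        if tombstoned:
            return None       # ...so this branch may never be reached

assert first_group_id([(42, False), (None, True)]) == 42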

+ 2 - 2
src/sentry/integrations/slack/tasks.py

@@ -37,7 +37,7 @@ class RedisRuleStatus:
 
         cluster_id = getattr(settings, "SENTRY_RULE_TASK_REDIS_CLUSTER", "default")
         self.client = redis_clusters.get(cluster_id)
-        self._set_inital_value()
+        self._set_initial_value()
 
     @property
     def uuid(self):
@@ -55,7 +55,7 @@ class RedisRuleStatus:
     def _generate_uuid(self):
         return uuid4().hex
 
-    def _set_inital_value(self):
+    def _set_initial_value(self):
         value = json.dumps({"status": "pending"})
         self.client.set(self._get_redis_key(), f"{value}", ex=60 * 60, nx=True)
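`nx=True` makes this a set-if-absent, so re-running the initializer never overwrites a status another worker already wrote, and `ex` bounds the key's lifetime; redis-py's `set` supports both keywords directly (a sketch assuming a local Redis and a hypothetical key name):

import json

import redis

client = redis.Redis()
value = json.dumps({"status": "pending"})
# ex=3600 seconds; nx=True: write only if the key does not exist yet
client.set("rule-status:abc123", value, ex=60 * 60, nx=True)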
 

+ 1 - 1
src/sentry/plugins/providers/repository.py

@@ -19,7 +19,7 @@ class RepositoryProvider(ProviderMixin):
     """
     Plugin Repository Provider
     Includes all plugins such as those in sentry-plugins repo
-    as well as any outside plugin respoitories (i.e. Trello, Youtrack).
+    as well as any outside plugin repositories (i.e. Trello, Youtrack).
     Does not include the integrations in the sentry repository.
     """
 

+ 1 - 1
src/sentry/utils/retries.py

@@ -72,7 +72,7 @@ class ConditionalRetryPolicy(RetryPolicy[T]):
     callable throws an exception.
 
     The test function takes two arguments: the number of times the callable
-    has unsuccesfully been invoked, and the exception instance that was
+    has unsuccessfully been invoked, and the exception instance that was
     raised during the last execution attempt. This function is expected to
     return a boolean: if the value is ``True``, the callable will be retried;
     if the value is ``False``, the callable will not be retried and the
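A test function matching that two-argument contract could be as small as this (a hypothetical example, not from the module):

def should_retry(attempts: int, exception: Exception) -> bool:
    # retry timeouts up to three times; give up on anything else
    return attempts < 3 and isinstance(exception, TimeoutError)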

+ 1 - 1
src/sentry/utils/session_store.py

@@ -8,7 +8,7 @@ EXPIRATION_TTL = 10 * 60
 
 class RedisSessionStore:
     """
-    RedisSessionStore provides a convenience object, which when initalized will
+    RedisSessionStore provides a convenience object, which when initialized will
     store attributes assigned to it into redis. The redis key is stored into
     the request session. Useful for storing data too large to be stored into
     the session cookie.
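The attribute-to-redis idea can be modeled with `__setattr__`/`__getattr__` over any backing mapping (a toy analogue, not the class's real API):

import json

class ToySessionStore:
    def __init__(self, backend, key):
        # bypass our own __setattr__ while wiring up internals
        object.__setattr__(self, "_backend", backend)
        object.__setattr__(self, "_key", key)

    def __setattr__(self, name, value):
        state = json.loads(self._backend.get(self._key, "{}"))
        state[name] = value
        self._backend[self._key] = json.dumps(state)

    def __getattr__(self, name):
        return json.loads(self._backend.get(self._key, "{}"))[name]

backend = {}                                  # stand-in for redis
store = ToySessionStore(backend, "session:abc")
store.next_url = "/organizations/demo/"       # persisted to the backend, not the object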

+ 1 - 1
src/sentry/utils/strings.py

@@ -100,7 +100,7 @@ def soft_hyphenate(value, length, hyphen="\u00ad"):
 def soft_break(value, length, process=lambda chunk: chunk):
     """
     Encourages soft breaking of text values above a maximum length by adding
-    zero-width spaces after common delimeters, as well as soft-hyphenating long
+    zero-width spaces after common delimiters, as well as soft-hyphenating long
     identifiers.
     """
     delimiters = re.compile(r"([{}]+)".format("".join(map(re.escape, ",.$:/+@!?()<>[]{}"))))
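In use, the delimiter pass appends a zero-width space after every delimiter run so long identifiers can wrap; a sketch of just that pass, reusing the same pattern:

import re

ZWSP = "\u200b"  # zero-width space
delimiters = re.compile(r"([{}]+)".format("".join(map(re.escape, ",.$:/+@!?()<>[]{}"))))

def add_soft_breaks(value):
    # append a zero-width break opportunity after each delimiter run
    return delimiters.sub(r"\1" + ZWSP, value)

wrapped = add_soft_breaks("sentry.utils.strings.soft_break")
# each "." now carries an invisible break opportunity after it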

Some files were not shown because too many files changed in this diff