View Source Code

ref: delete more misc dead code (#84852)

Removes more miscellaneous dead code with no remaining references: the `drop_outcomes` and `make_filename` helpers, the `EventPermission` class, unused session-window constants, an unused error string, the `SecretField`/`validate_secret` field, two frame-filename helpers, `BROWNOUT_LENGTH`, and the `span_analysis` module.
anthony sottile 1 month ago
Parent
Commit
39b9077895

+ 0 - 4
bin/mock-outcomes

@@ -61,9 +61,5 @@ def generate_outcomes():
                 )
 
 
-def drop_outcomes():
-    assert requests.post(settings.SENTRY_SNUBA + "/tests/entities/outcomes/drop").status_code == 200
-
-
 if __name__ == "__main__":
     generate_outcomes()

+ 0 - 6
bin/mock-replay

@@ -6,7 +6,6 @@ from sentry.runner import configure
 from sentry.utils.json import dumps_htmlsafe
 
 configure()
-import pathlib
 import uuid
 from datetime import datetime, timedelta
 
@@ -88,11 +87,6 @@ def store_replay_segments(replay_id: str, project_id: int, segment_id: int, segm
     )
 
 
-def make_filename(filename: str) -> str:
-    parent_dir = pathlib.Path(__file__).parent.resolve()
-    return f"{parent_dir}/rrweb-output/{filename}"
-
-
 def main():
     project_name = "Replay Test Project"
 

+ 0 - 15
src/sentry/api/bases/event.py

@@ -1,15 +0,0 @@
-from rest_framework.request import Request
-
-from sentry.api.bases.project import ProjectPermission
-
-
-class EventPermission(ProjectPermission):
-    scope_map = {
-        "GET": ["event:read", "event:write", "event:admin"],
-        "POST": ["event:write", "event:admin"],
-        "PUT": ["event:write", "event:admin"],
-        "DELETE": ["event:admin"],
-    }
-
-    def has_object_permission(self, request: Request, view, event):
-        return super().has_object_permission(request, view, event.project)
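As a side note, a runnable toy illustration of what the removed scope_map encoded; the at-least-one-matching-scope check and the is_allowed helper are assumptions for illustration, not copied from Sentry's permission machinery.

scope_map = {
    "GET": ["event:read", "event:write", "event:admin"],
    "POST": ["event:write", "event:admin"],
    "PUT": ["event:write", "event:admin"],
    "DELETE": ["event:admin"],
}

def is_allowed(method: str, token_scopes: set[str]) -> bool:
    # A request passes if its token holds at least one scope allowed for the method
    # (assumed semantics; the real base class may differ).
    return bool(set(scope_map.get(method, [])) & token_scopes)

assert is_allowed("GET", {"event:read"})
assert not is_allowed("DELETE", {"event:read"})
assert is_allowed("DELETE", {"event:admin"})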

+ 0 - 5
src/sentry/api/endpoints/release_thresholds/utils/fetch_sessions_data.py

@@ -13,11 +13,6 @@ from sentry.models.organization import Organization
 from sentry.organizations.services.organization.model import RpcOrganization
 from sentry.snuba.sessions_v2 import QueryDefinition
 
-# In minutes
-TWO_WEEKS = 20160
-ONE_WEEK = 10080
-TWENTY_FOUR_HOURS = 1440
-
 
 def fetch_sessions_data(
     request: Request,

+ 0 - 3
src/sentry/api/endpoints/relocations/unpause.py

@@ -22,9 +22,6 @@ ERR_NOT_UNPAUSABLE_STATUS = Template(
     """Relocations can only be unpaused if they are already paused; this relocation is
     `$status`."""
 )
-ERR_COULD_NOT_UNPAUSE_RELOCATION = (
-    "Could not unpause relocation, perhaps because it is no longer in-progress."
-)
 
 logger = logging.getLogger(__name__)
 

+ 0 - 1
src/sentry/api/fields/__init__.py

@@ -3,7 +3,6 @@ from .avatar import *  # noqa: F401,F403
 from .empty_decimal import *  # noqa: F401,F403
 from .empty_integer import *  # noqa: F401,F403
 from .multiplechoice import *  # noqa: F401,F403
-from .secret import *  # noqa: F401,F403
 from .sentry_slug import *  # noqa: F401,F403
 from .serializedfile import *  # noqa: F401,F403
 from .user import *  # noqa: F401,F403

+ 0 - 51
src/sentry/api/fields/secret.py

@@ -1,51 +0,0 @@
-from rest_framework import serializers
-
-
-class SecretField(serializers.Field):
-    """
-    A validator for a secret-containing field whose values are either a string or a magic object.
-
-    This validator should be used when secrets are known to have previous values already stored on
-    the server, i.e. they may be in the form of a magic object representing a redacted secret.
-    """
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.string_field = serializers.CharField(min_length=1, max_length=512, *args, **kwargs)
-        self.magic_object_field = serializers.DictField(
-            child=serializers.BooleanField(required=True), allow_empty=False, *args, **kwargs
-        )
-
-    def to_representation(self, obj):
-        if isinstance(obj, dict):
-            return self.magic_object_field.to_representation(obj)
-        return self.string_field.to_representation(obj)
-
-    def to_internal_value(self, data):
-        if isinstance(data, dict):
-            return self.magic_object_field.to_internal_value(data)
-        self.string_field.run_validation(data)
-        return self.string_field.to_internal_value(data)
-
-
-def validate_secret(secret: str | dict[str, bool] | None) -> str | dict[str, bool] | None:
-    """
-    Validates the contents of a field containing a secret that may have a magic object representing
-    some existing value already stored on the server.
-
-    Returns None if the magic object is found, indicating that the field should be back-filled by
-    that existing value.
-    """
-
-    if not secret:
-        return secret
-
-    # If an object was returned then it must be the special value representing the currently
-    # stored secret, i.e. no change was made to it
-    if isinstance(secret, dict):
-        if secret.get("hidden-secret") is True:
-            return None
-        raise serializers.ValidationError("Invalid magic object for secret")
-
-    # Field validation should have already checked everything else.
-    return secret
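For reference, a small runnable sketch of the back-fill contract described in the docstrings above. The helper is re-stated minimally so the example runs standalone; the surrounding names and values are illustrative, not Sentry code.

from rest_framework import serializers


def validate_secret(secret):
    # Minimal re-statement of the removed helper so the example runs standalone.
    if not secret:
        return secret
    if isinstance(secret, dict):
        if secret.get("hidden-secret") is True:
            return None  # "no change": caller keeps the previously stored value
        raise serializers.ValidationError("Invalid magic object for secret")
    return secret


stored_token = "existing-token"       # value already persisted server-side
incoming = {"hidden-secret": True}    # what a client sends for an untouched secret

validated = validate_secret(incoming)
new_token = stored_token if validated is None else validated
assert new_token == "existing-token"  # redacted secret is back-filled, not overwritten
assert validate_secret("fresh-token") == "fresh-token"  # plain strings pass through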

+ 0 - 25
src/sentry/api/helpers/actionable_items_helper.py

@@ -70,28 +70,3 @@ deprecated_event_errors = [
     EventError.JS_NO_COLUMN,
     EventError.TOO_LARGE_FOR_CACHE,
 ]
-
-
-def get_file_extension(filename):
-    segments = filename.split(".")
-    if len(segments) > 1:
-        return segments[-1]
-    return None
-
-
-def is_frame_filename_valid(frame):
-    filename = frame.abs_path
-    if not filename:
-        return False
-    try:
-        filename = filename.split("/")[-1]
-    except Exception:
-        pass
-
-    if frame.filename == "<anonymous>" and frame.in_app:
-        return False
-    elif frame.function in fileNameBlocklist:
-        return False
-    elif filename and not get_file_extension(filename):
-        return False
-    return True
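For reference, a standalone sketch of the filename checks the removed helpers performed; the Frame dataclass and the blocklist value are stand-ins for Sentry's frame objects and fileNameBlocklist, chosen only so the example runs on its own.

from dataclasses import dataclass

FILENAME_BLOCKLIST = {"@webkit-masked-url"}  # stand-in value, not the real blocklist


@dataclass
class Frame:
    abs_path: str | None = None
    filename: str | None = None
    function: str | None = None
    in_app: bool = False


def get_file_extension(filename: str) -> str | None:
    segments = filename.split(".")
    return segments[-1] if len(segments) > 1 else None


def is_frame_filename_valid(frame: Frame) -> bool:
    if not frame.abs_path:
        return False
    basename = frame.abs_path.split("/")[-1]
    if frame.filename == "<anonymous>" and frame.in_app:
        return False
    if frame.function in FILENAME_BLOCKLIST:
        return False
    if basename and not get_file_extension(basename):
        return False
    return True


assert is_frame_filename_valid(Frame(abs_path="/srv/app/bundle.js", filename="bundle.js"))
assert not is_frame_filename_valid(Frame(abs_path="/srv/app/bundle"))  # no file extension
assert not is_frame_filename_valid(Frame(abs_path="a.js", filename="<anonymous>", in_app=True))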

+ 0 - 1
src/sentry/api/helpers/deprecation.py

@@ -20,7 +20,6 @@ from sentry import options
 from sentry.options import UnknownOption
 from sentry.utils.settings import is_self_hosted
 
-BROWNOUT_LENGTH = timedelta(days=30)
 GONE_MESSAGE = {"message": "This API no longer exists."}
 DEPRECATION_HEADER = "X-Sentry-Deprecation-Date"
 SUGGESTED_API_HEADER = "X-Sentry-Replacement-Endpoint"

+ 0 - 96
src/sentry/api/helpers/span_analysis.py

@@ -1,96 +0,0 @@
-from typing import Any, TypedDict
-
-
-class Row(TypedDict):
-    span_op: str
-    span_group: str
-    transaction_count: int
-    p95_self_time: float
-    sample_event_id: str
-    span_count: int
-    period: str
-
-
-class AugmentedData(Row):
-    span_key: str
-    relative_freq: float
-    score: float
-
-
-def span_analysis(data: list[Row]):
-
-    # create a unique identifier for each span
-    span_keys = [row["span_op"] + "," + row["span_group"] for row in data]
-
-    # number of occurrences of a span/transaction
-    count_col = [row["span_count"] for row in data]
-    txn_count = [row["transaction_count"] for row in data]
-    p95_self_time = [row["p95_self_time"] for row in data]
-
-    # add in two new fields
-    # 1. relative freq - the avg number of times a span occurs per transaction
-    # 2. score - a nondescriptive metric to evaluate the span (relative freq * avg duration)
-    relative_freq = [count_col[x] / txn_count[x] for x in range(len(count_col))]
-    score_col = [relative_freq[x] * p95_self_time[x] for x in range(len(relative_freq))]
-
-    data_frames: list[AugmentedData] = [
-        {
-            **data[i],
-            "relative_freq": relative_freq[i],
-            "score": score_col[i],
-            "span_key": span_keys[i],
-        }
-        for i in range(len(data))
-    ]
-
-    # create two dataframes for period 0 and 1 and keep only the same spans in both periods
-    span_data_p0 = {row["span_key"]: row for row in data_frames if row["period"] == "before"}
-    span_data_p1 = {row["span_key"]: row for row in data_frames if row["period"] == "after"}
-
-    all_keys = set(span_data_p0.keys()).union(span_data_p1.keys())
-
-    # merge the dataframes to do span analysis
-    problem_spans: list[Any] = []
-
-    # Perform the join operation
-    for key in all_keys:
-        row1 = span_data_p0.get(key)
-        row2 = span_data_p1.get(key)
-        new_span = False
-        score_delta = 0.0
-
-        if row1 and row2:
-            score_delta = row2["score"] - row1["score"]
-            freq_delta = row2["relative_freq"] - row1["relative_freq"]
-            duration_delta = row2["p95_self_time"] - row1["p95_self_time"]
-        elif row2:
-            score_delta = row2["score"]
-            freq_delta = row2["relative_freq"]
-            duration_delta = row2["p95_self_time"]
-            new_span = True
-
-        # We're only interested in span changes if they positively impacted duration
-        if score_delta > 0:
-            sample_event_id = row1 and row1["sample_event_id"] or row2 and row2["sample_event_id"]
-            if not sample_event_id:
-                continue
-
-            problem_spans.append(
-                {
-                    "span_op": key.split(",")[0],
-                    "span_group": key.split(",")[1],
-                    "sample_event_id": sample_event_id,
-                    "score_delta": score_delta,
-                    "freq_before": row1["relative_freq"] if row1 else 0,
-                    "freq_after": row2["relative_freq"] if row2 else 0,
-                    "freq_delta": freq_delta,
-                    "duration_delta": duration_delta,
-                    "duration_before": row1["p95_self_time"] if row1 else 0,
-                    "duration_after": row2["p95_self_time"] if row2 else 0,
-                    "is_new_span": new_span,
-                }
-            )
-
-    problem_spans.sort(key=lambda x: x["score_delta"], reverse=True)
-
-    return problem_spans
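For reference, a tiny worked example (made-up numbers) of the scoring the comments above describe: score is relative frequency (span_count / transaction_count) multiplied by p95 self time, and a span is reported when its score grows from the "before" to the "after" period.

# Toy before/after rows for one span key; all numbers are invented for illustration.
before = {"span_count": 100, "transaction_count": 50, "p95_self_time": 20.0}
after = {"span_count": 300, "transaction_count": 50, "p95_self_time": 30.0}


def score(row: dict) -> float:
    relative_freq = row["span_count"] / row["transaction_count"]  # avg occurrences per txn
    return relative_freq * row["p95_self_time"]


score_delta = score(after) - score(before)  # (6 * 30.0) - (2 * 20.0) = 140.0
assert score_delta == 140.0  # positive delta -> this span would be flagged as a problem span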