Просмотр исходного кода

ref(crons): Split processing_errors into 2 modules (#70791)

Splits out the errors from the manager. Removes the
CheckinProcessErrorsManager class (which had no state) and updates the
function names to be clearer
Evan Purkhiser 10 месяцев назад
Родитель
Commit
48d9c5c458

+ 2 - 2
src/sentry/monitors/consumers/monitor_consumer.py

@@ -42,12 +42,12 @@ from sentry.monitors.models import (
     MonitorLimitsExceeded,
     MonitorType,
 )
-from sentry.monitors.processing_errors import (
+from sentry.monitors.processing_errors.errors import (
     CheckinValidationError,
     ProcessingError,
     ProcessingErrorType,
-    handle_processing_errors,
 )
+from sentry.monitors.processing_errors.manager import handle_processing_errors
 from sentry.monitors.types import CheckinItem
 from sentry.monitors.utils import (
     get_new_timeout_at,

+ 3 - 7
src/sentry/monitors/endpoints/organization_monitor_processing_errors_index.py

@@ -11,10 +11,8 @@ from sentry.apidocs.constants import RESPONSE_FORBIDDEN, RESPONSE_NOT_FOUND, RES
 from sentry.apidocs.parameters import GlobalParams
 from sentry.apidocs.utils import inline_sentry_response_serializer
 from sentry.models.organization import Organization
-from sentry.monitors.processing_errors import (
-    CheckinProcessErrorsManager,
-    CheckinProcessingErrorData,
-)
+from sentry.monitors.processing_errors.errors import CheckinProcessingErrorData
+from sentry.monitors.processing_errors.manager import get_errors_for_projects
 from sentry.utils.auth import AuthenticatedHttpRequest
 
 
@@ -46,9 +44,7 @@ class OrganizationMonitorProcessingErrorsIndexEndpoint(OrganizationEndpoint):
         Retrieves checkin processing errors for a monitor
         """
         projects = self.get_projects(request, organization)
-        paginator = SequencePaginator(
-            list(enumerate(CheckinProcessErrorsManager().get_for_projects(projects)))
-        )
+        paginator = SequencePaginator(list(enumerate(get_errors_for_projects(projects))))
 
         return self.paginate(
             request=request,

+ 3 - 7
src/sentry/monitors/endpoints/project_monitor_processing_errors_index.py

@@ -9,10 +9,8 @@ from sentry.api.serializers import serialize
 from sentry.apidocs.constants import RESPONSE_FORBIDDEN, RESPONSE_NOT_FOUND, RESPONSE_UNAUTHORIZED
 from sentry.apidocs.parameters import GlobalParams, MonitorParams
 from sentry.apidocs.utils import inline_sentry_response_serializer
-from sentry.monitors.processing_errors import (
-    CheckinProcessErrorsManager,
-    CheckinProcessingErrorData,
-)
+from sentry.monitors.processing_errors.errors import CheckinProcessingErrorData
+from sentry.monitors.processing_errors.manager import get_errors_for_monitor
 from sentry.utils.auth import AuthenticatedHttpRequest
 
 from .base import ProjectMonitorEndpoint
@@ -46,9 +44,7 @@ class ProjectMonitorProcessingErrorsIndexEndpoint(ProjectMonitorEndpoint):
         """
         Retrieves checkin processing errors for a monitor
         """
-        paginator = SequencePaginator(
-            list(enumerate(CheckinProcessErrorsManager().get_for_monitor(monitor)))
-        )
+        paginator = SequencePaginator(list(enumerate(get_errors_for_monitor(monitor))))
 
         return self.paginate(
             request=request,

+ 2 - 2
src/sentry/monitors/endpoints/project_processing_errors_details.py

@@ -20,7 +20,7 @@ from sentry.apidocs.constants import (
 )
 from sentry.apidocs.parameters import GlobalParams, MonitorParams
 from sentry.models.project import Project
-from sentry.monitors.processing_errors import CheckinProcessErrorsManager, InvalidProjectError
+from sentry.monitors.processing_errors.manager import InvalidProjectError, delete_error
 
 from .base import ProjectMonitorPermission
 
@@ -55,7 +55,7 @@ class ProjectProcessingErrorsDetailsEndpoint(ProjectEndpoint):
         except ValueError:
             raise ValidationError("Invalid UUID")
         try:
-            CheckinProcessErrorsManager().delete(project, parsed_uuid)
+            delete_error(project, parsed_uuid)
         except InvalidProjectError:
             raise ValidationError("Invalid uuid for project")
         return self.respond(status=204)

+ 0 - 248
src/sentry/monitors/processing_errors.py

@@ -1,248 +0,0 @@
-from __future__ import annotations
-
-import dataclasses
-import logging
-import uuid
-from datetime import timedelta
-from enum import Enum
-from itertools import chain
-from typing import Any, TypedDict
-
-from django.conf import settings
-from redis.client import StrictRedis
-from rediscluster import RedisCluster
-
-from sentry import features
-from sentry.models.organization import Organization
-from sentry.models.project import Project
-from sentry.monitors.models import Monitor
-from sentry.monitors.types import CheckinItem, CheckinItemData
-from sentry.utils import json, metrics, redis
-
-logger = logging.getLogger(__name__)
-
-MAX_ERRORS_PER_SET = 10
-MONITOR_ERRORS_LIFETIME = timedelta(days=7)
-
-
-class ProcessingErrorType(Enum):
-    CHECKIN_ENVIRONMENT_MISMATCH = 0
-    """The environment sent with the checkin update doesn't match the environment already associated with the checkin"""
-    CHECKIN_FINISHED = 1
-    """The checkin was already completed and we attempted to modify it"""
-    CHECKIN_GUID_PROJECT_MISMATCH = 2
-    """The guid for the checkin matched a checkin that was related to a different project than the one provided in the DSN"""
-    CHECKIN_INVALID_DURATION = 3
-    """We dropped a checkin due to invalid duration"""
-    CHECKIN_INVALID_GUID = 4
-    """GUID passed with checkin is invalid"""
-    CHECKIN_VALIDATION_FAILED = 5
-    """Checkin format was invalid"""
-    MONITOR_DISABLED = 6
-    """Monitor was disabled for a non-billing related reason"""
-    MONITOR_DISABLED_NO_QUOTA = 7
-    """Monitor was disabled and we couldn't assign a seat"""
-    MONITOR_INVALID_CONFIG = 8
-    """A monitor wasn't found, and we failed to upsert due to invalid config"""
-    MONITOR_INVALID_ENVIRONMENT = 9
-    """The environment information passed with the checkin was invalid"""
-    MONITOR_LIMIT_EXCEEDED = 10
-    """The maximum number of monitors allowed per project has been exceeded"""
-    MONITOR_NOT_FOUND = 11
-    """Monitor with the provided slug doesn't exist, and either no or invalid upsert data provided"""
-    MONITOR_OVER_QUOTA = 12
-    """This monitor can't accept checkins and is over quota"""
-    MONITOR_ENVIRONMENT_LIMIT_EXCEEDED = 13
-    """The monitor has too many environments associated with it already, can't add another"""
-    MONITOR_ENVIRONMENT_RATELIMITED = 14
-    """This monitor environment is sending checkins too frequently"""
-    ORGANIZATION_KILLSWITCH_ENABLED = 15
-    """We have disabled checkin ingestion for this org. Contact support for details"""
-
-
-class CheckinValidationError(Exception):
-    def __init__(self, processing_errors: list[ProcessingError], monitor: Monitor | None = None):
-        # Monitor is optional, since we don't always have the monitor related to the checkin available
-        self.processing_errors = processing_errors
-        self.monitor = monitor
-
-
-class ProcessingErrorData(TypedDict):
-    type: str
-    data: dict[str, Any]
-
-
-@dataclasses.dataclass(frozen=True)
-class ProcessingError:
-    type: ProcessingErrorType
-    data: dict[str, Any] = dataclasses.field(default_factory=dict)
-
-    def to_dict(self) -> ProcessingErrorData:
-        return {
-            "type": self.type.name,
-            "data": self.data,
-        }
-
-    @classmethod
-    def from_dict(cls, processing_error_data: ProcessingErrorData) -> ProcessingError:
-        return cls(
-            ProcessingErrorType[processing_error_data["type"]],
-            processing_error_data["data"],
-        )
-
-
-class CheckinProcessingErrorData(TypedDict):
-    errors: list[ProcessingErrorData]
-    checkin: CheckinItemData
-    id: str
-
-
-@dataclasses.dataclass(frozen=True)
-class CheckinProcessingError:
-    errors: list[ProcessingError]
-    checkin: CheckinItem
-    id: uuid.UUID = dataclasses.field(default_factory=uuid.uuid4)
-
-    def to_dict(self) -> CheckinProcessingErrorData:
-        return {
-            "errors": [error.to_dict() for error in self.errors],
-            "checkin": self.checkin.to_dict(),
-            "id": self.id.hex,
-        }
-
-    @classmethod
-    def from_dict(cls, data: CheckinProcessingErrorData) -> CheckinProcessingError:
-        return cls(
-            errors=[ProcessingError.from_dict(error) for error in data["errors"]],
-            checkin=CheckinItem.from_dict(data["checkin"]),
-            id=uuid.UUID(data["id"]),
-        )
-
-    def __hash__(self):
-        return hash(self.id.hex)
-
-    def __eq__(self, other):
-        if isinstance(other, CheckinProcessingError):
-            return self.id.hex == other.id.hex
-        return False
-
-
-class InvalidProjectError(Exception):
-    pass
-
-
-class CheckinProcessErrorsManager:
-    def _get_cluster(self) -> RedisCluster[str] | StrictRedis[str]:
-        return redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
-
-    def _get_entity_identifier_from_error(
-        self, error: CheckinProcessingError, monitor: Monitor | None = None
-    ) -> str:
-        if monitor is None:
-            # Attempt to get the monitor from the checkin info if we failed to retrieve it during ingestion
-            try:
-                monitor = Monitor.objects.get(
-                    project_id=error.checkin.message["project_id"],
-                    slug=error.checkin.payload["monitor_slug"],
-                )
-            except Monitor.DoesNotExist:
-                pass
-        if monitor:
-            entity_identifier = self.build_monitor_identifier(monitor)
-        else:
-            entity_identifier = self.build_project_identifier(error.checkin.message["project_id"])
-
-        return entity_identifier
-
-    def store(self, error: CheckinProcessingError, monitor: Monitor | None):
-        entity_identifier = self._get_entity_identifier_from_error(error, monitor)
-        error_set_key = self.build_set_identifier(entity_identifier)
-        error_key = self.build_error_identifier(error.id)
-        serialized_error = json.dumps(error.to_dict())
-        redis_client = self._get_cluster()
-        pipeline = redis_client.pipeline(transaction=False)
-        pipeline.zadd(error_set_key, {error.id.hex: error.checkin.ts.timestamp()})
-        pipeline.set(error_key, serialized_error, ex=MONITOR_ERRORS_LIFETIME)
-        # Cap the error list to the `MAX_ERRORS_PER_SET` most recent errors
-        pipeline.zremrangebyrank(error_set_key, 0, -(MAX_ERRORS_PER_SET + 1))
-        pipeline.expire(error_set_key, MONITOR_ERRORS_LIFETIME)
-        pipeline.execute()
-
-    def build_set_identifier(self, entity_identifier: str) -> str:
-        return f"monitors.processing_errors_set.{entity_identifier}"
-
-    def build_error_identifier(self, uuid: uuid.UUID) -> str:
-        return f"monitors.processing_errors.{uuid.hex}"
-
-    def build_monitor_identifier(self, monitor: Monitor) -> str:
-        return f"monitor:{monitor.id}"
-
-    def get_for_monitor(self, monitor: Monitor) -> list[CheckinProcessingError]:
-        return self._get_for_entities([self.build_monitor_identifier(monitor)])
-
-    def build_project_identifier(self, project_id: int) -> str:
-        return f"project:{project_id}"
-
-    def get_for_projects(self, projects: list[Project]) -> list[CheckinProcessingError]:
-        return self._get_for_entities(
-            [self.build_project_identifier(project.id) for project in projects]
-        )
-
-    def delete(self, project: Project, uuid: uuid.UUID):
-        error_identifier = self.build_error_identifier(uuid)
-        redis = self._get_cluster()
-        raw_error = redis.get(error_identifier)
-        if raw_error is None:
-            return
-        error = CheckinProcessingError.from_dict(json.loads(raw_error))
-        if error.checkin.message["project_id"] != project.id:
-            # TODO: Better exception class
-            raise InvalidProjectError()
-
-        entity_identifier = self._get_entity_identifier_from_error(error)
-        self._delete_for_entity(entity_identifier, uuid)
-
-    def _get_for_entities(self, entity_identifiers: list[str]) -> list[CheckinProcessingError]:
-        redis = self._get_cluster()
-        pipeline = redis.pipeline()
-        for identifier in entity_identifiers:
-            pipeline.zrange(self.build_set_identifier(identifier), 0, MAX_ERRORS_PER_SET, desc=True)
-        error_identifiers = [
-            self.build_error_identifier(uuid.UUID(error_identifier))
-            for error_identifier in chain(*pipeline.execute())
-        ]
-        errors = [
-            CheckinProcessingError.from_dict(json.loads(raw_error))
-            for raw_error in redis.mget(error_identifiers)
-            if raw_error is not None
-        ]
-        errors.sort(key=lambda error: error.checkin.ts.timestamp(), reverse=True)
-        return errors
-
-    def _delete_for_entity(self, entity_identifier: str, uuid: uuid.UUID) -> None:
-        pipeline = self._get_cluster().pipeline()
-        pipeline.zrem(self.build_set_identifier(entity_identifier), uuid.hex)
-        pipeline.delete(self.build_error_identifier(uuid))
-        pipeline.execute()
-
-
-def handle_processing_errors(item: CheckinItem, error: CheckinValidationError):
-    try:
-        project = Project.objects.get_from_cache(id=item.message["project_id"])
-        organization = Organization.objects.get_from_cache(id=project.organization_id)
-        if not features.has("organizations:crons-write-user-feedback", organization):
-            return
-
-        metrics.incr(
-            "monitors.checkin.handle_processing_error",
-            tags={
-                "source": "consumer",
-                "sdk_platform": item.message["sdk"],
-            },
-        )
-
-        checkin_processing_error = CheckinProcessingError(error.processing_errors, item)
-        manager = CheckinProcessErrorsManager()
-        manager.store(checkin_processing_error, error.monitor)
-    except Exception:
-        logger.exception("Failed to log processing error")

+ 114 - 0
src/sentry/monitors/processing_errors/errors.py

@@ -0,0 +1,114 @@
+from __future__ import annotations
+
+import dataclasses
+import logging
+import uuid
+from enum import Enum
+from typing import Any, TypedDict
+
+from sentry.monitors.models import Monitor
+from sentry.monitors.types import CheckinItem, CheckinItemData
+
+logger = logging.getLogger(__name__)
+
+
+class ProcessingErrorType(Enum):
+    CHECKIN_ENVIRONMENT_MISMATCH = 0
+    """The environment sent with the checkin update doesn't match the environment already associated with the checkin"""
+    CHECKIN_FINISHED = 1
+    """The checkin was already completed and we attempted to modify it"""
+    CHECKIN_GUID_PROJECT_MISMATCH = 2
+    """The guid for the checkin matched a checkin that was related to a different project than the one provided in the DSN"""
+    CHECKIN_INVALID_DURATION = 3
+    """We dropped a checkin due to invalid duration"""
+    CHECKIN_INVALID_GUID = 4
+    """GUID passed with checkin is invalid"""
+    CHECKIN_VALIDATION_FAILED = 5
+    """Checkin format was invalid"""
+    MONITOR_DISABLED = 6
+    """Monitor was disabled for a non-billing related reason"""
+    MONITOR_DISABLED_NO_QUOTA = 7
+    """Monitor was disabled and we couldn't assign a seat"""
+    MONITOR_INVALID_CONFIG = 8
+    """A monitor wasn't found, and we failed to upsert due to invalid config"""
+    MONITOR_INVALID_ENVIRONMENT = 9
+    """The environment information passed with the checkin was invalid"""
+    MONITOR_LIMIT_EXCEEDED = 10
+    """The maximum number of monitors allowed per project has been exceeded"""
+    MONITOR_NOT_FOUND = 11
+    """Monitor with the provided slug doesn't exist, and either no or invalid upsert data provided"""
+    MONITOR_OVER_QUOTA = 12
+    """This monitor can't accept checkins and is over quota"""
+    MONITOR_ENVIRONMENT_LIMIT_EXCEEDED = 13
+    """The monitor has too many environments associated with it already, can't add another"""
+    MONITOR_ENVIRONMENT_RATELIMITED = 14
+    """This monitor environment is sending checkins too frequently"""
+    ORGANIZATION_KILLSWITCH_ENABLED = 15
+    """We have disabled checkin ingestion for this org. Contact support for details"""
+
+
+class CheckinValidationError(Exception):
+    def __init__(self, processing_errors: list[ProcessingError], monitor: Monitor | None = None):
+        # Monitor is optional, since we don't always have the monitor related to the checkin available
+        self.processing_errors = processing_errors
+        self.monitor = monitor
+
+
+class ProcessingErrorData(TypedDict):
+    type: str
+    data: dict[str, Any]
+
+
+@dataclasses.dataclass(frozen=True)
+class ProcessingError:
+    type: ProcessingErrorType
+    data: dict[str, Any] = dataclasses.field(default_factory=dict)
+
+    def to_dict(self) -> ProcessingErrorData:
+        return {
+            "type": self.type.name,
+            "data": self.data,
+        }
+
+    @classmethod
+    def from_dict(cls, processing_error_data: ProcessingErrorData) -> ProcessingError:
+        return cls(
+            ProcessingErrorType[processing_error_data["type"]],
+            processing_error_data["data"],
+        )
+
+
+class CheckinProcessingErrorData(TypedDict):
+    errors: list[ProcessingErrorData]
+    checkin: CheckinItemData
+    id: str
+
+
+@dataclasses.dataclass(frozen=True)
+class CheckinProcessingError:
+    errors: list[ProcessingError]
+    checkin: CheckinItem
+    id: uuid.UUID = dataclasses.field(default_factory=uuid.uuid4)
+
+    def to_dict(self) -> CheckinProcessingErrorData:
+        return {
+            "errors": [error.to_dict() for error in self.errors],
+            "checkin": self.checkin.to_dict(),
+            "id": self.id.hex,
+        }
+
+    @classmethod
+    def from_dict(cls, data: CheckinProcessingErrorData) -> CheckinProcessingError:
+        return cls(
+            errors=[ProcessingError.from_dict(error) for error in data["errors"]],
+            checkin=CheckinItem.from_dict(data["checkin"]),
+            id=uuid.UUID(data["id"]),
+        )
+
+    def __hash__(self):
+        return hash(self.id.hex)
+
+    def __eq__(self, other):
+        if isinstance(other, CheckinProcessingError):
+            return self.id.hex == other.id.hex
+        return False

+ 153 - 0
src/sentry/monitors/processing_errors/manager.py

@@ -0,0 +1,153 @@
+from __future__ import annotations
+
+import logging
+import uuid
+from datetime import timedelta
+from itertools import chain
+
+from django.conf import settings
+from redis.client import StrictRedis
+from rediscluster import RedisCluster
+
+from sentry import features
+from sentry.models.organization import Organization
+from sentry.models.project import Project
+from sentry.monitors.models import Monitor
+from sentry.monitors.types import CheckinItem
+from sentry.utils import json, metrics, redis
+
+from .errors import CheckinProcessingError, CheckinValidationError
+
+logger = logging.getLogger(__name__)
+
+MAX_ERRORS_PER_SET = 10
+MONITOR_ERRORS_LIFETIME = timedelta(days=7)
+
+
+class InvalidProjectError(Exception):
+    pass
+
+
+def _get_cluster() -> RedisCluster | StrictRedis[str]:
+    return redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
+
+
+def build_set_identifier(entity_identifier: str) -> str:
+    return f"monitors.processing_errors_set.{entity_identifier}"
+
+
+def build_error_identifier(uuid: uuid.UUID) -> str:
+    return f"monitors.processing_errors.{uuid.hex}"
+
+
+def build_monitor_identifier(monitor: Monitor) -> str:
+    return f"monitor:{monitor.id}"
+
+
+def build_project_identifier(project_id: int) -> str:
+    return f"project:{project_id}"
+
+
+def _get_entity_identifier_from_error(
+    error: CheckinProcessingError,
+    monitor: Monitor | None = None,
+) -> str:
+    if monitor is None:
+        # Attempt to get the monitor from the checkin info if we failed to retrieve it during ingestion
+        try:
+            monitor = Monitor.objects.get(
+                project_id=error.checkin.message["project_id"],
+                slug=error.checkin.payload["monitor_slug"],
+            )
+        except Monitor.DoesNotExist:
+            pass
+    if monitor:
+        entity_identifier = build_monitor_identifier(monitor)
+    else:
+        entity_identifier = build_project_identifier(error.checkin.message["project_id"])
+
+    return entity_identifier
+
+
+def _get_for_entities(entity_identifiers: list[str]) -> list[CheckinProcessingError]:
+    redis = _get_cluster()
+    pipeline = redis.pipeline()
+    for identifier in entity_identifiers:
+        pipeline.zrange(build_set_identifier(identifier), 0, MAX_ERRORS_PER_SET, desc=True)
+    error_identifiers = [
+        build_error_identifier(uuid.UUID(error_identifier))
+        for error_identifier in chain(*pipeline.execute())
+    ]
+    errors = [
+        CheckinProcessingError.from_dict(json.loads(raw_error))
+        for raw_error in redis.mget(error_identifiers)
+        if raw_error is not None
+    ]
+    errors.sort(key=lambda error: error.checkin.ts.timestamp(), reverse=True)
+    return errors
+
+
+def _delete_for_entity(entity_identifier: str, uuid: uuid.UUID) -> None:
+    pipeline = _get_cluster().pipeline()
+    pipeline.zrem(build_set_identifier(entity_identifier), uuid.hex)
+    pipeline.delete(build_error_identifier(uuid))
+    pipeline.execute()
+
+
+def store_error(error: CheckinProcessingError, monitor: Monitor | None):
+    entity_identifier = _get_entity_identifier_from_error(error, monitor)
+    error_set_key = build_set_identifier(entity_identifier)
+    error_key = build_error_identifier(error.id)
+    serialized_error = json.dumps(error.to_dict())
+    redis_client = _get_cluster()
+    pipeline = redis_client.pipeline(transaction=False)
+    pipeline.zadd(error_set_key, {error.id.hex: error.checkin.ts.timestamp()})
+    pipeline.set(error_key, serialized_error, ex=MONITOR_ERRORS_LIFETIME)
+    # Cap the error list to the `MAX_ERRORS_PER_SET` most recent errors
+    pipeline.zremrangebyrank(error_set_key, 0, -(MAX_ERRORS_PER_SET + 1))
+    pipeline.expire(error_set_key, MONITOR_ERRORS_LIFETIME)
+    pipeline.execute()
+
+
+def delete_error(project: Project, uuid: uuid.UUID):
+    error_identifier = build_error_identifier(uuid)
+    redis = _get_cluster()
+    raw_error = redis.get(error_identifier)
+    if raw_error is None:
+        return
+    error = CheckinProcessingError.from_dict(json.loads(raw_error))
+    if error.checkin.message["project_id"] != project.id:
+        # TODO: Better exception class
+        raise InvalidProjectError()
+
+    entity_identifier = _get_entity_identifier_from_error(error)
+    _delete_for_entity(entity_identifier, uuid)
+
+
+def get_errors_for_monitor(monitor: Monitor) -> list[CheckinProcessingError]:
+    return _get_for_entities([build_monitor_identifier(monitor)])
+
+
+def get_errors_for_projects(projects: list[Project]) -> list[CheckinProcessingError]:
+    return _get_for_entities([build_project_identifier(project.id) for project in projects])
+
+
+def handle_processing_errors(item: CheckinItem, error: CheckinValidationError):
+    try:
+        project = Project.objects.get_from_cache(id=item.message["project_id"])
+        organization = Organization.objects.get_from_cache(id=project.organization_id)
+        if not features.has("organizations:crons-write-user-feedback", organization):
+            return
+
+        metrics.incr(
+            "monitors.checkin.handle_processing_error",
+            tags={
+                "source": "consumer",
+                "sdk_platform": item.message["sdk"],
+            },
+        )
+
+        checkin_processing_error = CheckinProcessingError(error.processing_errors, item)
+        store_error(checkin_processing_error, error.monitor)
+    except Exception:
+        logger.exception("Failed to log processing error")

+ 4 - 1
src/sentry/monitors/serializers.py

@@ -17,7 +17,10 @@ from sentry.monitors.models import (
     MonitorIncident,
     MonitorStatus,
 )
-from sentry.monitors.processing_errors import CheckinProcessingError, CheckinProcessingErrorData
+from sentry.monitors.processing_errors.errors import (
+    CheckinProcessingError,
+    CheckinProcessingErrorData,
+)
 from sentry.monitors.utils import fetch_associated_groups
 from sentry.monitors.validators import IntervalNames
 from sentry.types.actor import Actor

+ 1 - 1
src/sentry/monitors/testutils.py

@@ -2,7 +2,7 @@ from datetime import datetime
 
 from sentry_kafka_schemas.schema_types.ingest_monitors_v1 import CheckIn
 
-from sentry.monitors.processing_errors import (
+from sentry.monitors.processing_errors.errors import (
     CheckinProcessingError,
     ProcessingError,
     ProcessingErrorType,

+ 1 - 1
tests/sentry/monitors/consumers/test_monitor_consumer.py

@@ -29,7 +29,7 @@ from sentry.monitors.models import (
     MonitorType,
     ScheduleType,
 )
-from sentry.monitors.processing_errors import (
+from sentry.monitors.processing_errors.errors import (
     CheckinValidationError,
     ProcessingError,
     ProcessingErrorType,

Некоторые файлы не были показаны из-за большого количества измененных файлов