# views.py
  1. """ Port of sentry.api.endpoints.chunk.ChunkUploadEndpoint """
  2. import logging
  3. from gzip import GzipFile
  4. from io import BytesIO
  5. from django.conf import settings
  6. from django.shortcuts import get_object_or_404
  7. from django.urls import reverse
  8. from rest_framework import status, views
  9. from rest_framework.response import Response
  10. from organizations_ext.models import Organization
  11. from .models import FileBlob
  12. from .permissions import ChunkUploadPermission
# Chunk-upload protocol parameters, advertised to clients by
# ChunkUploadAPIView.get and enforced by ChunkUploadAPIView.post.
# Force just one blob per request: a single chunk at most 32MB.
CHUNK_UPLOAD_BLOB_SIZE = 32 * 1024 * 1024  # 32MB
MAX_CHUNKS_PER_REQUEST = 1
MAX_REQUEST_SIZE = CHUNK_UPLOAD_BLOB_SIZE  # one chunk == whole request budget
MAX_CONCURRENCY = 1
HASH_ALGORITHM = "sha1"
# Upload categories accepted by the assemble endpoints.
CHUNK_UPLOAD_ACCEPT = (
    "debug_files",  # DIF assemble
    "release_files",  # Release files assemble
    "pdbs",  # PDB upload and debug id override
    "sources",  # Source artifact bundle upload
)
  25. class GzipChunk(BytesIO):
  26. def __init__(self, file):
  27. data = GzipFile(fileobj=file, mode="rb").read()
  28. self.size = len(data)
  29. self.name = file.name
  30. super().__init__(data)
  31. class ChunkUploadAPIView(views.APIView):
  32. permission_classes = [ChunkUploadPermission]
  33. def get(self, request, organization_slug):
  34. url = settings.GLITCHTIP_URL.geturl() + reverse(
  35. "chunk-upload", args=[organization_slug]
  36. )
  37. return Response(
  38. {
  39. "url": url,
  40. "chunkSize": CHUNK_UPLOAD_BLOB_SIZE,
  41. "chunksPerRequest": MAX_CHUNKS_PER_REQUEST,
  42. "maxFileSize": 2147483648,
  43. "maxRequestSize": MAX_REQUEST_SIZE,
  44. "concurrency": MAX_CONCURRENCY,
  45. "hashAlgorithm": HASH_ALGORITHM,
  46. "compression": ["gzip"],
  47. "accept": CHUNK_UPLOAD_ACCEPT,
  48. }
  49. )
  50. def post(self, request, organization_slug):
  51. logger = logging.getLogger("glitchtip.files")
  52. logger.info("chunkupload.start")
  53. organization = get_object_or_404(
  54. Organization, slug=organization_slug.lower(), users=self.request.user
  55. )
  56. self.check_object_permissions(request, organization)
  57. files = request.data.getlist("file")
  58. files += [GzipChunk(chunk) for chunk in request.data.getlist("file_gzip")]
  59. if len(files) == 0:
  60. # No files uploaded is ok
  61. logger.info("chunkupload.end", extra={"status": status.HTTP_200_OK})
  62. return Response(status=status.HTTP_200_OK)
  63. logger.info("chunkupload.post.files", extra={"len": len(files)})
  64. # Validate file size
  65. checksums = []
  66. size = 0
  67. for chunk in files:
  68. size += chunk.size
  69. if chunk.size > CHUNK_UPLOAD_BLOB_SIZE:
  70. logger.info(
  71. "chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST}
  72. )
  73. return Response(
  74. {"error": "Chunk size too large"},
  75. status=status.HTTP_400_BAD_REQUEST,
  76. )
  77. checksums.append(chunk.name)
  78. if size > MAX_REQUEST_SIZE:
  79. logger.info(
  80. "chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST}
  81. )
  82. return Response(
  83. {"error": "Request too large"}, status=status.HTTP_400_BAD_REQUEST
  84. )
  85. if len(files) > MAX_CHUNKS_PER_REQUEST:
  86. logger.info(
  87. "chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST}
  88. )
  89. return Response(
  90. {"error": "Too many chunks"}, status=status.HTTP_400_BAD_REQUEST
  91. )
  92. try:
  93. FileBlob.from_files(
  94. zip(files, checksums), organization=organization, logger=logger
  95. )
  96. except IOError as err:
  97. logger.info(
  98. "chunkupload.end", extra={"status": status.HTTP_400_BAD_REQUEST}
  99. )
  100. return Response({"error": str(err)}, status=status.HTTP_400_BAD_REQUEST)
  101. logger.info("chunkupload.end", extra={"status": status.HTTP_200_OK})
  102. return Response(status=status.HTTP_200_OK)