filer_multipart.go

package s3api

import (
	"cmp"
	"encoding/hex"
	"encoding/xml"
	"fmt"
	"math"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/google/uuid"
	"golang.org/x/exp/slices"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
	"github.com/seaweedfs/seaweedfs/weed/stats"
)

const (
	multipartExt     = ".part"
	multiPartMinSize = 5 * 1024 * 1024
)

type InitiateMultipartUploadResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"`
	s3.CreateMultipartUploadOutput
}

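// createMultipartUpload starts a new multipart upload: it creates a per-upload
// directory under the bucket's uploads folder and records the object key, the
// caller's metadata, and the content type on that directory entry.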
func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) {

	glog.V(2).Infof("createMultipartUpload input %v", input)

	uploadIdString := s3a.generateUploadID(*input.Key)
	uploadIdString = uploadIdString + "_" + strings.ReplaceAll(uuid.New().String(), "-", "")

	if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
		if entry.Extended == nil {
			entry.Extended = make(map[string][]byte)
		}
		entry.Extended["key"] = []byte(*input.Key)
		for k, v := range input.Metadata {
			entry.Extended[k] = []byte(*v)
		}
		if input.ContentType != nil {
			entry.Attributes.Mime = *input.ContentType
		}
	}); err != nil {
		glog.Errorf("NewMultipartUpload error: %v", err)
		return nil, s3err.ErrInternalError
	}

	output = &InitiateMultipartUploadResult{
		CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{
			Bucket:   input.Bucket,
			Key:      objectKey(input.Key),
			UploadId: aws.String(uploadIdString),
		},
	}

	return
}

type CompleteMultipartUploadResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"`
	s3.CompleteMultipartUploadOutput
}

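// completeMultipartUpload assembles the uploaded parts into the final object.
// It validates the requested part numbers and ETags against the part files in
// the upload directory, concatenates their chunks in part-number order, writes
// the resulting entry, and then removes the upload directory and any unused
// part files.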
func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput, parts *CompleteMultipartUpload) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) {

	glog.V(2).Infof("completeMultipartUpload input %v", input)
	if len(parts.Parts) == 0 {
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
		return nil, s3err.ErrNoSuchUpload
	}

	completedPartNumbers := []int{}
	completedPartMap := make(map[int][]string)
	for _, part := range parts.Parts {
		if _, ok := completedPartMap[part.PartNumber]; !ok {
			completedPartNumbers = append(completedPartNumbers, part.PartNumber)
		}
		completedPartMap[part.PartNumber] = append(completedPartMap[part.PartNumber], part.ETag)
	}
	sort.Ints(completedPartNumbers)

	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
	entries, _, err := s3a.list(uploadDirectory, "", "", false, maxPartsList)
	if err != nil {
		glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
		return nil, s3err.ErrNoSuchUpload
	}

	if len(entries) == 0 {
		entryName, dirName := s3a.getEntryNameAndDir(input)
		if entry, _ := s3a.getEntry(dirName, entryName); entry != nil && entry.Extended != nil {
			if uploadId, ok := entry.Extended[s3_constants.SeaweedFSUploadId]; ok && *input.UploadId == string(uploadId) {
				return &CompleteMultipartUploadResult{
					CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
						Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))),
						Bucket:   input.Bucket,
						ETag:     aws.String("\"" + filer.ETagChunks(entry.GetChunks()) + "\""),
						Key:      objectKey(input.Key),
					},
				}, s3err.ErrNone
			}
		}
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
		return nil, s3err.ErrNoSuchUpload
	}

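	// The parent upload directory entry holds the metadata recorded when the
	// upload was initiated (object key, user metadata, content type).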
	pentry, err := s3a.getEntry(s3a.genUploadsFolder(*input.Bucket), *input.UploadId)
	if err != nil {
		glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
		return nil, s3err.ErrNoSuchUpload
	}

	deleteEntries := []*filer_pb.Entry{}
	partEntries := make(map[int][]*filer_pb.Entry, len(entries))
	entityTooSmall := false
	for _, entry := range entries {
		foundEntry := false
		glog.V(4).Infof("completeMultipartUpload part entries %s", entry.Name)
		if entry.IsDirectory || !strings.HasSuffix(entry.Name, multipartExt) {
			continue
		}
		partNumber, err := parsePartNumber(entry.Name)
		if err != nil {
			stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNumber).Inc()
			glog.Errorf("completeMultipartUpload failed to parse partNumber %s:%s", entry.Name, err)
			continue
		}
		completedPartsByNumber, ok := completedPartMap[partNumber]
		if !ok {
			continue
		}
		for _, partETag := range completedPartsByNumber {
			partETag = strings.Trim(partETag, `"`)
			entryETag := hex.EncodeToString(entry.Attributes.GetMd5())
			if partETag != "" && len(partETag) == 32 && entryETag != "" {
				if entryETag != partETag {
					glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag)
					stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagMismatch).Inc()
					continue
				}
			} else {
				glog.Warningf("invalid complete etag %s, partEtag %s", partETag, entryETag)
				stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagInvalid).Inc()
			}
			if len(entry.Chunks) == 0 {
				glog.Warningf("completeMultipartUpload %s empty chunks", entry.Name)
				stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEmpty).Inc()
				continue
			}
			// there may be multiple entries for the same part, because of client retries
			partEntries[partNumber] = append(partEntries[partNumber], entry)
			foundEntry = true
		}
		if foundEntry {
			if len(completedPartNumbers) > 1 && partNumber != completedPartNumbers[len(completedPartNumbers)-1] &&
				entry.Attributes.FileSize < multiPartMinSize {
				glog.Warningf("completeMultipartUpload %s part file size is less than 5MB", entry.Name)
				entityTooSmall = true
			}
		} else {
			deleteEntries = append(deleteEntries, entry)
		}
	}
	if entityTooSmall {
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompleteEntityTooSmall).Inc()
		return nil, s3err.ErrEntityTooSmall
	}

	mime := pentry.Attributes.Mime

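	// Stitch the object together: walk the parts in ascending part number and
	// append each part's chunks, rebasing every chunk offset into the final object.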
	var finalParts []*filer_pb.FileChunk
	var offset int64
	for _, partNumber := range completedPartNumbers {
		partEntriesByNumber, ok := partEntries[partNumber]
		if !ok {
			glog.Errorf("part %d has no entry", partNumber)
			stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNotFound).Inc()
			return nil, s3err.ErrInvalidPart
		}
		found := false
		if len(partEntriesByNumber) > 1 {
			slices.SortFunc(partEntriesByNumber, func(a, b *filer_pb.Entry) int {
				return cmp.Compare(b.Chunks[0].ModifiedTsNs, a.Chunks[0].ModifiedTsNs)
			})
		}
		for _, entry := range partEntriesByNumber {
			if found {
				deleteEntries = append(deleteEntries, entry)
				stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEntryMismatch).Inc()
				continue
			}
			for _, chunk := range entry.GetChunks() {
				p := &filer_pb.FileChunk{
					FileId:       chunk.GetFileIdString(),
					Offset:       offset,
					Size:         chunk.Size,
					ModifiedTsNs: chunk.ModifiedTsNs,
					CipherKey:    chunk.CipherKey,
					ETag:         chunk.ETag,
					IsCompressed: chunk.IsCompressed,
				}
				finalParts = append(finalParts, p)
				offset += int64(chunk.Size)
			}
			found = true
		}
	}

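	// Persist the completed object as a single filer entry referencing the part
	// chunks, carrying over the upload's extended metadata and MIME type.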
	entryName, dirName := s3a.getEntryNameAndDir(input)

	err = s3a.mkFile(dirName, entryName, finalParts, func(entry *filer_pb.Entry) {
		if entry.Extended == nil {
			entry.Extended = make(map[string][]byte)
		}
		entry.Extended[s3_constants.SeaweedFSUploadId] = []byte(*input.UploadId)
		for k, v := range pentry.Extended {
			if k != "key" {
				entry.Extended[k] = v
			}
		}
		if pentry.Attributes.Mime != "" {
			entry.Attributes.Mime = pentry.Attributes.Mime
		} else if mime != "" {
			entry.Attributes.Mime = mime
		}
		entry.Attributes.FileSize = uint64(offset)
	})
	if err != nil {
		glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
		return nil, s3err.ErrInternalError
	}

	output = &CompleteMultipartUploadResult{
		CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
			Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))),
			Bucket:   input.Bucket,
			ETag:     aws.String("\"" + filer.ETagChunks(finalParts) + "\""),
			Key:      objectKey(input.Key),
		},
	}

	for _, deleteEntry := range deleteEntries {
		// delete unused part data
		glog.Infof("completeMultipartUpload cleanup %s upload %s unused %s", *input.Bucket, *input.UploadId, deleteEntry.Name)
		if err = s3a.rm(uploadDirectory, deleteEntry.Name, true, true); err != nil {
			glog.Warningf("completeMultipartUpload cleanup %s upload %s unused %s : %v", *input.Bucket, *input.UploadId, deleteEntry.Name, err)
		}
	}
	if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil {
		glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
	}

	return
}

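// getEntryNameAndDir splits the object key of a CompleteMultipartUpload request
// into the final entry name and its parent directory under the bucket path.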
func (s3a *S3ApiServer) getEntryNameAndDir(input *s3.CompleteMultipartUploadInput) (string, string) {
	entryName := filepath.Base(*input.Key)
	dirName := filepath.ToSlash(filepath.Dir(*input.Key))
	if dirName == "." {
		dirName = ""
	}
	if strings.HasPrefix(dirName, "/") {
		dirName = dirName[1:]
	}
	dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)

	// remove the trailing '/'
	if strings.HasSuffix(dirName, "/") {
		dirName = dirName[:len(dirName)-1]
	}
	return entryName, dirName
}

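// parsePartNumber extracts the part number from a part file name: the digits
// before the first '_' if present, otherwise everything before the ".part"
// extension.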
func parsePartNumber(fileName string) (int, error) {
	var partNumberString string
	index := strings.Index(fileName, "_")
	if index != -1 {
		partNumberString = fileName[:index]
	} else {
		partNumberString = fileName[:len(fileName)-len(multipartExt)]
	}
	return strconv.Atoi(partNumberString)
}

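// abortMultipartUpload removes the upload directory, and with it any uploaded
// parts, for the given upload ID if it exists.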
func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) {

	glog.V(2).Infof("abortMultipartUpload input %v", input)

	exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
	if err != nil {
		glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
		return nil, s3err.ErrNoSuchUpload
	}
	if exists {
		err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true)
	}
	if err != nil {
		glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
		return nil, s3err.ErrInternalError
	}

	return &s3.AbortMultipartUploadOutput{}, s3err.ErrNone
}

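// ListMultipartUploadsResult is the XML body returned by ListMultipartUploads;
// a local type is used so that each upload marshals as an <Upload> element.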
type ListMultipartUploadsResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"`

	// copied from s3.ListMultipartUploadsOutput; its Uploads field does not marshal as <Upload></Upload>
	Bucket             *string               `type:"string"`
	Delimiter          *string               `type:"string"`
	EncodingType       *string               `type:"string" enum:"EncodingType"`
	IsTruncated        *bool                 `type:"boolean"`
	KeyMarker          *string               `type:"string"`
	MaxUploads         *int64                `type:"integer"`
	NextKeyMarker      *string               `type:"string"`
	NextUploadIdMarker *string               `type:"string"`
	Prefix             *string               `type:"string"`
	UploadIdMarker     *string               `type:"string"`
	Upload             []*s3.MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
}

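// listMultipartUploads lists the in-progress uploads of a bucket by scanning its
// uploads folder, filtering by KeyMarker and Prefix and truncating at MaxUploads.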
func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) {
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html

	glog.V(2).Infof("listMultipartUploads input %v", input)

	output = &ListMultipartUploadsResult{
		Bucket:       input.Bucket,
		Delimiter:    input.Delimiter,
		EncodingType: input.EncodingType,
		KeyMarker:    input.KeyMarker,
		MaxUploads:   input.MaxUploads,
		Prefix:       input.Prefix,
		IsTruncated:  aws.Bool(false),
	}

	entries, _, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, math.MaxInt32)
	if err != nil {
		glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
		return
	}

	uploadsCount := int64(0)
	for _, entry := range entries {
		if entry.Extended != nil {
			key := string(entry.Extended["key"])
			if *input.KeyMarker != "" && *input.KeyMarker != key {
				continue
			}
			if *input.Prefix != "" && !strings.HasPrefix(key, *input.Prefix) {
				continue
			}
			output.Upload = append(output.Upload, &s3.MultipartUpload{
				Key:      objectKey(aws.String(key)),
				UploadId: aws.String(entry.Name),
			})
			uploadsCount += 1
		}
		if uploadsCount >= *input.MaxUploads {
			output.IsTruncated = aws.Bool(true)
			output.NextUploadIdMarker = aws.String(entry.Name)
			break
		}
	}

	return
}

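// ListPartsResult is the XML body returned by ListParts; a local type is used
// so that each part marshals as a <Part> element.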
type ListPartsResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"`

	// copied from s3.ListPartsOutput; its Parts field does not marshal as <Part></Part>
	Bucket               *string    `type:"string"`
	IsTruncated          *bool      `type:"boolean"`
	Key                  *string    `min:"1" type:"string"`
	MaxParts             *int64     `type:"integer"`
	NextPartNumberMarker *int64     `type:"integer"`
	PartNumberMarker     *int64     `type:"integer"`
	Part                 []*s3.Part `locationName:"Part" type:"list" flattened:"true"`
	StorageClass         *string    `type:"string" enum:"StorageClass"`
	UploadId             *string    `type:"string"`
}

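// listObjectParts lists the uploaded parts of a multipart upload, starting
// after PartNumberMarker and returning at most MaxParts entries.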
func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code s3err.ErrorCode) {
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html

	glog.V(2).Infof("listObjectParts input %v", input)

	output = &ListPartsResult{
		Bucket:           input.Bucket,
		Key:              objectKey(input.Key),
		UploadId:         input.UploadId,
		MaxParts:         input.MaxParts,         // the maximum number of parts to return
		PartNumberMarker: input.PartNumberMarker, // parts are listed starting after this part number, exclusive
		StorageClass:     aws.String("STANDARD"),
	}

	entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d%s", *input.PartNumberMarker, multipartExt), false, uint32(*input.MaxParts))
	if err != nil {
		glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
		return nil, s3err.ErrNoSuchUpload
	}

	// Note: The upload directory serves as a marker for the existence of a multipart
	// upload request, so we cannot simply delete empty upload folders.
	output.IsTruncated = aws.Bool(!isLast)

	for _, entry := range entries {
		if strings.HasSuffix(entry.Name, multipartExt) && !entry.IsDirectory {
			partNumber, err := parsePartNumber(entry.Name)
			if err != nil {
				glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err)
				continue
			}
			output.Part = append(output.Part, &s3.Part{
				PartNumber:   aws.Int64(int64(partNumber)),
				LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()),
				Size:         aws.Int64(int64(filer.FileSize(entry))),
				ETag:         aws.String("\"" + filer.ETag(entry) + "\""),
			})
			if !isLast {
				output.NextPartNumberMarker = aws.Int64(int64(partNumber))
			}
		}
	}

	return
}