123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441 |
- package s3api
- import (
- "cmp"
- "encoding/hex"
- "encoding/xml"
- "fmt"
- "github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
- "github.com/seaweedfs/seaweedfs/weed/stats"
- "golang.org/x/exp/slices"
- "math"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/service/s3"
- "github.com/google/uuid"
- "github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
- "github.com/seaweedfs/seaweedfs/weed/filer"
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
- )
const (
	// multipartExt is the file-name suffix given to each uploaded part file.
	multipartExt = ".part"
	// multiPartMinSize is the S3 minimum part size (5 MiB); every part except
	// the last must be at least this large.
	multiPartMinSize = 5 * 1024 * 1024
)
// InitiateMultipartUploadResult wraps the AWS SDK output type so that it
// marshals with the S3-compatible XML namespace expected by clients.
type InitiateMultipartUploadResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"`
	s3.CreateMultipartUploadOutput
}
- func (s3a *S3ApiServer) createMultipartUpload(input *s3.CreateMultipartUploadInput) (output *InitiateMultipartUploadResult, code s3err.ErrorCode) {
- glog.V(2).Infof("createMultipartUpload input %v", input)
- uploadIdString := s3a.generateUploadID(*input.Key)
- uploadIdString = uploadIdString + "_" + strings.ReplaceAll(uuid.New().String(), "-", "")
- if err := s3a.mkdir(s3a.genUploadsFolder(*input.Bucket), uploadIdString, func(entry *filer_pb.Entry) {
- if entry.Extended == nil {
- entry.Extended = make(map[string][]byte)
- }
- entry.Extended["key"] = []byte(*input.Key)
- for k, v := range input.Metadata {
- entry.Extended[k] = []byte(*v)
- }
- if input.ContentType != nil {
- entry.Attributes.Mime = *input.ContentType
- }
- }); err != nil {
- glog.Errorf("NewMultipartUpload error: %v", err)
- return nil, s3err.ErrInternalError
- }
- output = &InitiateMultipartUploadResult{
- CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{
- Bucket: input.Bucket,
- Key: objectKey(input.Key),
- UploadId: aws.String(uploadIdString),
- },
- }
- return
- }
// CompleteMultipartUploadResult wraps the AWS SDK output type so that it
// marshals with the S3-compatible XML namespace expected by clients.
type CompleteMultipartUploadResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"`
	s3.CompleteMultipartUploadOutput
}
// completeMultipartUpload stitches the uploaded part files of an upload into
// the final object entry. It validates the client-declared part list against
// the stored part files (part number and MD5-based ETag), concatenates the
// surviving chunks in part-number order, writes the final entry, and then
// deletes the unused part files and the upload directory.
func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput, parts *CompleteMultipartUpload) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) {
	glog.V(2).Infof("completeMultipartUpload input %v", input)
	if len(parts.Parts) == 0 {
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
		return nil, s3err.ErrNoSuchUpload
	}
	// Index the client-declared parts. The same part number may be declared
	// with several ETags (client retries), so keep every ETag per number.
	completedPartNumbers := []int{}
	completedPartMap := make(map[int][]string)
	for _, part := range parts.Parts {
		if _, ok := completedPartMap[part.PartNumber]; !ok {
			completedPartNumbers = append(completedPartNumbers, part.PartNumber)
		}
		completedPartMap[part.PartNumber] = append(completedPartMap[part.PartNumber], part.ETag)
	}
	sort.Ints(completedPartNumbers)
	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
	entries, _, err := s3a.list(uploadDirectory, "", "", false, maxPartsList)
	if err != nil {
		glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
		return nil, s3err.ErrNoSuchUpload
	}
	if len(entries) == 0 {
		// No part files left: this may be a retried complete request after a
		// successful completion. If the final object already carries this
		// upload id, answer idempotently with the existing object's ETag.
		entryName, dirName := s3a.getEntryNameAndDir(input)
		if entry, _ := s3a.getEntry(dirName, entryName); entry != nil && entry.Extended != nil {
			if uploadId, ok := entry.Extended[s3_constants.SeaweedFSUploadId]; ok && *input.UploadId == string(uploadId) {
				return &CompleteMultipartUploadResult{
					CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
						Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))),
						Bucket:   input.Bucket,
						ETag:     aws.String("\"" + filer.ETagChunks(entry.GetChunks()) + "\""),
						Key:      objectKey(input.Key),
					},
				}, s3err.ErrNone
			}
		}
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
		return nil, s3err.ErrNoSuchUpload
	}
	// pentry is the upload directory entry itself; it holds the metadata
	// recorded by createMultipartUpload (object key, user metadata, mime).
	pentry, err := s3a.getEntry(s3a.genUploadsFolder(*input.Bucket), *input.UploadId)
	if err != nil {
		glog.Errorf("completeMultipartUpload %s %s error: %v", *input.Bucket, *input.UploadId, err)
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
		return nil, s3err.ErrNoSuchUpload
	}
	deleteEntries := []*filer_pb.Entry{}
	partEntries := make(map[int][]*filer_pb.Entry, len(entries))
	entityTooSmall := false
	// Match every stored part file against the declared parts; collect
	// matches in partEntries and non-matches in deleteEntries for cleanup.
	for _, entry := range entries {
		foundEntry := false
		glog.V(4).Infof("completeMultipartUpload part entries %s", entry.Name)
		if entry.IsDirectory || !strings.HasSuffix(entry.Name, multipartExt) {
			continue
		}
		partNumber, err := parsePartNumber(entry.Name)
		if err != nil {
			stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNumber).Inc()
			glog.Errorf("completeMultipartUpload failed to pasre partNumber %s:%s", entry.Name, err)
			continue
		}
		completedPartsByNumber, ok := completedPartMap[partNumber]
		if !ok {
			// Stored part was not declared by the client; drop it below.
			continue
		}
		for _, partETag := range completedPartsByNumber {
			partETag = strings.Trim(partETag, `"`)
			entryETag := hex.EncodeToString(entry.Attributes.GetMd5())
			// Only compare ETags when the client sent a plausible 32-hex-char
			// MD5 and the stored entry has one; otherwise just warn.
			if partETag != "" && len(partETag) == 32 && entryETag != "" {
				if entryETag != partETag {
					glog.Errorf("completeMultipartUpload %s ETag mismatch chunk: %s part: %s", entry.Name, entryETag, partETag)
					stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagMismatch).Inc()
					continue
				}
			} else {
				glog.Warningf("invalid complete etag %s, partEtag %s", partETag, entryETag)
				stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedEtagInvalid).Inc()
			}
			if len(entry.Chunks) == 0 {
				glog.Warningf("completeMultipartUpload %s empty chunks", entry.Name)
				stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEmpty).Inc()
				continue
			}
			//there maybe multi same part, because of client retry
			partEntries[partNumber] = append(partEntries[partNumber], entry)
			foundEntry = true
		}
		if foundEntry {
			// Enforce the 5 MiB minimum on every part except the last one.
			if len(completedPartNumbers) > 1 && partNumber != completedPartNumbers[len(completedPartNumbers)-1] &&
				entry.Attributes.FileSize < multiPartMinSize {
				glog.Warningf("completeMultipartUpload %s part file size less 5mb", entry.Name)
				entityTooSmall = true
			}
		} else {
			deleteEntries = append(deleteEntries, entry)
		}
	}
	if entityTooSmall {
		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompleteEntityTooSmall).Inc()
		return nil, s3err.ErrEntityTooSmall
	}
	mime := pentry.Attributes.Mime
	var finalParts []*filer_pb.FileChunk
	var offset int64
	// Assemble chunks in ascending part-number order, re-basing each chunk's
	// offset onto the final object's running offset.
	for _, partNumber := range completedPartNumbers {
		partEntriesByNumber, ok := partEntries[partNumber]
		if !ok {
			glog.Errorf("part %d has no entry", partNumber)
			stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartNotFound).Inc()
			return nil, s3err.ErrInvalidPart
		}
		found := false
		if len(partEntriesByNumber) > 1 {
			// Multiple uploads of the same part: prefer the most recent one
			// (sorted newest first by the first chunk's modification time).
			slices.SortFunc(partEntriesByNumber, func(a, b *filer_pb.Entry) int {
				return cmp.Compare(b.Chunks[0].ModifiedTsNs, a.Chunks[0].ModifiedTsNs)
			})
		}
		for _, entry := range partEntriesByNumber {
			if found {
				// Already used a copy of this part; the rest are duplicates.
				deleteEntries = append(deleteEntries, entry)
				stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedPartEntryMismatch).Inc()
				continue
			}
			for _, chunk := range entry.GetChunks() {
				p := &filer_pb.FileChunk{
					FileId:       chunk.GetFileIdString(),
					Offset:       offset,
					Size:         chunk.Size,
					ModifiedTsNs: chunk.ModifiedTsNs,
					CipherKey:    chunk.CipherKey,
					ETag:         chunk.ETag,
					IsCompressed: chunk.IsCompressed,
				}
				finalParts = append(finalParts, p)
				offset += int64(chunk.Size)
			}
			found = true
		}
	}
	entryName, dirName := s3a.getEntryNameAndDir(input)
	// Create the final object entry, carrying over the upload id (for
	// idempotent retries), the user metadata, and the mime type.
	err = s3a.mkFile(dirName, entryName, finalParts, func(entry *filer_pb.Entry) {
		if entry.Extended == nil {
			entry.Extended = make(map[string][]byte)
		}
		entry.Extended[s3_constants.SeaweedFSUploadId] = []byte(*input.UploadId)
		for k, v := range pentry.Extended {
			if k != "key" {
				entry.Extended[k] = v
			}
		}
		if pentry.Attributes.Mime != "" {
			entry.Attributes.Mime = pentry.Attributes.Mime
		} else if mime != "" {
			entry.Attributes.Mime = mime
		}
		entry.Attributes.FileSize = uint64(offset)
	})
	if err != nil {
		glog.Errorf("completeMultipartUpload %s/%s error: %v", dirName, entryName, err)
		return nil, s3err.ErrInternalError
	}
	output = &CompleteMultipartUploadResult{
		CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
			Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))),
			Bucket:   input.Bucket,
			ETag:     aws.String("\"" + filer.ETagChunks(finalParts) + "\""),
			Key:      objectKey(input.Key),
		},
	}
	// Best-effort cleanup: remove unused/duplicate part files, then the
	// upload directory itself. Failures are logged, not returned.
	for _, deleteEntry := range deleteEntries {
		//delete unused part data
		glog.Infof("completeMultipartUpload cleanup %s upload %s unused %s", *input.Bucket, *input.UploadId, deleteEntry.Name)
		if err = s3a.rm(uploadDirectory, deleteEntry.Name, true, true); err != nil {
			glog.Warningf("completeMultipartUpload cleanup %s upload %s unused %s : %v", *input.Bucket, *input.UploadId, deleteEntry.Name, err)
		}
	}
	if err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, false, true); err != nil {
		glog.V(1).Infof("completeMultipartUpload cleanup %s upload %s: %v", *input.Bucket, *input.UploadId, err)
	}
	return
}
- func (s3a *S3ApiServer) getEntryNameAndDir(input *s3.CompleteMultipartUploadInput) (string, string) {
- entryName := filepath.Base(*input.Key)
- dirName := filepath.ToSlash(filepath.Dir(*input.Key))
- if dirName == "." {
- dirName = ""
- }
- if strings.HasPrefix(dirName, "/") {
- dirName = dirName[1:]
- }
- dirName = fmt.Sprintf("%s/%s/%s", s3a.option.BucketsPath, *input.Bucket, dirName)
- // remove suffix '/'
- if strings.HasSuffix(dirName, "/") {
- dirName = dirName[:len(dirName)-1]
- }
- return entryName, dirName
- }
- func parsePartNumber(fileName string) (int, error) {
- var partNumberString string
- index := strings.Index(fileName, "_")
- if index != -1 {
- partNumberString = fileName[:index]
- } else {
- partNumberString = fileName[:len(fileName)-len(multipartExt)]
- }
- return strconv.Atoi(partNumberString)
- }
- func (s3a *S3ApiServer) abortMultipartUpload(input *s3.AbortMultipartUploadInput) (output *s3.AbortMultipartUploadOutput, code s3err.ErrorCode) {
- glog.V(2).Infof("abortMultipartUpload input %v", input)
- exists, err := s3a.exists(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true)
- if err != nil {
- glog.V(1).Infof("bucket %s abort upload %s: %v", *input.Bucket, *input.UploadId, err)
- return nil, s3err.ErrNoSuchUpload
- }
- if exists {
- err = s3a.rm(s3a.genUploadsFolder(*input.Bucket), *input.UploadId, true, true)
- }
- if err != nil {
- glog.V(1).Infof("bucket %s remove upload %s: %v", *input.Bucket, *input.UploadId, err)
- return nil, s3err.ErrInternalError
- }
- return &s3.AbortMultipartUploadOutput{}, s3err.ErrNone
- }
// ListMultipartUploadsResult is a hand-rolled XML shape for ListMultipartUploads.
type ListMultipartUploadsResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult"`

	// copied from s3.ListMultipartUploadsOutput, the Uploads is not converting to <Upload></Upload>
	Bucket             *string               `type:"string"`
	Delimiter          *string               `type:"string"`
	EncodingType       *string               `type:"string" enum:"EncodingType"`
	IsTruncated        *bool                 `type:"boolean"`
	KeyMarker          *string               `type:"string"`
	MaxUploads         *int64                `type:"integer"`
	NextKeyMarker      *string               `type:"string"`
	NextUploadIdMarker *string               `type:"string"`
	Prefix             *string               `type:"string"`
	UploadIdMarker     *string               `type:"string"`
	// Upload is flattened so each entry marshals as a top-level <Upload> element.
	Upload []*s3.MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
}
// listMultipartUploads lists the in-progress multipart uploads of a bucket by
// scanning the bucket's uploads folder. Each upload directory's "key" extended
// attribute is the object key; the directory name is the upload id.
func (s3a *S3ApiServer) listMultipartUploads(input *s3.ListMultipartUploadsInput) (output *ListMultipartUploadsResult, code s3err.ErrorCode) {
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html
	glog.V(2).Infof("listMultipartUploads input %v", input)
	output = &ListMultipartUploadsResult{
		Bucket:       input.Bucket,
		Delimiter:    input.Delimiter,
		EncodingType: input.EncodingType,
		KeyMarker:    input.KeyMarker,
		MaxUploads:   input.MaxUploads,
		Prefix:       input.Prefix,
		IsTruncated:  aws.Bool(false),
	}
	// NOTE(review): UploadIdMarker, KeyMarker, Prefix and MaxUploads are
	// dereferenced without nil checks — presumably the HTTP handler always
	// populates them; confirm against the caller.
	entries, _, err := s3a.list(s3a.genUploadsFolder(*input.Bucket), "", *input.UploadIdMarker, false, math.MaxInt32)
	if err != nil {
		// NOTE(review): error path returns ErrNone with a partial output.
		glog.Errorf("listMultipartUploads %s error: %v", *input.Bucket, err)
		return
	}
	uploadsCount := int64(0)
	for _, entry := range entries {
		if entry.Extended != nil {
			key := string(entry.Extended["key"])
			// KeyMarker is matched for equality, not "start after", here.
			if *input.KeyMarker != "" && *input.KeyMarker != key {
				continue
			}
			if *input.Prefix != "" && !strings.HasPrefix(key, *input.Prefix) {
				continue
			}
			output.Upload = append(output.Upload, &s3.MultipartUpload{
				Key:      objectKey(aws.String(key)),
				UploadId: aws.String(entry.Name),
			})
			uploadsCount += 1
		}
		// Stop once MaxUploads results are collected and mark truncation with
		// the last examined upload id as the next marker.
		if uploadsCount >= *input.MaxUploads {
			output.IsTruncated = aws.Bool(true)
			output.NextUploadIdMarker = aws.String(entry.Name)
			break
		}
	}
	return
}
// ListPartsResult is a hand-rolled XML shape for ListParts.
type ListPartsResult struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"`

	// copied from s3.ListPartsOutput, the Parts is not converting to <Part></Part>
	Bucket               *string    `type:"string"`
	IsTruncated          *bool      `type:"boolean"`
	Key                  *string    `min:"1" type:"string"`
	MaxParts             *int64     `type:"integer"`
	NextPartNumberMarker *int64     `type:"integer"`
	PartNumberMarker     *int64     `type:"integer"`
	// Part is flattened so each entry marshals as a top-level <Part> element.
	Part         []*s3.Part `locationName:"Part" type:"list" flattened:"true"`
	StorageClass *string    `type:"string" enum:"StorageClass"`
	UploadId     *string    `type:"string"`
}
// listObjectParts lists the already-uploaded parts of a multipart upload by
// listing the upload's directory, starting after the given part number marker.
func (s3a *S3ApiServer) listObjectParts(input *s3.ListPartsInput) (output *ListPartsResult, code s3err.ErrorCode) {
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html
	glog.V(2).Infof("listObjectParts input %v", input)
	output = &ListPartsResult{
		Bucket:           input.Bucket,
		Key:              objectKey(input.Key),
		UploadId:         input.UploadId,
		MaxParts:         input.MaxParts,         // the maximum number of parts to return.
		PartNumberMarker: input.PartNumberMarker, // the part number starts after this, exclusive
		StorageClass:     aws.String("STANDARD"),
	}
	// Part files are named with zero-padded 4-digit numbers ("0001.part"), so
	// listing after the marker's formatted name skips parts <= the marker.
	// NOTE(review): MaxParts and PartNumberMarker are dereferenced without nil
	// checks — presumably the handler supplies defaults; confirm.
	entries, isLast, err := s3a.list(s3a.genUploadsFolder(*input.Bucket)+"/"+*input.UploadId, "", fmt.Sprintf("%04d%s", *input.PartNumberMarker, multipartExt), false, uint32(*input.MaxParts))
	if err != nil {
		glog.Errorf("listObjectParts %s %s error: %v", *input.Bucket, *input.UploadId, err)
		return nil, s3err.ErrNoSuchUpload
	}

	// Note: The upload directory is sort of a marker of the existence of an multipart upload request.
	// So can not just delete empty upload folders.

	output.IsTruncated = aws.Bool(!isLast)
	for _, entry := range entries {
		if strings.HasSuffix(entry.Name, multipartExt) && !entry.IsDirectory {
			partNumber, err := parsePartNumber(entry.Name)
			if err != nil {
				glog.Errorf("listObjectParts %s %s parse %s: %v", *input.Bucket, *input.UploadId, entry.Name, err)
				continue
			}
			output.Part = append(output.Part, &s3.Part{
				PartNumber:   aws.Int64(int64(partNumber)),
				LastModified: aws.Time(time.Unix(entry.Attributes.Mtime, 0).UTC()),
				Size:         aws.Int64(int64(filer.FileSize(entry))),
				ETag:         aws.String("\"" + filer.ETag(entry) + "\""),
			})
			// When truncated, the marker ends up as the last listed part number.
			if !isLast {
				output.NextPartNumberMarker = aws.Int64(int64(partNumber))
			}
		}
	}
	return
}
|