s3api_object_copy_handlers.go

package s3api

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"modernc.org/strutil"
)

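// Values accepted by the metadata and tagging directive request headers
// (see s3_constants.AmzUserMetaDirective and s3_constants.AmzObjectTaggingDirective).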
const (
	DirectiveCopy    = "COPY"
	DirectiveReplace = "REPLACE"
)

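// CopyObjectHandler handles the S3 CopyObject API.
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
// When the source and destination are the same object (or no copy source is
// given) and a REPLACE directive is present, only the entry's metadata and
// tags are rewritten; otherwise the object data is streamed from the source
// URL on the filer to the destination URL.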
func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {

	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)

	// Copy source path.
	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
	if err != nil {
		// Save unescaped string as is.
		cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
	}

	srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)

	glog.V(3).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject)

	replaceMeta, replaceTagging := replaceDirective(r.Header)

	if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && (replaceMeta || replaceTagging) {
		fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject))
		dir, name := fullPath.DirAndName()
		entry, err := s3a.getEntry(dir, name)
		if err != nil || entry.IsDirectory {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
			return
		}
		entry.Extended, err = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging)
		if err != nil {
			glog.Errorf("CopyObjectHandler ValidateTags error %s: %v", r.URL, err)
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
			return
		}
		err = s3a.touch(dir, name, entry)
		if err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
			return
		}
		writeSuccessResponseXML(w, r, CopyObjectResult{
			ETag:         fmt.Sprintf("%x", entry.Attributes.Md5),
			LastModified: time.Now().UTC(),
		})
		return
	}

	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject))
	dir, name := srcPath.DirAndName()
	if entry, err := s3a.getEntry(dir, name); err != nil || entry.IsDirectory {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	if srcBucket == dstBucket && srcObject == dstObject {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopyDest)
		return
	}

	dstUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, dstBucket, urlEscapeObject(dstObject))
	srcUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))

	_, _, resp, err := util.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false))
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}
	defer util.CloseResponse(resp)

	tagErr := processMetadata(r.Header, resp.Header, replaceMeta, replaceTagging, s3a.getTags, dir, name)
	if tagErr != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)
	etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body, destination)
	if errCode != s3err.ErrNone {
		s3err.WriteErrorResponse(w, r, errCode)
		return
	}

	setEtag(w, etag)

	response := CopyObjectResult{
		ETag:         etag,
		LastModified: time.Now().UTC(),
	}

	writeSuccessResponseXML(w, r, response)
}

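// pathToBucketAndObject splits an X-Amz-Copy-Source value such as
// "/srcBucket/some/key" into ("srcBucket", "/some/key"); a bare bucket name
// yields "/" as the object.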
func pathToBucketAndObject(path string) (bucket, object string) {
	path = strings.TrimPrefix(path, "/")
	parts := strings.SplitN(path, "/", 2)
	if len(parts) == 2 {
		return parts[0], "/" + parts[1]
	}
	return parts[0], "/"
}

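// CopyPartResult is the XML response body for UploadPartCopy.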
type CopyPartResult struct {
	LastModified time.Time `xml:"LastModified"`
	ETag         string    `xml:"ETag"`
}

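// CopyObjectPartHandler handles the S3 UploadPartCopy API: it reads the
// requested byte range of the copy source and stores it as a part of an
// in-progress multipart upload.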
func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	// https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html

	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)

	// Copy source path.
	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
	if err != nil {
		// Save unescaped string as is.
		cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
	}

	srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)

	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	uploadID := r.URL.Query().Get("uploadId")
	partIDString := r.URL.Query().Get("partNumber")

	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
		return
	}

	glog.V(3).Infof("CopyObjectPartHandler %s %s => %s part %d", srcBucket, srcObject, dstBucket, partID)

	// check partID with maximum part ID for multipart objects
	if partID > globalMaxPartID {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
		return
	}

	rangeHeader := r.Header.Get("x-amz-copy-source-range")

	dstUrl := fmt.Sprintf("http://%s%s/%s/%04d.part",
		s3a.option.Filer.ToHttpAddress(), s3a.genUploadsFolder(dstBucket), uploadID, partID)
	srcUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))

	resp, dataReader, err := util.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}
	defer util.CloseResponse(resp)
	defer dataReader.Close()

	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)
	etag, errCode := s3a.putToFiler(r, dstUrl, dataReader, destination)
	if errCode != s3err.ErrNone {
		s3err.WriteErrorResponse(w, r, errCode)
		return
	}

	setEtag(w, etag)

	response := CopyPartResult{
		ETag:         etag,
		LastModified: time.Now().UTC(),
	}

	writeSuccessResponseXML(w, r, response)
}

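// replaceDirective reports whether the metadata directive and the tagging
// directive in the request ask for REPLACE semantics.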
func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) {
	return reqHeader.Get(s3_constants.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(s3_constants.AmzObjectTaggingDirective) == DirectiveReplace
}

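// processMetadata adjusts the outgoing copy request headers: it inherits the
// source storage class when none is supplied and, unless the corresponding
// directive is REPLACE, replaces client-sent user metadata and tagging
// headers with the source object's values (tags are fetched via getTags).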
func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) {
	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) == 0 {
		if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
			reqHeader[s3_constants.AmzStorageClass] = sc
		}
	}

	if !replaceMeta {
		for header := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
				delete(reqHeader, header)
			}
		}
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
				reqHeader[k] = v
			}
		}
	}

	if !replaceTagging {
		for header := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzObjectTagging) {
				delete(reqHeader, header)
			}
		}

		found := false
		for k := range existing {
			if strings.HasPrefix(k, s3_constants.AmzObjectTaggingPrefix) {
				found = true
				break
			}
		}

		if found {
			tags, err := getTags(dir, name)
			if err != nil {
				return err
			}

			var tagArr []string
			for k, v := range tags {
				tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v))
			}
			tagStr := strutil.JoinFields(tagArr, "&")
			reqHeader.Set(s3_constants.AmzObjectTagging, tagStr)
		}
	}
	return
}

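// processMetadataBytes builds the destination entry's extended attributes
// from the existing attributes and the request headers, honoring the
// metadata and tagging directives; replacement tags are parsed and validated
// before being stored.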
func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte, err error) {
	metadata = make(map[string][]byte)

	if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
		metadata[s3_constants.AmzStorageClass] = sc
	}
	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) > 0 {
		metadata[s3_constants.AmzStorageClass] = []byte(sc)
	}

	if replaceMeta {
		for header, values := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
				for _, value := range values {
					metadata[header] = []byte(value)
				}
			}
		}
	} else {
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
				metadata[k] = v
			}
		}
	}

	if replaceTagging {
		if tags := reqHeader.Get(s3_constants.AmzObjectTagging); tags != "" {
			parsedTags, err := parseTagsHeader(tags)
			if err != nil {
				return nil, err
			}
			err = ValidateTags(parsedTags)
			if err != nil {
				return nil, err
			}
			for k, v := range parsedTags {
				metadata[s3_constants.AmzObjectTagging+"-"+k] = []byte(v)
			}
		}
	} else {
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzObjectTagging) {
				metadata[k] = v
			}
		}
		delete(metadata, s3_constants.AmzTagCount)
	}

	return
}