s3api_object_handlers_copy.go

package s3api

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"modernc.org/strutil"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3_constants"
	"github.com/seaweedfs/seaweedfs/weed/s3api/s3err"
	"github.com/seaweedfs/seaweedfs/weed/util"
	util_http "github.com/seaweedfs/seaweedfs/weed/util/http"
)
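
// Accepted values for the copy request's metadata and tagging directive headers
// (s3_constants.AmzUserMetaDirective and s3_constants.AmzObjectTaggingDirective).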
const (
	DirectiveCopy    = "COPY"
	DirectiveReplace = "REPLACE"
)
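
// CopyObjectHandler handles CopyObject requests (PUT with an X-Amz-Copy-Source
// header, e.g. "X-Amz-Copy-Source: /srcBucket/path/to/key"). When the source and
// destination refer to the same object (or the copy source is empty) and a REPLACE
// directive is present, only the entry's metadata/tags are rewritten in place;
// otherwise the object data is streamed from the source filer URL to the destination.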
func (s3a *S3ApiServer) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {

	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)

	// Copy source path.
	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
	if err != nil {
		// Save unescaped string as is.
		cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
	}

	srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)

	glog.V(3).Infof("CopyObjectHandler %s %s => %s %s", srcBucket, srcObject, dstBucket, dstObject)

	replaceMeta, replaceTagging := replaceDirective(r.Header)

	if (srcBucket == dstBucket && srcObject == dstObject || cpSrcPath == "") && (replaceMeta || replaceTagging) {
		fullPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject))
		dir, name := fullPath.DirAndName()
		entry, err := s3a.getEntry(dir, name)
		if err != nil || entry.IsDirectory {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
			return
		}
		entry.Extended, err = processMetadataBytes(r.Header, entry.Extended, replaceMeta, replaceTagging)
		entry.Attributes.Mtime = time.Now().Unix()
		if err != nil {
			glog.Errorf("CopyObjectHandler ValidateTags error %s: %v", r.URL, err)
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidTag)
			return
		}
		err = s3a.touch(dir, name, entry)
		if err != nil {
			s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
			return
		}
		writeSuccessResponseXML(w, r, CopyObjectResult{
			ETag:         fmt.Sprintf("%x", entry.Attributes.Md5),
			LastModified: time.Now().UTC(),
		})
		return
	}

	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	srcPath := util.FullPath(fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, srcBucket, srcObject))
	dir, name := srcPath.DirAndName()
	if entry, err := s3a.getEntry(dir, name); err != nil || entry.IsDirectory {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	if srcBucket == dstBucket && srcObject == dstObject {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopyDest)
		return
	}

	dstUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, dstBucket, urlEscapeObject(dstObject))
	srcUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))

	_, _, resp, err := util_http.DownloadFile(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false))
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}
	defer util_http.CloseResponse(resp)

	tagErr := processMetadata(r.Header, resp.Header, replaceMeta, replaceTagging, s3a.getTags, dir, name)
	if tagErr != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)

	etag, errCode := s3a.putToFiler(r, dstUrl, resp.Body, destination, dstBucket)
	if errCode != s3err.ErrNone {
		s3err.WriteErrorResponse(w, r, errCode)
		return
	}

	setEtag(w, etag)

	response := CopyObjectResult{
		ETag:         etag,
		LastModified: time.Now().UTC(),
	}

	writeSuccessResponseXML(w, r, response)
}
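
// pathToBucketAndObject splits an X-Amz-Copy-Source path into bucket and object key.
// "/srcBucket/path/to/key" yields ("srcBucket", "/path/to/key"); a bare bucket name
// yields (bucket, "/").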
func pathToBucketAndObject(path string) (bucket, object string) {
	path = strings.TrimPrefix(path, "/")
	parts := strings.SplitN(path, "/", 2)
	if len(parts) == 2 {
		return parts[0], "/" + parts[1]
	}
	return parts[0], "/"
}
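
// CopyPartResult is the XML body returned for a successful UploadPartCopy request.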
type CopyPartResult struct {
	LastModified time.Time `xml:"LastModified"`
	ETag         string    `xml:"ETag"`
}
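
// CopyObjectPartHandler handles UploadPartCopy requests: it copies a (possibly
// ranged) slice of an existing object into one part of an in-progress multipart upload.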
func (s3a *S3ApiServer) CopyObjectPartHandler(w http.ResponseWriter, r *http.Request) {
	// https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
	// https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html

	dstBucket, dstObject := s3_constants.GetBucketAndObject(r)

	// Copy source path.
	cpSrcPath, err := url.QueryUnescape(r.Header.Get("X-Amz-Copy-Source"))
	if err != nil {
		// Save unescaped string as is.
		cpSrcPath = r.Header.Get("X-Amz-Copy-Source")
	}

	srcBucket, srcObject := pathToBucketAndObject(cpSrcPath)

	// If source object is empty or bucket is empty, reply back invalid copy source.
	if srcObject == "" || srcBucket == "" {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}

	uploadID := r.URL.Query().Get("uploadId")
	partIDString := r.URL.Query().Get("partNumber")

	partID, err := strconv.Atoi(partIDString)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
		return
	}

	glog.V(3).Infof("CopyObjectPartHandler %s %s => %s part %d", srcBucket, srcObject, dstBucket, partID)

	// check partID with maximum part ID for multipart objects
	if partID > globalMaxPartID {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidMaxParts)
		return
	}

	rangeHeader := r.Header.Get("x-amz-copy-source-range")

	dstUrl := s3a.genPartUploadUrl(dstBucket, uploadID, partID)
	srcUrl := fmt.Sprintf("http://%s%s/%s%s",
		s3a.option.Filer.ToHttpAddress(), s3a.option.BucketsPath, srcBucket, urlEscapeObject(srcObject))

	resp, dataReader, err := util_http.ReadUrlAsReaderCloser(srcUrl, s3a.maybeGetFilerJwtAuthorizationToken(false), rangeHeader)
	if err != nil {
		s3err.WriteErrorResponse(w, r, s3err.ErrInvalidCopySource)
		return
	}
	defer util_http.CloseResponse(resp)
	defer dataReader.Close()

	glog.V(2).Infof("copy from %s to %s", srcUrl, dstUrl)
	destination := fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, dstBucket, dstObject)

	etag, errCode := s3a.putToFiler(r, dstUrl, dataReader, destination, dstBucket)
	if errCode != s3err.ErrNone {
		s3err.WriteErrorResponse(w, r, errCode)
		return
	}

	setEtag(w, etag)

	response := CopyPartResult{
		ETag:         etag,
		LastModified: time.Now().UTC(),
	}

	writeSuccessResponseXML(w, r, response)
}
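
// replaceDirective reports whether the request asks to REPLACE (rather than COPY)
// the object's user metadata and its tags, respectively.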
func replaceDirective(reqHeader http.Header) (replaceMeta, replaceTagging bool) {
	return reqHeader.Get(s3_constants.AmzUserMetaDirective) == DirectiveReplace, reqHeader.Get(s3_constants.AmzObjectTaggingDirective) == DirectiveReplace
}
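
// processMetadata rewrites the outgoing request headers for a server-side copy.
// Under a COPY directive, user metadata and tags supplied on the request are
// dropped and the existing object's values are carried over; tags are re-read
// from the filer via getTags when the source object has any.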
func processMetadata(reqHeader, existing http.Header, replaceMeta, replaceTagging bool, getTags func(parentDirectoryPath string, entryName string) (tags map[string]string, err error), dir, name string) (err error) {
	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) == 0 {
		if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
			reqHeader[s3_constants.AmzStorageClass] = sc
		}
	}

	if !replaceMeta {
		for header := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
				delete(reqHeader, header)
			}
		}
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
				reqHeader[k] = v
			}
		}
	}

	if !replaceTagging {
		for header := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzObjectTagging) {
				delete(reqHeader, header)
			}
		}

		found := false
		for k := range existing {
			if strings.HasPrefix(k, s3_constants.AmzObjectTaggingPrefix) {
				found = true
				break
			}
		}

		if found {
			tags, err := getTags(dir, name)
			if err != nil {
				return err
			}

			var tagArr []string
			for k, v := range tags {
				tagArr = append(tagArr, fmt.Sprintf("%s=%s", k, v))
			}
			tagStr := strutil.JoinFields(tagArr, "&")
			reqHeader.Set(s3_constants.AmzObjectTagging, tagStr)
		}
	}
	return
}
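
// processMetadataBytes builds the extended-attribute map for an in-place
// metadata/tag rewrite (an object copied onto itself). Storage class and,
// depending on the directives, either the request's or the existing entry's
// user metadata and tags are carried into the returned map; tags supplied
// under a REPLACE directive are parsed and validated first.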
func processMetadataBytes(reqHeader http.Header, existing map[string][]byte, replaceMeta, replaceTagging bool) (metadata map[string][]byte, err error) {
	metadata = make(map[string][]byte)

	if sc := existing[s3_constants.AmzStorageClass]; len(sc) > 0 {
		metadata[s3_constants.AmzStorageClass] = sc
	}
	if sc := reqHeader.Get(s3_constants.AmzStorageClass); len(sc) > 0 {
		metadata[s3_constants.AmzStorageClass] = []byte(sc)
	}

	if replaceMeta {
		for header, values := range reqHeader {
			if strings.HasPrefix(header, s3_constants.AmzUserMetaPrefix) {
				for _, value := range values {
					metadata[header] = []byte(value)
				}
			}
		}
	} else {
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzUserMetaPrefix) {
				metadata[k] = v
			}
		}
	}

	if replaceTagging {
		if tags := reqHeader.Get(s3_constants.AmzObjectTagging); tags != "" {
			parsedTags, err := parseTagsHeader(tags)
			if err != nil {
				return nil, err
			}
			err = ValidateTags(parsedTags)
			if err != nil {
				return nil, err
			}
			for k, v := range parsedTags {
				metadata[s3_constants.AmzObjectTagging+"-"+k] = []byte(v)
			}
		}
	} else {
		for k, v := range existing {
			if strings.HasPrefix(k, s3_constants.AmzObjectTagging) {
				metadata[k] = v
			}
		}
		delete(metadata, s3_constants.AmzTagCount)
	}

	return
}