123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439 |
- package s3api
- import (
- "crypto/md5"
- "encoding/json"
- "encoding/xml"
- "fmt"
- "github.com/chrislusf/seaweedfs/weed/filer"
- "github.com/pquerna/cachecontrol/cacheobject"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "sort"
- "strings"
- "time"
- "github.com/chrislusf/seaweedfs/weed/s3api/s3err"
- "github.com/gorilla/mux"
- "github.com/chrislusf/seaweedfs/weed/glog"
- "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
- weed_server "github.com/chrislusf/seaweedfs/weed/server"
- "github.com/chrislusf/seaweedfs/weed/util"
- )
- var (
- client *http.Client
- )
- func init() {
- client = &http.Client{Transport: &http.Transport{
- MaxIdleConns: 1024,
- MaxIdleConnsPerHost: 1024,
- }}
- }
// PutObjectHandler stores an object, or creates a directory for keys ending
// in "/", after validating request headers and authentication. The object
// body is streamed to the filer without buffering.
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
func (s3a *S3ApiServer) PutObjectHandler(w http.ResponseWriter, r *http.Request) {

	bucket, object := getBucketAndObject(r)

	// Reject a malformed Content-Md5 header before reading any of the body.
	_, err := validateContentMd5(r.Header)
	if err != nil {
		s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
		return
	}

	// Validate optional caching headers up front so a bad request fails fast.
	// NOTE(review): ErrInvalidDigest looks like the wrong error code for a
	// malformed Cache-Control or Expires header — confirm whether a more
	// specific code is available in s3err.
	if r.Header.Get("Cache-Control") != "" {
		if _, err = cacheobject.ParseRequestCacheControl(r.Header.Get("Cache-Control")); err != nil {
			s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
			return
		}
	}

	if r.Header.Get("Expires") != "" {
		if _, err = time.Parse(http.TimeFormat, r.Header.Get("Expires")); err != nil {
			s3err.WriteErrorResponse(w, s3err.ErrInvalidDigest, r)
			return
		}
	}

	dataReader := r.Body
	rAuthType := getRequestAuthType(r)
	if s3a.iam.isEnabled() {
		// Verify the request signature; streaming-signed uploads replace the
		// body with a chunked reader that validates each chunk's signature.
		var s3ErrCode s3err.ErrorCode
		switch rAuthType {
		case authTypeStreamingSigned:
			dataReader, s3ErrCode = s3a.iam.newSignV4ChunkedReader(r)
		case authTypeSignedV2, authTypePresignedV2:
			_, s3ErrCode = s3a.iam.isReqAuthenticatedV2(r)
		case authTypePresigned, authTypeSigned:
			_, s3ErrCode = s3a.iam.reqSignatureV4Verify(r)
		}
		if s3ErrCode != s3err.ErrNone {
			s3err.WriteErrorResponse(w, s3ErrCode, r)
			return
		}
	} else {
		// Without IAM configured, streaming-signed uploads cannot be verified.
		if authTypeStreamingSigned == rAuthType {
			s3err.WriteErrorResponse(w, s3err.ErrAuthNotSetup, r)
			return
		}
	}
	defer dataReader.Close()

	if strings.HasSuffix(object, "/") {
		// A key ending in "/" is a folder marker: only create the directory.
		if err := s3a.mkdir(s3a.option.BucketsPath, bucket+object, nil); err != nil {
			s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
			return
		}
	} else {
		uploadUrl := fmt.Sprintf("http://%s%s/%s%s", s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))

		etag, errCode := s3a.putToFiler(r, uploadUrl, dataReader)

		if errCode != s3err.ErrNone {
			s3err.WriteErrorResponse(w, errCode, r)
			return
		}

		// The ETag is the MD5 of the stored payload, computed by putToFiler.
		setEtag(w, etag)
	}

	writeSuccessResponseEmpty(w)
}
// urlPathEscape escapes each path segment of object individually, leaving the
// "/" separators intact, so the result is safe to embed in a filer URL.
func urlPathEscape(object string) string {
	segments := strings.Split(object, "/")
	escaped := make([]string, len(segments))
	for i, segment := range segments {
		escaped[i] = url.PathEscape(segment)
	}
	return strings.Join(escaped, "/")
}
- func (s3a *S3ApiServer) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
- bucket, object := getBucketAndObject(r)
- if strings.HasSuffix(r.URL.Path, "/") {
- s3err.WriteErrorResponse(w, s3err.ErrNotImplemented, r)
- return
- }
- destUrl := fmt.Sprintf("http://%s%s/%s%s",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))
- s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
- }
- func (s3a *S3ApiServer) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
- bucket, object := getBucketAndObject(r)
- destUrl := fmt.Sprintf("http://%s%s/%s%s",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))
- s3a.proxyToFiler(w, r, destUrl, passThroughResponse)
- }
- func (s3a *S3ApiServer) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
- bucket, object := getBucketAndObject(r)
- destUrl := fmt.Sprintf("http://%s%s/%s%s?recursive=true",
- s3a.option.Filer, s3a.option.BucketsPath, bucket, urlPathEscape(object))
- s3a.proxyToFiler(w, r, destUrl, func(proxyResponse *http.Response, w http.ResponseWriter) {
- for k, v := range proxyResponse.Header {
- w.Header()[k] = v
- }
- w.WriteHeader(http.StatusNoContent)
- })
- }
// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
	// ObjectName is decoded from the <Key> element of the request XML.
	ObjectName string `xml:"Key"`
}
// DeleteObjectsRequest - xml carrying the object key names which needs to be deleted.
type DeleteObjectsRequest struct {
	// Element to enable quiet mode for the request; when true, successfully
	// deleted keys are omitted from the response.
	Quiet bool
	// List of objects to be deleted
	Objects []ObjectIdentifier `xml:"Object"`
}
// DeleteError structure, describing one object that could not be deleted.
type DeleteError struct {
	Code    string // machine-readable error code (currently always populated empty by the handler)
	Message string // human-readable error text from the filer
	Key     string // the object key that failed to delete
}
// DeleteObjectsResponse container for multiple object deletes.
// The XMLName tag pins the S3 2006-03-01 namespace on the <DeleteResult> root.
type DeleteObjectsResponse struct {
	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`

	// Collection of all deleted objects
	DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`

	// Collection of errors deleting certain objects.
	Errors []DeleteError `xml:"Error,omitempty"`
}
- // DeleteMultipleObjectsHandler - Delete multiple objects
- func (s3a *S3ApiServer) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Request) {
- bucket, _ := getBucketAndObject(r)
- deleteXMLBytes, err := ioutil.ReadAll(r.Body)
- if err != nil {
- s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
- return
- }
- deleteObjects := &DeleteObjectsRequest{}
- if err := xml.Unmarshal(deleteXMLBytes, deleteObjects); err != nil {
- s3err.WriteErrorResponse(w, s3err.ErrMalformedXML, r)
- return
- }
- var deletedObjects []ObjectIdentifier
- var deleteErrors []DeleteError
- directoriesWithDeletion := make(map[string]int)
- s3a.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
- // delete file entries
- for _, object := range deleteObjects.Objects {
- lastSeparator := strings.LastIndex(object.ObjectName, "/")
- parentDirectoryPath, entryName, isDeleteData, isRecursive := "", object.ObjectName, true, false
- if lastSeparator > 0 && lastSeparator+1 < len(object.ObjectName) {
- entryName = object.ObjectName[lastSeparator+1:]
- parentDirectoryPath = "/" + object.ObjectName[:lastSeparator]
- }
- parentDirectoryPath = fmt.Sprintf("%s/%s%s", s3a.option.BucketsPath, bucket, parentDirectoryPath)
- err := doDeleteEntry(client, parentDirectoryPath, entryName, isDeleteData, isRecursive)
- if err == nil {
- directoriesWithDeletion[parentDirectoryPath]++
- deletedObjects = append(deletedObjects, object)
- } else if strings.Contains(err.Error(), filer.MsgFailDelNonEmptyFolder) {
- deletedObjects = append(deletedObjects, object)
- } else {
- delete(directoriesWithDeletion, parentDirectoryPath)
- deleteErrors = append(deleteErrors, DeleteError{
- Code: "",
- Message: err.Error(),
- Key: object.ObjectName,
- })
- }
- }
- // purge empty folders, only checking folders with deletions
- for len(directoriesWithDeletion) > 0 {
- directoriesWithDeletion = s3a.doDeleteEmptyDirectories(client, directoriesWithDeletion)
- }
- return nil
- })
- deleteResp := DeleteObjectsResponse{}
- if !deleteObjects.Quiet {
- deleteResp.DeletedObjects = deletedObjects
- }
- deleteResp.Errors = deleteErrors
- writeSuccessResponseXML(w, deleteResp)
- }
- func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerClient, directoriesWithDeletion map[string]int) (newDirectoriesWithDeletion map[string]int) {
- var allDirs []string
- for dir, _ := range directoriesWithDeletion {
- allDirs = append(allDirs, dir)
- }
- sort.Slice(allDirs, func(i, j int) bool {
- return len(allDirs[i]) > len(allDirs[j])
- })
- newDirectoriesWithDeletion = make(map[string]int)
- for _, dir := range allDirs {
- parentDir, dirName := util.FullPath(dir).DirAndName()
- if parentDir == s3a.option.BucketsPath {
- continue
- }
- if err := doDeleteEntry(client, parentDir, dirName, false, false); err != nil {
- glog.V(4).Infof("directory %s has %d deletion but still not empty: %v", dir, directoriesWithDeletion[dir], err)
- } else {
- newDirectoriesWithDeletion[parentDir]++
- }
- }
- return
- }
// passThroughHeaders lists the request headers (lowercased) that, when
// present, are forwarded to the filer with their "response-" prefix stripped
// (e.g. "response-content-type" is sent as "content-type"). See proxyToFiler.
var passThroughHeaders = []string{
	"response-cache-control",
	"response-content-disposition",
	"response-content-encoding",
	"response-content-language",
	"response-content-type",
	"response-expires",
}
- func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, destUrl string, responseFn func(proxyResponse *http.Response, w http.ResponseWriter)) {
- glog.V(2).Infof("s3 proxying %s to %s", r.Method, destUrl)
- proxyReq, err := http.NewRequest(r.Method, destUrl, r.Body)
- if err != nil {
- glog.Errorf("NewRequest %s: %v", destUrl, err)
- s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
- return
- }
- proxyReq.Header.Set("Host", s3a.option.Filer)
- proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
- for header, values := range r.Header {
- // handle s3 related headers
- passed := false
- for _, h := range passThroughHeaders {
- if strings.ToLower(header) == h && len(values) > 0 {
- proxyReq.Header.Add(header[len("response-"):], values[0])
- passed = true
- break
- }
- }
- if passed {
- continue
- }
- // handle other headers
- for _, value := range values {
- proxyReq.Header.Add(header, value)
- }
- }
- resp, postErr := client.Do(proxyReq)
- if postErr != nil {
- glog.Errorf("post to filer: %v", postErr)
- s3err.WriteErrorResponse(w, s3err.ErrInternalError, r)
- return
- }
- defer util.CloseResponse(resp)
- if resp.StatusCode == http.StatusPreconditionFailed {
- s3err.WriteErrorResponse(w, s3err.ErrPreconditionFailed, r)
- return
- }
- if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 {
- if r.Method != "DELETE" {
- s3err.WriteErrorResponse(w, s3err.ErrNoSuchKey, r)
- return
- }
- }
- responseFn(resp, w)
- }
- func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
- for k, v := range proxyResponse.Header {
- w.Header()[k] = v
- }
- if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 {
- w.WriteHeader(http.StatusPartialContent)
- } else {
- w.WriteHeader(proxyResponse.StatusCode)
- }
- io.Copy(w, proxyResponse.Body)
- }
- func (s3a *S3ApiServer) putToFiler(r *http.Request, uploadUrl string, dataReader io.Reader) (etag string, code s3err.ErrorCode) {
- hash := md5.New()
- var body = io.TeeReader(dataReader, hash)
- proxyReq, err := http.NewRequest("PUT", uploadUrl, body)
- if err != nil {
- glog.Errorf("NewRequest %s: %v", uploadUrl, err)
- return "", s3err.ErrInternalError
- }
- proxyReq.Header.Set("Host", s3a.option.Filer)
- proxyReq.Header.Set("X-Forwarded-For", r.RemoteAddr)
- for header, values := range r.Header {
- for _, value := range values {
- proxyReq.Header.Add(header, value)
- }
- }
- resp, postErr := client.Do(proxyReq)
- if postErr != nil {
- glog.Errorf("post to filer: %v", postErr)
- return "", s3err.ErrInternalError
- }
- defer resp.Body.Close()
- etag = fmt.Sprintf("%x", hash.Sum(nil))
- resp_body, ra_err := ioutil.ReadAll(resp.Body)
- if ra_err != nil {
- glog.Errorf("upload to filer response read %d: %v", resp.StatusCode, ra_err)
- return etag, s3err.ErrInternalError
- }
- var ret weed_server.FilerPostResult
- unmarshal_err := json.Unmarshal(resp_body, &ret)
- if unmarshal_err != nil {
- glog.Errorf("failing to read upload to %s : %v", uploadUrl, string(resp_body))
- return "", s3err.ErrInternalError
- }
- if ret.Error != "" {
- glog.Errorf("upload to filer error: %v", ret.Error)
- return "", filerErrorToS3Error(ret.Error)
- }
- return etag, s3err.ErrNone
- }
- func setEtag(w http.ResponseWriter, etag string) {
- if etag != "" {
- if strings.HasPrefix(etag, "\"") {
- w.Header().Set("ETag", etag)
- } else {
- w.Header().Set("ETag", "\""+etag+"\"")
- }
- }
- }
- func getBucketAndObject(r *http.Request) (bucket, object string) {
- vars := mux.Vars(r)
- bucket = vars["bucket"]
- object = vars["object"]
- if !strings.HasPrefix(object, "/") {
- object = "/" + object
- }
- return
- }
- func filerErrorToS3Error(errString string) s3err.ErrorCode {
- if strings.HasPrefix(errString, "existing ") && strings.HasSuffix(errString, "is a directory") {
- return s3err.ErrExistingObjectIsDirectory
- }
- return s3err.ErrInternalError
- }
|