filer_server_handlers_write_cipher.go

package weed_server

import (
	"context"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/chrislusf/seaweedfs/weed/filer2"
	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/storage/needle"
	"github.com/chrislusf/seaweedfs/weed/util"
)
// handling single chunk POST or PUT upload
func (fs *FilerServer) encrypt(ctx context.Context, w http.ResponseWriter, r *http.Request,
	replication string, collection string, dataCenter string, ttlSeconds int32, ttlString string, fsync bool) (filerResult *FilerPostResult, err error) {

	fileId, urlLocation, auth, err := fs.assignNewFileInfo(replication, collection, dataCenter, ttlString, fsync)
	if err != nil || fileId == "" || urlLocation == "" {
		return nil, fmt.Errorf("fail to allocate volume for %s, collection:%s, datacenter:%s", r.URL.Path, collection, dataCenter)
	}

	glog.V(4).Infof("write %s to %v", r.URL.Path, urlLocation)

	// Note: encrypt(gzip(data)), i.e. gzip the data first, then encrypt
	sizeLimit := int64(fs.option.MaxMB) * 1024 * 1024
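
	// parse the upload, rejecting request bodies larger than the configured MaxMB limit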
	pu, err := needle.ParseUpload(r, sizeLimit)
	if err != nil {
		return nil, fmt.Errorf("parse upload: %v", err)
	}

	uncompressedData := pu.Data
	if pu.IsGzipped {
		uncompressedData = pu.UncompressedData
	}
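
	// sniff the MIME type from the content when the client did not supply one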
	if pu.MimeType == "" {
		pu.MimeType = http.DetectContentType(uncompressedData)
		// println("detect2 mimetype to", pu.MimeType)
	}
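
	// upload the (uncompressed) data to the assigned volume server as a single encrypted chunk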
	uploadResult, uploadError := operation.UploadData(urlLocation, pu.FileName, true, uncompressedData, false, pu.MimeType, pu.PairMap, auth)
	if uploadError != nil {
		return nil, fmt.Errorf("upload to volume server: %v", uploadError)
	}

	// Save to chunk manifest structure
	fileChunks := []*filer_pb.FileChunk{uploadResult.ToPbFileChunk(fileId, 0)}

	// fmt.Printf("uploaded: %+v\n", uploadResult)
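
	// a trailing slash means the target is a directory, so append the uploaded file name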
	path := r.URL.Path
	if strings.HasSuffix(path, "/") {
		if pu.FileName != "" {
			path += pu.FileName
		}
	}
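
	// build the filer entry that records the file attributes and the single chunk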
	entry := &filer2.Entry{
		FullPath: util.FullPath(path),
		Attr: filer2.Attr{
			Mtime:       time.Now(),
			Crtime:      time.Now(),
			Mode:        0660,
			Uid:         OS_UID,
			Gid:         OS_GID,
			Replication: replication,
			Collection:  collection,
			TtlSec:      ttlSeconds,
			Mime:        pu.MimeType,
			Md5:         util.Base64Md5ToBytes(pu.ContentMd5),
		},
		Chunks: fileChunks,
	}
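
	// report the original (pre-compression) size back to the client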
	filerResult = &FilerPostResult{
		Name: pu.FileName,
		Size: int64(pu.OriginalDataSize),
	}
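
	// persist the entry in the filer store; on failure, delete the already-uploaded chunks to avoid orphans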
	if dbErr := fs.filer.CreateEntry(ctx, entry, false, false); dbErr != nil {
		fs.filer.DeleteChunks(entry.Chunks)
		err = dbErr
		filerResult.Error = dbErr.Error()
		return
	}

	return
}
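
// Usage sketch (an assumption, not part of this file): the generic filer write
// handler is expected to branch into this cipher path when encryption is
// enabled, roughly along these lines; field and variable names are illustrative:
//
//	if fs.option.Cipher {
//		reply, err = fs.encrypt(ctx, w, r, replication, collection, dataCenter, ttlSeconds, ttlString, fsync)
//	}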