download.go

package command

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"strings"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/util"
)
var (
	d DownloadOptions
)

type DownloadOptions struct {
	server *string
	dir    *string
}

func init() {
	cmdDownload.Run = runDownload // break init cycle
	d.server = cmdDownload.Flag.String("server", "localhost:9333", "SeaweedFS master location")
	d.dir = cmdDownload.Flag.String("dir", ".", "Download the whole folder recursively if specified.")
}
var cmdDownload = &Command{
	UsageLine: "download -server=localhost:9333 -dir=one_directory fid1 [fid2 fid3 ...]",
	Short:     "download files by file id",
	Long: `download files by file id.

  Usually you just need to use curl to look up the file's volume server, and then download them directly.
  This download tool combines the two steps into one.

  What's more, if you use the "weed upload -maxMB=..." option to upload a big file divided into chunks, you can
  use this tool to download the chunks and merge them automatically.
  `,
}
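// An illustrative invocation (the fids below are hypothetical placeholders,
// not real file ids; actual ids come from a prior "weed upload"):
//
//	weed download -server=localhost:9333 -dir=/tmp/downloads 3,01637037d6 4,02a2c980f8
//
// Each fid is resolved to its volume server via the master, and the file body
// is then written into the target directory.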
func runDownload(cmd *Command, args []string) bool {
	for _, fid := range args {
		if e := downloadToFile(func() string { return *d.server }, fid, util.ResolvePath(*d.dir)); e != nil {
			fmt.Println("Download Error: ", fid, e)
		}
	}
	return true
}
// downloadToFile looks up fileId on the master, downloads the file from its
// volume server, and saves it under saveDir. If the served filename carries
// the "-list" suffix, the body is a chunk manifest: each line is the fid of
// one chunk, and the chunks are fetched and concatenated into a single file.
func downloadToFile(masterFn operation.GetMasterFn, fileId, saveDir string) error {
	fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
	if lookupError != nil {
		return lookupError
	}
	filename, _, rc, err := util.DownloadFile(fileUrl)
	if err != nil {
		return err
	}
	defer util.CloseResponse(rc)
	if filename == "" {
		filename = fileId
	}
	isFileList := false
	if strings.HasSuffix(filename, "-list") {
		// old command compatible
		isFileList = true
		filename = filename[0 : len(filename)-len("-list")]
	}
	f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer f.Close()
	if isFileList {
		content, err := ioutil.ReadAll(rc.Body)
		if err != nil {
			return err
		}
		// each line of the manifest is one chunk fid; fetch and append in order
		fids := strings.Split(string(content), "\n")
		for _, partId := range fids {
			var n int
			_, part, err := fetchContent(masterFn, partId)
			if err == nil {
				n, err = f.Write(part)
			}
			if err == nil && n < len(part) {
				err = io.ErrShortWrite
			}
			if err != nil {
				return err
			}
		}
	} else {
		if _, err = io.Copy(f, rc.Body); err != nil {
			return err
		}
	}
	return nil
}
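// fetchContent resolves one chunk fid through the master and reads the entire
// chunk body into memory; downloadToFile uses it to reassemble files that were
// uploaded in pieces with "weed upload -maxMB=...".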
func fetchContent(masterFn operation.GetMasterFn, fileId string) (filename string, content []byte, e error) {
	fileUrl, lookupError := operation.LookupFileId(masterFn, fileId)
	if lookupError != nil {
		return "", nil, lookupError
	}
	var rc *http.Response
	if filename, _, rc, e = util.DownloadFile(fileUrl); e != nil {
		return "", nil, e
	}
	defer util.CloseResponse(rc)
	content, e = ioutil.ReadAll(rc.Body)
	return
}
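// WriteFile writes data to filename, creating or truncating the file with the
// given permissions, and reports an incomplete write as io.ErrShortWrite.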
func WriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	f.Close()
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	return err
}
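// A minimal programmatic sketch of calling downloadToFile directly; the master
// address, fid, and target directory below are illustrative assumptions, not
// values taken from this file:
//
//	masterFn := func() string { return "localhost:9333" }
//	if err := downloadToFile(masterFn, "3,01637037d6", "/tmp"); err != nil {
//		fmt.Println("download failed:", err)
//	}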