download.go 3.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130
  1. package command
  2. import (
  3. "fmt"
  4. "io"
  5. "io/ioutil"
  6. "os"
  7. "path"
  8. "strings"
  9. "github.com/chrislusf/seaweedfs/weed/operation"
  10. "github.com/chrislusf/seaweedfs/weed/util"
  11. )
var (
	// d holds the flag-backed options for the download command,
	// populated in init() and read by runDownload/downloadToFile.
	d DownloadOptions
)
// DownloadOptions bundles the command-line flag values for "weed download".
type DownloadOptions struct {
	server *string // SeaweedFS master address, e.g. "localhost:9333"
	dir    *string // local directory the downloaded files are written into
}
// init wires up the download command's Run function and registers its flags.
func init() {
	cmdDownload.Run = runDownload // break init cycle
	d.server = cmdDownload.Flag.String("server", "localhost:9333", "SeaweedFS master location")
	d.dir = cmdDownload.Flag.String("dir", ".", "Download the whole folder recursively if specified.")
}
// cmdDownload describes the "weed download" CLI subcommand; its Run hook
// is assigned in init() to avoid an initialization cycle.
var cmdDownload = &Command{
	UsageLine: "download -server=localhost:9333 -dir=one_directory fid1 [fid2 fid3 ...]",
	Short:     "download files by file id",
	Long: `download files by file id.
Usually you just need to use curl to lookup the file's volume server, and then download them directly.
This download tool combine the two steps into one.
What's more, if you use "weed upload -maxMB=..." option to upload a big file divided into chunks, you can
use this tool to download the chunks and merge them automatically.
`,
}
  34. func runDownload(cmd *Command, args []string) bool {
  35. for _, fid := range args {
  36. if e := downloadToFile(*d.server, fid, *d.dir); e != nil {
  37. fmt.Println("Download Error: ", fid, e)
  38. }
  39. }
  40. return true
  41. }
  42. func downloadToFile(server, fileId, saveDir string) error {
  43. fileUrl, lookupError := operation.LookupFileId(server, fileId)
  44. if lookupError != nil {
  45. return lookupError
  46. }
  47. filename, rc, err := util.DownloadUrl(fileUrl)
  48. if err != nil {
  49. return err
  50. }
  51. defer rc.Close()
  52. if filename == "" {
  53. filename = fileId
  54. }
  55. isFileList := false
  56. if strings.HasSuffix(filename, "-list") {
  57. // old command compatible
  58. isFileList = true
  59. filename = filename[0 : len(filename)-len("-list")]
  60. }
  61. f, err := os.OpenFile(path.Join(saveDir, filename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.ModePerm)
  62. if err != nil {
  63. return err
  64. }
  65. defer f.Close()
  66. if isFileList {
  67. content, err := ioutil.ReadAll(rc)
  68. if err != nil {
  69. return err
  70. }
  71. fids := strings.Split(string(content), "\n")
  72. for _, partId := range fids {
  73. var n int
  74. _, part, err := fetchContent(*d.server, partId)
  75. if err == nil {
  76. n, err = f.Write(part)
  77. }
  78. if err == nil && n < len(part) {
  79. err = io.ErrShortWrite
  80. }
  81. if err != nil {
  82. return err
  83. }
  84. }
  85. } else {
  86. if _, err = io.Copy(f, rc); err != nil {
  87. return err
  88. }
  89. }
  90. return nil
  91. }
  92. func fetchContent(server string, fileId string) (filename string, content []byte, e error) {
  93. fileUrl, lookupError := operation.LookupFileId(server, fileId)
  94. if lookupError != nil {
  95. return "", nil, lookupError
  96. }
  97. var rc io.ReadCloser
  98. if filename, rc, e = util.DownloadUrl(fileUrl); e != nil {
  99. return "", nil, e
  100. }
  101. content, e = ioutil.ReadAll(rc)
  102. rc.Close()
  103. return
  104. }
  105. func WriteFile(filename string, data []byte, perm os.FileMode) error {
  106. f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
  107. if err != nil {
  108. return err
  109. }
  110. n, err := f.Write(data)
  111. f.Close()
  112. if err == nil && n < len(data) {
  113. err = io.ErrShortWrite
  114. }
  115. return err
  116. }