// filer_sink.go
  1. package filersink
  2. import (
  3. "context"
  4. "fmt"
  5. "github.com/seaweedfs/seaweedfs/weed/pb"
  6. "github.com/seaweedfs/seaweedfs/weed/wdclient"
  7. "math"
  8. "google.golang.org/grpc"
  9. "github.com/seaweedfs/seaweedfs/weed/security"
  10. "github.com/seaweedfs/seaweedfs/weed/filer"
  11. "github.com/seaweedfs/seaweedfs/weed/glog"
  12. "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
  13. "github.com/seaweedfs/seaweedfs/weed/replication/sink"
  14. "github.com/seaweedfs/seaweedfs/weed/replication/source"
  15. "github.com/seaweedfs/seaweedfs/weed/util"
  16. )
// FilerSink replicates filer entries (metadata and chunk data) into a
// destination filer cluster. It is registered as a replication sink in init.
type FilerSink struct {
	filerSource       *source.FilerSource // source filer that chunk data is read from (set via SetSourceFiler)
	grpcAddress       string              // gRPC address of the destination filer
	dir               string              // destination directory to sink entries under
	replication       string              // replication setting applied to replicated chunks
	collection        string              // destination collection
	ttlSec            int32               // TTL in seconds for replicated data
	diskType          string              // destination disk type
	dataCenter        string              // preferred data center for writes
	grpcDialOption    grpc.DialOption     // dial option (TLS) for gRPC connections
	address           string              // server address of the destination filer; derived from grpcAddress when empty
	writeChunkByFiler bool                // if true, upload chunk data through the filer rather than to volume servers
	isIncremental     bool                // if true, this sink records changes incrementally
	executor          *util.LimitedConcurrentExecutor // bounds concurrent chunk replication
	signature         int32               // random signature identifying this sink's own writes
}
  33. func init() {
  34. sink.Sinks = append(sink.Sinks, &FilerSink{})
  35. }
  36. func (fs *FilerSink) GetName() string {
  37. return "filer"
  38. }
  39. func (fs *FilerSink) GetSinkToDirectory() string {
  40. return fs.dir
  41. }
  42. func (fs *FilerSink) IsIncremental() bool {
  43. return fs.isIncremental
  44. }
  45. func (fs *FilerSink) Initialize(configuration util.Configuration, prefix string) error {
  46. fs.isIncremental = configuration.GetBool(prefix + "is_incremental")
  47. fs.dataCenter = configuration.GetString(prefix + "dataCenter")
  48. fs.signature = util.RandomInt32()
  49. return fs.DoInitialize(
  50. "",
  51. configuration.GetString(prefix+"grpcAddress"),
  52. configuration.GetString(prefix+"directory"),
  53. configuration.GetString(prefix+"replication"),
  54. configuration.GetString(prefix+"collection"),
  55. configuration.GetInt(prefix+"ttlSec"),
  56. configuration.GetString(prefix+"disk"),
  57. security.LoadClientTLS(util.GetViper(), "grpc.client"),
  58. false)
  59. }
  60. func (fs *FilerSink) SetSourceFiler(s *source.FilerSource) {
  61. fs.filerSource = s
  62. }
  63. func (fs *FilerSink) DoInitialize(address, grpcAddress string, dir string,
  64. replication string, collection string, ttlSec int, diskType string, grpcDialOption grpc.DialOption, writeChunkByFiler bool) (err error) {
  65. fs.address = address
  66. if fs.address == "" {
  67. fs.address = pb.GrpcAddressToServerAddress(grpcAddress)
  68. }
  69. fs.grpcAddress = grpcAddress
  70. fs.dir = dir
  71. fs.replication = replication
  72. fs.collection = collection
  73. fs.ttlSec = int32(ttlSec)
  74. fs.diskType = diskType
  75. fs.grpcDialOption = grpcDialOption
  76. fs.writeChunkByFiler = writeChunkByFiler
  77. fs.executor = util.NewLimitedConcurrentExecutor(32)
  78. return nil
  79. }
  80. func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bool, signatures []int32) error {
  81. dir, name := util.FullPath(key).DirAndName()
  82. glog.V(4).Infof("delete entry: %v", key)
  83. err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
  84. if err != nil {
  85. glog.V(0).Infof("delete entry %s: %v", key, err)
  86. return fmt.Errorf("delete entry %s: %v", key, err)
  87. }
  88. return nil
  89. }
  90. func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures []int32) error {
  91. return fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
  92. dir, name := util.FullPath(key).DirAndName()
  93. // look up existing entry
  94. lookupRequest := &filer_pb.LookupDirectoryEntryRequest{
  95. Directory: dir,
  96. Name: name,
  97. }
  98. // glog.V(1).Infof("lookup: %v", lookupRequest)
  99. if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
  100. if filer.ETag(resp.Entry) == filer.ETag(entry) {
  101. glog.V(3).Infof("already replicated %s", key)
  102. return nil
  103. }
  104. }
  105. replicatedChunks, err := fs.replicateChunks(entry.GetChunks(), key)
  106. if err != nil {
  107. // only warning here since the source chunk may have been deleted already
  108. glog.Warningf("replicate entry chunks %s: %v", key, err)
  109. return nil
  110. }
  111. // glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.GetChunks(), replicatedChunks)
  112. request := &filer_pb.CreateEntryRequest{
  113. Directory: dir,
  114. Entry: &filer_pb.Entry{
  115. Name: name,
  116. IsDirectory: entry.IsDirectory,
  117. Attributes: entry.Attributes,
  118. Extended: entry.Extended,
  119. Chunks: replicatedChunks,
  120. Content: entry.Content,
  121. RemoteEntry: entry.RemoteEntry,
  122. },
  123. IsFromOtherCluster: true,
  124. Signatures: signatures,
  125. }
  126. glog.V(3).Infof("create: %v", request)
  127. if err := filer_pb.CreateEntry(client, request); err != nil {
  128. glog.V(0).Infof("create entry %s: %v", key, err)
  129. return fmt.Errorf("create entry %s: %v", key, err)
  130. }
  131. return nil
  132. })
  133. }
  134. func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParentPath string, newEntry *filer_pb.Entry, deleteIncludeChunks bool, signatures []int32) (foundExistingEntry bool, err error) {
  135. dir, name := util.FullPath(key).DirAndName()
  136. // read existing entry
  137. var existingEntry *filer_pb.Entry
  138. err = fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
  139. request := &filer_pb.LookupDirectoryEntryRequest{
  140. Directory: dir,
  141. Name: name,
  142. }
  143. glog.V(4).Infof("lookup entry: %v", request)
  144. resp, err := filer_pb.LookupEntry(client, request)
  145. if err != nil {
  146. glog.V(0).Infof("lookup %s: %v", key, err)
  147. return err
  148. }
  149. existingEntry = resp.Entry
  150. return nil
  151. })
  152. if err != nil {
  153. return false, fmt.Errorf("lookup %s: %v", key, err)
  154. }
  155. glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
  156. if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
  157. // skip if already changed
  158. // this usually happens when the messages are not ordered
  159. glog.V(2).Infof("late updates %s", key)
  160. } else {
  161. // find out what changed
  162. deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)
  163. if err != nil {
  164. return true, fmt.Errorf("replicate %s compare chunks error: %v", key, err)
  165. }
  166. // delete the chunks that are deleted from the source
  167. if deleteIncludeChunks {
  168. // remove the deleted chunks. Actual data deletion happens in filer UpdateEntry FindUnusedFileChunks
  169. existingEntry.Chunks = filer.DoMinusChunksBySourceFileId(existingEntry.GetChunks(), deletedChunks)
  170. }
  171. // replicate the chunks that are new in the source
  172. replicatedChunks, err := fs.replicateChunks(newChunks, key)
  173. if err != nil {
  174. glog.Warningf("replicate entry chunks %s: %v", key, err)
  175. return true, nil
  176. }
  177. existingEntry.Chunks = append(existingEntry.GetChunks(), replicatedChunks...)
  178. existingEntry.Attributes = newEntry.Attributes
  179. existingEntry.Extended = newEntry.Extended
  180. existingEntry.HardLinkId = newEntry.HardLinkId
  181. existingEntry.HardLinkCounter = newEntry.HardLinkCounter
  182. existingEntry.Content = newEntry.Content
  183. existingEntry.RemoteEntry = newEntry.RemoteEntry
  184. }
  185. // save updated meta data
  186. return true, fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
  187. request := &filer_pb.UpdateEntryRequest{
  188. Directory: newParentPath,
  189. Entry: existingEntry,
  190. IsFromOtherCluster: true,
  191. Signatures: signatures,
  192. }
  193. if _, err := client.UpdateEntry(context.Background(), request); err != nil {
  194. return fmt.Errorf("update existingEntry %s: %v", key, err)
  195. }
  196. return nil
  197. })
  198. }
  199. func compareChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, oldEntry, newEntry *filer_pb.Entry) (deletedChunks, newChunks []*filer_pb.FileChunk, err error) {
  200. aData, aMeta, aErr := filer.ResolveChunkManifest(lookupFileIdFn, oldEntry.GetChunks(), 0, math.MaxInt64)
  201. if aErr != nil {
  202. return nil, nil, aErr
  203. }
  204. bData, bMeta, bErr := filer.ResolveChunkManifest(lookupFileIdFn, newEntry.GetChunks(), 0, math.MaxInt64)
  205. if bErr != nil {
  206. return nil, nil, bErr
  207. }
  208. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aData, bData)...)
  209. deletedChunks = append(deletedChunks, filer.DoMinusChunks(aMeta, bMeta)...)
  210. newChunks = append(newChunks, filer.DoMinusChunks(bData, aData)...)
  211. newChunks = append(newChunks, filer.DoMinusChunks(bMeta, aMeta)...)
  212. return
  213. }