filer_notify.go

package filer2

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/golang/protobuf/proto"

	"github.com/chrislusf/seaweedfs/weed/glog"
	"github.com/chrislusf/seaweedfs/weed/notification"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
)
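
// NotifyUpdateEvent publishes a create/update/delete event for a filer entry:
// it sends the event to the configured notification queue (if any) and always
// appends it to the local metadata log. Changes under SystemLogDir are skipped
// so that writing the log does not notify about itself.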
func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool) {
	var fullpath string
	if oldEntry != nil {
		fullpath = string(oldEntry.FullPath)
	} else if newEntry != nil {
		fullpath = string(newEntry.FullPath)
	} else {
		return
	}

	// println("fullpath:", fullpath)

	if strings.HasPrefix(fullpath, SystemLogDir) {
		return
	}

	newParentPath := ""
	if newEntry != nil {
		newParentPath, _ = newEntry.FullPath.DirAndName()
	}
	eventNotification := &filer_pb.EventNotification{
		OldEntry:           oldEntry.ToProtoEntry(),
		NewEntry:           newEntry.ToProtoEntry(),
		DeleteChunks:       deleteChunks,
		NewParentPath:      newParentPath,
		IsFromOtherCluster: isFromOtherCluster,
	}

	if notification.Queue != nil {
		glog.V(3).Infof("notifying entry update %v", fullpath)
		notification.Queue.SendMessage(fullpath, eventNotification)
	}

	f.logMetaEvent(ctx, fullpath, eventNotification)
}
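
// logMetaEvent wraps the event in a filer_pb.SubscribeMetadataResponse,
// marshals it, and appends it to the local in-memory metadata log buffer,
// keyed by the entry's parent directory.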
func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {

	dir, _ := util.FullPath(fullpath).DirAndName()

	event := &filer_pb.SubscribeMetadataResponse{
		Directory:         dir,
		EventNotification: eventNotification,
		TsNs:              time.Now().UnixNano(),
	}
	data, err := proto.Marshal(event)
	if err != nil {
		glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
		return
	}

	f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data)
}
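
// logFlushFunc persists a flushed log buffer to a per-minute segment file
// under SystemLogDir (yyyy-mm-dd/HH-MM.segment), retrying until the append
// succeeds.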
func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
	targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.segment", SystemLogDir,
		startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(),
		// startTime.Second(), startTime.Nanosecond(),
	)

	for {
		if err := f.appendToFile(targetFile, buf); err != nil {
			glog.V(1).Infof("log write failed %s: %v", targetFile, err)
			time.Sleep(737 * time.Millisecond)
		} else {
			break
		}
	}
}
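
// ReadPersistedLogBuffer replays metadata log segments persisted under
// SystemLogDir, starting from startTime. It walks the per-day directories,
// then the per-minute segment files, and invokes eachLogEntryFn for every
// log entry newer than startTime, returning the timestamp of the last entry
// processed.
//
// A minimal usage sketch (assuming, as logMetaEvent above suggests, that each
// log entry's Data field holds a marshaled filer_pb.SubscribeMetadataResponse):
//
//	lastTsNs, err := f.ReadPersistedLogBuffer(sinceTime, func(logEntry *filer_pb.LogEntry) error {
//		event := &filer_pb.SubscribeMetadataResponse{}
//		if err := proto.Unmarshal(logEntry.Data, event); err != nil {
//			return err
//		}
//		// react to event.EventNotification here
//		return nil
//	})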
func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {

	startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
	startHourMinute := fmt.Sprintf("%02d-%02d.segment", startTime.Hour(), startTime.Minute())

	sizeBuf := make([]byte, 4)
	startTsNs := startTime.UnixNano()

	dayEntries, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366)
	if listDayErr != nil {
		return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
	}
	for _, dayEntry := range dayEntries {
		// println("checking day", dayEntry.FullPath)
		hourMinuteEntries, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60)
		if listHourMinuteErr != nil {
			return lastTsNs, fmt.Errorf("fail to list log %s by hour-minute: %v", dayEntry.Name(), listHourMinuteErr)
		}
		for _, hourMinuteEntry := range hourMinuteEntries {
			// println("checking hh-mm", hourMinuteEntry.FullPath)
			if dayEntry.Name() == startDate {
				// within the starting day, skip segments before the starting minute
				if strings.Compare(hourMinuteEntry.Name(), startHourMinute) < 0 {
					continue
				}
			}
			// println("processing", hourMinuteEntry.FullPath)
			chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.Chunks)
			if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
				chunkedFileReader.Close()
				if err == io.EOF {
					// io.EOF only means this segment is fully consumed;
					// continue with the next segment instead of skipping the rest of the day
					continue
				}
				return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
			}
			chunkedFileReader.Close()
		}
	}

	return lastTsNs, nil
}
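
// ReadEachLogEntry decodes length-prefixed filer_pb.LogEntry records from r
// (a 4-byte size followed by the marshaled entry), skips entries with a
// timestamp at or before ns, and calls eachLogEntryFn for the rest. It reads
// until r is exhausted, so in the normal case the returned error is io.EOF;
// lastTsNs is the timestamp of the last entry handed to the callback.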
func ReadEachLogEntry(r io.Reader, sizeBuf []byte, ns int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
	for {
		n, err := r.Read(sizeBuf)
		if err != nil {
			return lastTsNs, err
		}
		if n != 4 {
			return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
		}
		size := util.BytesToUint32(sizeBuf)
		// println("entry size", size)
		entryData := make([]byte, size)
		n, err = r.Read(entryData)
		if err != nil {
			return lastTsNs, err
		}
		if n != int(size) {
			return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
		}
		logEntry := &filer_pb.LogEntry{}
		if err = proto.Unmarshal(entryData, logEntry); err != nil {
			return lastTsNs, err
		}
		if logEntry.TsNs <= ns {
			// skip entries at or before the requested start time instead of
			// returning, so newer entries later in the same stream are not lost
			continue
		}
		// println("each log: ", logEntry.TsNs)
		if err := eachLogEntryFn(logEntry); err != nil {
			return lastTsNs, err
		}
		lastTsNs = logEntry.TsNs
	}
}