filer_notify.go
package filer

import (
	"context"
	"fmt"
	"io"
	"math"
	"regexp"
	"strings"
	"time"

	"google.golang.org/protobuf/proto"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/notification"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
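
// NotifyUpdateEvent publishes a metadata change event for oldEntry/newEntry to the
// configured notification queue (if any) and appends it to the local metadata log.
// Changes under SystemLogDir are skipped to avoid recursive logging, and the filer's
// own signature is appended so subscribers can detect and break update loops.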
func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) {
	var fullpath string
	if oldEntry != nil {
		fullpath = string(oldEntry.FullPath)
	} else if newEntry != nil {
		fullpath = string(newEntry.FullPath)
	} else {
		return
	}

	// println("fullpath:", fullpath)

	if strings.HasPrefix(fullpath, SystemLogDir) {
		return
	}

	foundSelf := false
	for _, sig := range signatures {
		if sig == f.Signature {
			foundSelf = true
		}
	}
	if !foundSelf {
		signatures = append(signatures, f.Signature)
	}

	newParentPath := ""
	if newEntry != nil {
		newParentPath, _ = newEntry.FullPath.DirAndName()
	}
	eventNotification := &filer_pb.EventNotification{
		OldEntry:           oldEntry.ToProtoEntry(),
		NewEntry:           newEntry.ToProtoEntry(),
		DeleteChunks:       deleteChunks,
		NewParentPath:      newParentPath,
		IsFromOtherCluster: isFromOtherCluster,
		Signatures:         signatures,
	}

	if notification.Queue != nil {
		glog.V(3).Infof("notifying entry update %v", fullpath)
		if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
			// the message is dropped; just log the error
			glog.Error(err)
		}
	}

	f.logMetaEvent(ctx, fullpath, eventNotification)
}
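
// logMetaEvent wraps the event in a filer_pb.SubscribeMetadataResponse, marshals it,
// and adds it to the local metadata log buffer, keyed by the parent directory.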
func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {

	dir, _ := util.FullPath(fullpath).DirAndName()

	event := &filer_pb.SubscribeMetadataResponse{
		Directory:         dir,
		EventNotification: eventNotification,
		TsNs:              time.Now().UnixNano(),
	}
	data, err := proto.Marshal(event)
	if err != nil {
		glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
		return
	}

	f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
}
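
// logFlushFunc persists a flushed metadata log buffer under SystemLogDir, in a file
// named by the buffer's start time (yyyy-mm-dd/HH-MM.<filer id>). Writes are retried
// until they succeed so that metadata events are not silently lost.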
func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {

	if len(buf) == 0 {
		return
	}

	startTime, stopTime = startTime.UTC(), stopTime.UTC()

	targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.%08x", SystemLogDir,
		startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), f.UniqueFilerId,
		// startTime.Second(), startTime.Nanosecond(),
	)

	for {
		if err := f.appendToFile(targetFile, buf); err != nil {
			glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err)
			time.Sleep(737 * time.Millisecond)
		} else {
			break
		}
	}
}
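
// VolumeNotFoundPattern matches read errors caused by a missing volume, so the
// affected log file can be skipped instead of aborting the whole scan.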
var (
	VolumeNotFoundPattern = regexp.MustCompile(`volume \d+? not found`)
)
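
// ReadPersistedLogBuffer replays metadata log files persisted under SystemLogDir,
// walking the per-day directories and per-minute files that fall between startTime
// and stopTsNs (0 means no upper bound) and invoking eachLogEntryFn for every entry.
// It returns the timestamp of the last entry processed.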
func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, isDone bool, err error) {

	startTime = startTime.UTC()
	startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
	startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute())
	var stopDate, stopHourMinute string
	if stopTsNs != 0 {
		stopTime := time.Unix(0, stopTsNs+24*60*60*int64(time.Nanosecond)).UTC()
		stopDate = fmt.Sprintf("%04d-%02d-%02d", stopTime.Year(), stopTime.Month(), stopTime.Day())
		stopHourMinute = fmt.Sprintf("%02d-%02d", stopTime.Hour(), stopTime.Minute())
	}

	sizeBuf := make([]byte, 4)
	startTsNs := startTime.UnixNano()

	dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
	if listDayErr != nil {
		return lastTsNs, isDone, fmt.Errorf("fail to list log by day: %v", listDayErr)
	}
	for _, dayEntry := range dayEntries {
		if stopDate != "" {
			if strings.Compare(dayEntry.Name(), stopDate) > 0 {
				break
			}
		}
		// println("checking day", dayEntry.FullPath)
		hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "")
		if listHourMinuteErr != nil {
			return lastTsNs, isDone, fmt.Errorf("fail to list log %s by hour-minute: %v", dayEntry.Name(), listHourMinuteErr)
		}
		for _, hourMinuteEntry := range hourMinuteEntries {
			// println("checking hh-mm", hourMinuteEntry.FullPath)
			if dayEntry.Name() == startDate {
				hourMinute := util.FileNameBase(hourMinuteEntry.Name())
				if strings.Compare(hourMinute, startHourMinute) < 0 {
					continue
				}
			}
			if dayEntry.Name() == stopDate {
				hourMinute := util.FileNameBase(hourMinuteEntry.Name())
				if strings.Compare(hourMinute, stopHourMinute) > 0 {
					break
				}
			}
			// println("processing", hourMinuteEntry.FullPath)
			chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.GetChunks())
			if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil {
				chunkedFileReader.Close()
				if err == io.EOF {
					continue
				}
				if VolumeNotFoundPattern.MatchString(err.Error()) {
					glog.Warningf("skipping reading %s: %v", hourMinuteEntry.FullPath, err)
					continue
				}
				return lastTsNs, isDone, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
			}
			chunkedFileReader.Close()
		}
	}

	return lastTsNs, isDone, nil
}
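
// ReadEachLogEntry reads length-prefixed filer_pb.LogEntry records from r: each record
// is a 4-byte size followed by the marshaled entry. Entries at or before startTsNs are
// skipped; reading stops once an entry is newer than stopTsNs (when stopTsNs is non-zero)
// or the reader is exhausted, which surfaces as io.EOF to the caller.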
func ReadEachLogEntry(r io.Reader, sizeBuf []byte, startTsNs, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
	for {
		n, err := r.Read(sizeBuf)
		if err != nil {
			return lastTsNs, err
		}
		if n != 4 {
			return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
		}
		size := util.BytesToUint32(sizeBuf)
		// println("entry size", size)
		entryData := make([]byte, size)
		n, err = r.Read(entryData)
		if err != nil {
			return lastTsNs, err
		}
		if n != int(size) {
			return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
		}
		logEntry := &filer_pb.LogEntry{}
		if err = proto.Unmarshal(entryData, logEntry); err != nil {
			return lastTsNs, err
		}
		if logEntry.TsNs <= startTsNs {
			continue
		}
		if stopTsNs != 0 && logEntry.TsNs > stopTsNs {
			// reached the stop timestamp; stop without an error
			return lastTsNs, nil
		}
		// println("each log: ", logEntry.TsNs)
		if err := eachLogEntryFn(logEntry); err != nil {
			return lastTsNs, err
		} else {
			lastTsNs = logEntry.TsNs
		}
	}
}
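
// The sketch below is illustrative only and not part of the original file: it shows how
// a caller might replay persisted metadata events by combining ReadPersistedLogBuffer
// with a callback that unmarshals each LogEntry's Data back into the
// filer_pb.SubscribeMetadataResponse written by logMetaEvent above. The function name
// and its parameters are hypothetical.
func exampleReplayPersistedEvents(f *Filer, since time.Time) error {
	_, _, err := f.ReadPersistedLogBuffer(since, 0, func(logEntry *filer_pb.LogEntry) error {
		event := &filer_pb.SubscribeMetadataResponse{}
		if unmarshalErr := proto.Unmarshal(logEntry.Data, event); unmarshalErr != nil {
			return unmarshalErr
		}
		// each replayed event carries the directory and the original event notification
		glog.V(3).Infof("replayed event in %s at %d", event.Directory, event.TsNs)
		return nil
	})
	return err
}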