filer_notify.go

package filer

import (
	"context"
	"fmt"
	"io"
	"math"
	"strings"
	"time"

	"google.golang.org/protobuf/proto"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/notification"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
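
// NotifyUpdateEvent publishes a metadata change (old entry, new entry, or both) to the
// configured external notification queue, if any, and always records it in the local
// metadata log buffer. Paths under SystemLogDir are skipped so that writing the metadata
// log does not generate notifications about its own files.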
func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry, deleteChunks, isFromOtherCluster bool, signatures []int32) {
	var fullpath string
	if oldEntry != nil {
		fullpath = string(oldEntry.FullPath)
	} else if newEntry != nil {
		fullpath = string(newEntry.FullPath)
	} else {
		return
	}

	// println("fullpath:", fullpath)

	if strings.HasPrefix(fullpath, SystemLogDir) {
		return
	}

	foundSelf := false
	for _, sig := range signatures {
		if sig == f.Signature {
			foundSelf = true
		}
	}
	if !foundSelf {
		signatures = append(signatures, f.Signature)
	}

	newParentPath := ""
	if newEntry != nil {
		newParentPath, _ = newEntry.FullPath.DirAndName()
	}
	eventNotification := &filer_pb.EventNotification{
		OldEntry:           oldEntry.ToProtoEntry(),
		NewEntry:           newEntry.ToProtoEntry(),
		DeleteChunks:       deleteChunks,
		NewParentPath:      newParentPath,
		IsFromOtherCluster: isFromOtherCluster,
		Signatures:         signatures,
	}

	if notification.Queue != nil {
		glog.V(3).Infof("notifying entry update %v", fullpath)
		if err := notification.Queue.SendMessage(fullpath, eventNotification); err != nil {
			// the message is dropped; just log the error and continue
			glog.Error(err)
		}
	}

	f.logMetaEvent(ctx, fullpath, eventNotification)
}
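
// logMetaEvent serializes the event as a filer_pb.SubscribeMetadataResponse and appends
// it to the in-memory local metadata log buffer, keyed by the parent directory.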
func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotification *filer_pb.EventNotification) {

	dir, _ := util.FullPath(fullpath).DirAndName()

	event := &filer_pb.SubscribeMetadataResponse{
		Directory:         dir,
		EventNotification: eventNotification,
		TsNs:              time.Now().UnixNano(),
	}
	data, err := proto.Marshal(event)
	if err != nil {
		glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
		return
	}

	f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
}
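
// logFlushFunc persists a flushed metadata log buffer to a per-minute file under
// SystemLogDir, named <yyyy-mm-dd>/<hh-mm>.<unique filer id>. It retries forever on
// write failure so that flushed metadata is not silently lost.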
func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {

	if len(buf) == 0 {
		return
	}

	startTime, stopTime = startTime.UTC(), stopTime.UTC()

	targetFile := fmt.Sprintf("%s/%04d-%02d-%02d/%02d-%02d.%08x", SystemLogDir,
		startTime.Year(), startTime.Month(), startTime.Day(), startTime.Hour(), startTime.Minute(), f.UniqueFilerId,
		// startTime.Second(), startTime.Nanosecond(),
	)

	for {
		if err := f.appendToFile(targetFile, buf); err != nil {
			glog.V(0).Infof("metadata log write failed %s: %v", targetFile, err)
			time.Sleep(737 * time.Millisecond)
		} else {
			break
		}
	}
}
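
// ReadPersistedLogBuffer replays metadata log entries that were previously flushed to
// SystemLogDir, walking the per-day directories and per-minute files that fall within
// the [startTime, stopTsNs] window and invoking eachLogEntryFn for each matching entry.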
func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, isDone bool, err error) {

	startTime = startTime.UTC()
	startDate := fmt.Sprintf("%04d-%02d-%02d", startTime.Year(), startTime.Month(), startTime.Day())
	startHourMinute := fmt.Sprintf("%02d-%02d", startTime.Hour(), startTime.Minute())
	var stopDate, stopHourMinute string
	if stopTsNs != 0 {
		stopTime := time.Unix(0, stopTsNs+24*60*60*int64(time.Nanosecond)).UTC()
		stopDate = fmt.Sprintf("%04d-%02d-%02d", stopTime.Year(), stopTime.Month(), stopTime.Day())
		stopHourMinute = fmt.Sprintf("%02d-%02d", stopTime.Hour(), stopTime.Minute())
	}

	sizeBuf := make([]byte, 4)
	startTsNs := startTime.UnixNano()

	dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
	if listDayErr != nil {
		return lastTsNs, isDone, fmt.Errorf("fail to list log by day: %v", listDayErr)
	}
	for _, dayEntry := range dayEntries {
		if stopDate != "" {
			if strings.Compare(dayEntry.Name(), stopDate) > 0 {
				break
			}
		}
		// println("checking day", dayEntry.FullPath)
		hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, math.MaxInt32, "", "", "")
		if listHourMinuteErr != nil {
			return lastTsNs, isDone, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
		}
		for _, hourMinuteEntry := range hourMinuteEntries {
			// println("checking hh-mm", hourMinuteEntry.FullPath)
			if dayEntry.Name() == startDate {
				hourMinute := util.FileNameBase(hourMinuteEntry.Name())
				if strings.Compare(hourMinute, startHourMinute) < 0 {
					continue
				}
			}
			if dayEntry.Name() == stopDate {
				hourMinute := util.FileNameBase(hourMinuteEntry.Name())
				if strings.Compare(hourMinute, stopHourMinute) > 0 {
					break
				}
			}
			// println("processing", hourMinuteEntry.FullPath)
			chunkedFileReader := NewChunkStreamReaderFromFiler(f.MasterClient, hourMinuteEntry.GetChunks())
			if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, stopTsNs, eachLogEntryFn); err != nil {
				chunkedFileReader.Close()
				if err == io.EOF {
					continue
				}
				return lastTsNs, isDone, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
			}
			chunkedFileReader.Close()
		}
	}

	return lastTsNs, isDone, nil
}
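
// ReadEachLogEntry decodes length-prefixed filer_pb.LogEntry records from r: each record
// is a 4-byte size prefix (read into sizeBuf) followed by the marshaled entry. Entries
// with TsNs at or before startTsNs are skipped, and reading stops once an entry passes
// stopTsNs (when non-zero) or the reader is exhausted. It returns the timestamp of the
// last entry handed to eachLogEntryFn.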
func ReadEachLogEntry(r io.Reader, sizeBuf []byte, startTsNs, stopTsNs int64, eachLogEntryFn func(logEntry *filer_pb.LogEntry) error) (lastTsNs int64, err error) {
	for {
		n, err := r.Read(sizeBuf)
		if err != nil {
			return lastTsNs, err
		}
		if n != 4 {
			return lastTsNs, fmt.Errorf("size %d bytes, expected 4 bytes", n)
		}
		size := util.BytesToUint32(sizeBuf)
		// println("entry size", size)
		entryData := make([]byte, size)
		n, err = r.Read(entryData)
		if err != nil {
			return lastTsNs, err
		}
		if n != int(size) {
			return lastTsNs, fmt.Errorf("entry data %d bytes, expected %d bytes", n, size)
		}
		logEntry := &filer_pb.LogEntry{}
		if err = proto.Unmarshal(entryData, logEntry); err != nil {
			return lastTsNs, err
		}
		if logEntry.TsNs <= startTsNs {
			continue
		}
		if stopTsNs != 0 && logEntry.TsNs > stopTsNs {
			return lastTsNs, err
		}
		// println("each log: ", logEntry.TsNs)
		if err := eachLogEntryFn(logEntry); err != nil {
			return lastTsNs, err
		} else {
			lastTsNs = logEntry.TsNs
		}
	}
}