// needle_map_metric.go
package storage

import (
	"fmt"
	"os"
	"sync/atomic"

	"github.com/chrislusf/seaweedfs/weed/storage/idx"
	. "github.com/chrislusf/seaweedfs/weed/storage/types"
	"github.com/tylertreat/BoomFilters"
)
// mapMetric holds aggregate statistics for a needle map: how many
// entries (and bytes) are live, how many have been deleted, and the
// largest needle key seen so far.
//
// The counters are updated with sync/atomic operations elsewhere in
// this file, so the uint64 fields must remain 8-byte aligned; the two
// leading uint32 fields together occupy 8 bytes, preserving that
// alignment. Do not reorder the fields.
type mapMetric struct {
	DeletionCounter     uint32 `json:"DeletionCounter"`     // number of deleted entries
	FileCounter         uint32 `json:"FileCounter"`         // number of live entries
	DeletionByteCounter uint64 `json:"DeletionByteCounter"` // total bytes of deleted entries
	FileByteCounter     uint64 `json:"FileByteCounter"`     // total bytes of live entries
	MaximumFileKey      uint64 `json:"MaxFileKey"`          // largest needle key observed
}
  17. func (mm *mapMetric) logDelete(deletedByteCount Size) {
  18. if mm == nil {
  19. return
  20. }
  21. mm.LogDeletionCounter(deletedByteCount)
  22. }
  23. func (mm *mapMetric) logPut(key NeedleId, oldSize Size, newSize Size) {
  24. if mm == nil {
  25. return
  26. }
  27. mm.MaybeSetMaxFileKey(key)
  28. mm.LogFileCounter(newSize)
  29. if oldSize > 0 && oldSize.IsValid() {
  30. mm.LogDeletionCounter(oldSize)
  31. }
  32. }
  33. func (mm *mapMetric) LogFileCounter(newSize Size) {
  34. if mm == nil {
  35. return
  36. }
  37. atomic.AddUint32(&mm.FileCounter, 1)
  38. atomic.AddUint64(&mm.FileByteCounter, uint64(newSize))
  39. }
  40. func (mm *mapMetric) LogDeletionCounter(oldSize Size) {
  41. if mm == nil {
  42. return
  43. }
  44. if oldSize > 0 {
  45. atomic.AddUint32(&mm.DeletionCounter, 1)
  46. atomic.AddUint64(&mm.DeletionByteCounter, uint64(oldSize))
  47. }
  48. }
  49. func (mm *mapMetric) ContentSize() uint64 {
  50. if mm == nil {
  51. return 0
  52. }
  53. return atomic.LoadUint64(&mm.FileByteCounter)
  54. }
  55. func (mm *mapMetric) DeletedSize() uint64 {
  56. if mm == nil {
  57. return 0
  58. }
  59. return atomic.LoadUint64(&mm.DeletionByteCounter)
  60. }
  61. func (mm *mapMetric) FileCount() int {
  62. if mm == nil {
  63. return 0
  64. }
  65. return int(atomic.LoadUint32(&mm.FileCounter))
  66. }
  67. func (mm *mapMetric) DeletedCount() int {
  68. if mm == nil {
  69. return 0
  70. }
  71. return int(atomic.LoadUint32(&mm.DeletionCounter))
  72. }
  73. func (mm *mapMetric) MaxFileKey() NeedleId {
  74. if mm == nil {
  75. return 0
  76. }
  77. t := uint64(mm.MaximumFileKey)
  78. return Uint64ToNeedleId(t)
  79. }
  80. func (mm *mapMetric) MaybeSetMaxFileKey(key NeedleId) {
  81. if mm == nil {
  82. return
  83. }
  84. if key > mm.MaxFileKey() {
  85. atomic.StoreUint64(&mm.MaximumFileKey, uint64(key))
  86. }
  87. }
// newNeedleMapMetricFromIndexFile rebuilds aggregate metrics by
// scanning an .idx file from its newest entry to its oldest. The first
// time a key is seen (its newest record) it counts as a live file; any
// earlier record for the same key has been superseded and counts as a
// deletion. Membership is tracked with a Bloom filter sized to the
// entry count, so the 0.1% false-positive rate can slightly over-count
// deletions — NOTE(review): this appears to be an accepted trade-off;
// confirm callers tolerate approximate counts.
func newNeedleMapMetricFromIndexFile(r *os.File) (mm *mapMetric, err error) {
	mm = &mapMetric{}
	var bf *boom.BloomFilter
	// Scratch buffer reused to serialize each key for the filter.
	buf := make([]byte, NeedleIdSize)
	err = reverseWalkIndexFile(r, func(entryCount int64) {
		// Size the filter once the total entry count is known.
		bf = boom.NewBloomFilter(uint(entryCount), 0.001)
	}, func(key NeedleId, offset Offset, size Size) error {
		mm.MaybeSetMaxFileKey(key)
		NeedleIdToBytes(buf, key)
		// Bytes are accumulated for every valid record; superseded
		// records are also added to DeletionByteCounter below, so
		// live bytes = FileByteCounter - DeletionByteCounter.
		if size.IsValid() {
			mm.FileByteCounter += uint64(size)
		}
		if !bf.TestAndAdd(buf) {
			// First (newest) record for this key: a live file.
			mm.FileCounter++
		} else {
			// Key already seen walking backwards: this older record
			// was superseded or deleted.
			mm.DeletionCounter++
			if size.IsValid() {
				// previously already deleted file
				mm.DeletionByteCounter += uint64(size)
			}
		}
		return nil
	})
	return
}
  114. func reverseWalkIndexFile(r *os.File, initFn func(entryCount int64), fn func(key NeedleId, offset Offset, size Size) error) error {
  115. fi, err := r.Stat()
  116. if err != nil {
  117. return fmt.Errorf("file %s stat error: %v", r.Name(), err)
  118. }
  119. fileSize := fi.Size()
  120. if fileSize%NeedleMapEntrySize != 0 {
  121. return fmt.Errorf("unexpected file %s size: %d", r.Name(), fileSize)
  122. }
  123. entryCount := fileSize / NeedleMapEntrySize
  124. initFn(entryCount)
  125. batchSize := int64(1024 * 4)
  126. bytes := make([]byte, NeedleMapEntrySize*batchSize)
  127. nextBatchSize := entryCount % batchSize
  128. if nextBatchSize == 0 {
  129. nextBatchSize = batchSize
  130. }
  131. remainingCount := entryCount - nextBatchSize
  132. for remainingCount >= 0 {
  133. _, e := r.ReadAt(bytes[:NeedleMapEntrySize*nextBatchSize], NeedleMapEntrySize*remainingCount)
  134. // glog.V(0).Infoln("file", r.Name(), "readerOffset", NeedleMapEntrySize*remainingCount, "count", count, "e", e)
  135. if e != nil {
  136. return e
  137. }
  138. for i := int(nextBatchSize) - 1; i >= 0; i-- {
  139. key, offset, size := idx.IdxFileEntry(bytes[i*NeedleMapEntrySize : i*NeedleMapEntrySize+NeedleMapEntrySize])
  140. if e = fn(key, offset, size); e != nil {
  141. return e
  142. }
  143. }
  144. nextBatchSize = batchSize
  145. remainingCount -= nextBatchSize
  146. }
  147. return nil
  148. }