volume_checking.go

package storage

import (
	"fmt"
	"io"
	"os"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/storage/backend"
	"github.com/seaweedfs/seaweedfs/weed/storage/idx"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
	. "github.com/seaweedfs/seaweedfs/weed/storage/types"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
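
// CheckAndFixVolumeDataIntegrity validates the tail of the index file against the
// data file and truncates the index back to the last healthy entry when trailing
// entries point past the end of the data file. It returns the append timestamp of
// the last needle it verified.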
func CheckAndFixVolumeDataIntegrity(v *Volume, indexFile *os.File) (lastAppendAtNs uint64, err error) {
	var indexSize int64
	if indexSize, err = verifyIndexFileIntegrity(indexFile); err != nil {
		return 0, fmt.Errorf("verifyIndexFileIntegrity %s failed: %v", indexFile.Name(), err)
	}
	if indexSize == 0 {
		return 0, nil
	}
	healthyIndexSize := indexSize
	// check and fix the last 10 entries
	for i := 1; i <= 10 && indexSize >= int64(i)*NeedleMapEntrySize; i++ {
		lastAppendAtNs, err = doCheckAndFixVolumeData(v, indexFile, indexSize-int64(i)*NeedleMapEntrySize)
		if err == io.EOF {
			healthyIndexSize = indexSize - int64(i)*NeedleMapEntrySize
			continue
		}
		if err != ErrorSizeMismatch {
			break
		}
	}
	if healthyIndexSize < indexSize {
		glog.Warningf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d", indexFile.Name(), indexSize, healthyIndexSize)
		err = indexFile.Truncate(healthyIndexSize)
		if err != nil {
			glog.Warningf("CheckAndFixVolumeDataIntegrity truncate idx file %s from %d to %d: %v", indexFile.Name(), indexSize, healthyIndexSize, err)
		}
	}
	return
}
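
// doCheckAndFixVolumeData reads one index entry at indexOffset and verifies that the
// referenced needle (or its deletion marker) actually exists in the data file.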
func doCheckAndFixVolumeData(v *Volume, indexFile *os.File, indexOffset int64) (lastAppendAtNs uint64, err error) {
	var lastIdxEntry []byte
	if lastIdxEntry, err = readIndexEntryAtOffset(indexFile, indexOffset); err != nil {
		return 0, fmt.Errorf("readIndexEntryAtOffset %s failed: %v", indexFile.Name(), err)
	}
	key, offset, size := idx.IdxFileEntry(lastIdxEntry)
	if offset.IsZero() {
		return 0, nil
	}
	if size < 0 {
		// read the deletion entry
		if lastAppendAtNs, err = verifyDeletedNeedleIntegrity(v.DataBackend, v.Version(), key); err != nil {
			return lastAppendAtNs, fmt.Errorf("verifyDeletedNeedleIntegrity %s failed: %v", indexFile.Name(), err)
		}
	} else {
		if lastAppendAtNs, err = verifyNeedleIntegrity(v.DataBackend, v.Version(), offset.ToActualOffset(), key, size); err != nil {
			return lastAppendAtNs, err
		}
	}
	return lastAppendAtNs, nil
}
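
// verifyIndexFileIntegrity returns the index file size and reports an error when the
// size is not a whole multiple of NeedleMapEntrySize.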
func verifyIndexFileIntegrity(indexFile *os.File) (indexSize int64, err error) {
	if indexSize, err = util.GetFileSize(indexFile); err == nil {
		if indexSize%NeedleMapEntrySize != 0 {
			err = fmt.Errorf("index file's size is %d bytes, maybe corrupted", indexSize)
		}
	}
	return
}
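
// readIndexEntryAtOffset reads a single NeedleMapEntrySize-byte index entry at offset.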
func readIndexEntryAtOffset(indexFile *os.File, offset int64) (bytes []byte, err error) {
	if offset < 0 {
		err = fmt.Errorf("offset %d for index file is invalid", offset)
		return
	}
	bytes = make([]byte, NeedleMapEntrySize)
	_, err = indexFile.ReadAt(bytes, offset)
	return
}
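
// verifyNeedleIntegrity reads the needle at offset and checks that its size and id
// match the index entry. For version 3 volumes it also verifies that the data file
// ends exactly at the needle's tail, truncating any trailing bytes.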
func verifyNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, offset int64, key NeedleId, size Size) (lastAppendAtNs uint64, err error) {
	n, _, _, err := needle.ReadNeedleHeader(datFile, v, offset)
	if err == io.EOF {
		return 0, err
	}
	if err != nil {
		return 0, fmt.Errorf("read %s at %d: %v", datFile.Name(), offset, err)
	}
	if n.Size != size {
		return 0, ErrorSizeMismatch
	}
	if v == needle.Version3 {
		bytes := make([]byte, TimestampSize)
		_, err = datFile.ReadAt(bytes, offset+NeedleHeaderSize+int64(size)+needle.NeedleChecksumSize)
		if err == io.EOF {
			return 0, err
		}
		if err != nil {
			return 0, fmt.Errorf("verifyNeedleIntegrity check %s entry offset %d size %d: %v", datFile.Name(), offset, size, err)
		}
		n.AppendAtNs = util.BytesToUint64(bytes)
		fileTailOffset := offset + needle.GetActualSize(size, v)
		fileSize, _, err := datFile.GetStat()
		if err != nil {
			return 0, fmt.Errorf("stat file %s: %v", datFile.Name(), err)
		}
		if fileSize == fileTailOffset {
			return n.AppendAtNs, nil
		}
		if fileSize > fileTailOffset {
			glog.Warningf("Truncate %s from %d bytes to %d bytes!", datFile.Name(), fileSize, fileTailOffset)
			err = datFile.Truncate(fileTailOffset)
			if err == nil {
				return n.AppendAtNs, nil
			}
			return n.AppendAtNs, fmt.Errorf("truncate file %s: %v", datFile.Name(), err)
		}
		glog.Warningf("data file %s has %d bytes, less than expected %d bytes!", datFile.Name(), fileSize, fileTailOffset)
	}
	if err = n.ReadData(datFile, offset, size, v); err != nil {
		return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", offset, offset+int64(size), err)
	}
	if n.Id != key {
		return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id)
	}
	return n.AppendAtNs, err
}
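
// verifyDeletedNeedleIntegrity reads the deletion marker appended at the end of the
// data file and checks that its id matches the index entry.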
func verifyDeletedNeedleIntegrity(datFile backend.BackendStorageFile, v needle.Version, key NeedleId) (lastAppendAtNs uint64, err error) {
	n := new(needle.Needle)
	size := n.DiskSize(v)
	var fileSize int64
	fileSize, _, err = datFile.GetStat()
	if err != nil {
		return 0, fmt.Errorf("GetStat: %v", err)
	}
	if err = n.ReadData(datFile, fileSize-size, Size(0), v); err != nil {
		return n.AppendAtNs, fmt.Errorf("read data [%d,%d) : %v", fileSize-size, fileSize, err)
	}
	if n.Id != key {
		return n.AppendAtNs, fmt.Errorf("index key %#x does not match needle's Id %#x", key, n.Id)
	}
	return n.AppendAtNs, err
}
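
// checkIdxFile makes sure an .idx file exists whenever the .dat file holds data
// beyond the super block.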
func (v *Volume) checkIdxFile() error {
	datFileSize, _, err := v.DataBackend.GetStat()
	if err != nil {
		return fmt.Errorf("get stat %s: %v", v.FileName(".dat"), err)
	}
	if datFileSize <= super_block.SuperBlockSize {
		return nil
	}
	indexFileName := v.FileName(".idx")
	if util.FileExists(indexFileName) {
		return nil
	}
	return fmt.Errorf("idx file %s does not exist", indexFileName)
}