// ec_decoder.go
  1. package erasure_coding
  2. import (
  3. "fmt"
  4. "io"
  5. "os"
  6. "github.com/seaweedfs/seaweedfs/weed/storage/backend"
  7. "github.com/seaweedfs/seaweedfs/weed/storage/idx"
  8. "github.com/seaweedfs/seaweedfs/weed/storage/needle"
  9. "github.com/seaweedfs/seaweedfs/weed/storage/needle_map"
  10. "github.com/seaweedfs/seaweedfs/weed/storage/super_block"
  11. "github.com/seaweedfs/seaweedfs/weed/storage/types"
  12. "github.com/seaweedfs/seaweedfs/weed/util"
  13. )
  14. // write .idx file from .ecx and .ecj files
  15. func WriteIdxFileFromEcIndex(baseFileName string) (err error) {
  16. ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644)
  17. if openErr != nil {
  18. return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
  19. }
  20. defer ecxFile.Close()
  21. idxFile, openErr := os.OpenFile(baseFileName+".idx", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
  22. if openErr != nil {
  23. return fmt.Errorf("cannot open %s.idx: %v", baseFileName, openErr)
  24. }
  25. defer idxFile.Close()
  26. io.Copy(idxFile, ecxFile)
  27. err = iterateEcjFile(baseFileName, func(key types.NeedleId) error {
  28. bytes := needle_map.ToBytes(key, types.Offset{}, types.TombstoneFileSize)
  29. idxFile.Write(bytes)
  30. return nil
  31. })
  32. return err
  33. }
  34. // FindDatFileSize calculate .dat file size from max offset entry
  35. // there may be extra deletions after that entry
  36. // but they are deletions anyway
  37. func FindDatFileSize(dataBaseFileName, indexBaseFileName string) (datSize int64, err error) {
  38. version, err := readEcVolumeVersion(dataBaseFileName)
  39. if err != nil {
  40. return 0, fmt.Errorf("read ec volume %s version: %v", dataBaseFileName, err)
  41. }
  42. err = iterateEcxFile(indexBaseFileName, func(key types.NeedleId, offset types.Offset, size types.Size) error {
  43. if size.IsDeleted() {
  44. return nil
  45. }
  46. entryStopOffset := offset.ToActualOffset() + needle.GetActualSize(size, version)
  47. if datSize < entryStopOffset {
  48. datSize = entryStopOffset
  49. }
  50. return nil
  51. })
  52. return
  53. }
  54. func readEcVolumeVersion(baseFileName string) (version needle.Version, err error) {
  55. // find volume version
  56. datFile, err := os.OpenFile(baseFileName+".ec00", os.O_RDONLY, 0644)
  57. if err != nil {
  58. return 0, fmt.Errorf("open ec volume %s superblock: %v", baseFileName, err)
  59. }
  60. datBackend := backend.NewDiskFile(datFile)
  61. superBlock, err := super_block.ReadSuperBlock(datBackend)
  62. datBackend.Close()
  63. if err != nil {
  64. return 0, fmt.Errorf("read ec volume %s superblock: %v", baseFileName, err)
  65. }
  66. return superBlock.Version, nil
  67. }
  68. func iterateEcxFile(baseFileName string, processNeedleFn func(key types.NeedleId, offset types.Offset, size types.Size) error) error {
  69. ecxFile, openErr := os.OpenFile(baseFileName+".ecx", os.O_RDONLY, 0644)
  70. if openErr != nil {
  71. return fmt.Errorf("cannot open ec index %s.ecx: %v", baseFileName, openErr)
  72. }
  73. defer ecxFile.Close()
  74. buf := make([]byte, types.NeedleMapEntrySize)
  75. for {
  76. n, err := ecxFile.Read(buf)
  77. if n != types.NeedleMapEntrySize {
  78. if err == io.EOF {
  79. return nil
  80. }
  81. return err
  82. }
  83. key, offset, size := idx.IdxFileEntry(buf)
  84. if processNeedleFn != nil {
  85. err = processNeedleFn(key, offset, size)
  86. }
  87. if err != nil {
  88. if err != io.EOF {
  89. return err
  90. }
  91. return nil
  92. }
  93. }
  94. }
  95. func iterateEcjFile(baseFileName string, processNeedleFn func(key types.NeedleId) error) error {
  96. if !util.FileExists(baseFileName + ".ecj") {
  97. return nil
  98. }
  99. ecjFile, openErr := os.OpenFile(baseFileName+".ecj", os.O_RDONLY, 0644)
  100. if openErr != nil {
  101. return fmt.Errorf("cannot open ec index %s.ecj: %v", baseFileName, openErr)
  102. }
  103. defer ecjFile.Close()
  104. buf := make([]byte, types.NeedleIdSize)
  105. for {
  106. n, err := ecjFile.Read(buf)
  107. if n != types.NeedleIdSize {
  108. if err == io.EOF {
  109. return nil
  110. }
  111. return err
  112. }
  113. if processNeedleFn != nil {
  114. err = processNeedleFn(types.BytesToNeedleId(buf))
  115. }
  116. if err != nil {
  117. if err == io.EOF {
  118. return nil
  119. }
  120. return err
  121. }
  122. }
  123. }
  124. // WriteDatFile generates .dat from .ec00 ~ .ec09 files
  125. func WriteDatFile(baseFileName string, datFileSize int64, shardFileNames []string) error {
  126. datFile, openErr := os.OpenFile(baseFileName+".dat", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
  127. if openErr != nil {
  128. return fmt.Errorf("cannot write volume %s.dat: %v", baseFileName, openErr)
  129. }
  130. defer datFile.Close()
  131. inputFiles := make([]*os.File, DataShardsCount)
  132. defer func() {
  133. for shardId := 0; shardId < DataShardsCount; shardId++ {
  134. if inputFiles[shardId] != nil {
  135. inputFiles[shardId].Close()
  136. }
  137. }
  138. }()
  139. for shardId := 0; shardId < DataShardsCount; shardId++ {
  140. inputFiles[shardId], openErr = os.OpenFile(shardFileNames[shardId], os.O_RDONLY, 0)
  141. if openErr != nil {
  142. return openErr
  143. }
  144. }
  145. for datFileSize >= DataShardsCount*ErasureCodingLargeBlockSize {
  146. for shardId := 0; shardId < DataShardsCount; shardId++ {
  147. w, err := io.CopyN(datFile, inputFiles[shardId], ErasureCodingLargeBlockSize)
  148. if w != ErasureCodingLargeBlockSize {
  149. return fmt.Errorf("copy %s large block on shardId %d: %v", baseFileName, shardId, err)
  150. }
  151. datFileSize -= ErasureCodingLargeBlockSize
  152. }
  153. }
  154. for datFileSize > 0 {
  155. for shardId := 0; shardId < DataShardsCount; shardId++ {
  156. toRead := min(datFileSize, ErasureCodingSmallBlockSize)
  157. w, err := io.CopyN(datFile, inputFiles[shardId], toRead)
  158. if w != toRead {
  159. return fmt.Errorf("copy %s small block %d: %v", baseFileName, shardId, err)
  160. }
  161. datFileSize -= toRead
  162. }
  163. }
  164. return nil
  165. }
  166. func min(x, y int64) int64 {
  167. if x > y {
  168. return y
  169. }
  170. return x
  171. }