page_writer.go

package filesys

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filesys/page_writer"
	"github.com/chrislusf/seaweedfs/weed/glog"
)
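
// PageWriter routes file writes to two sets of dirty pages: a stream
// writer for sequential writes and a random writer, backed by a temp
// file, for everything else. A typical lifecycle (a sketch, assuming
// the surrounding filesys package drives it from its file handlers):
//
//	pw := newPageWriter(file, chunkSize) // when the file handle is created
//	pw.AddPage(offset, data)             // on each write
//	pw.FlushData()                       // on flush/fsync
//	pw.Destroy()                         // when the handle is released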
type PageWriter struct {
	f             *File
	collection    string
	replication   string
	chunkSize     int64
	writerPattern *WriterPattern

	randomWriter page_writer.DirtyPages
	streamWriter page_writer.DirtyPages
}
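
// Compile-time assertion that *PageWriter implements page_writer.DirtyPages.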
var (
	_ = page_writer.DirtyPages(&PageWriter{})
)
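
// newPageWriter wires up both writers; each write is later routed based
// on the access pattern that writerPattern observes.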
func newPageWriter(file *File, chunkSize int64) *PageWriter {
	pw := &PageWriter{
		f:             file,
		chunkSize:     chunkSize,
		writerPattern: NewWriterPattern(chunkSize),
		randomWriter:  newTempFileDirtyPages(file, chunkSize),
		streamWriter:  newStreamDirtyPages(file, chunkSize),
		//streamWriter: newContinuousDirtyPages(file),
		//streamWriter: nil,
	}
	return pw
}
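
// AddPage accepts one write and splits it into chunk-aligned pieces, so
// that no piece handed to addToOneChunk crosses a chunk boundary.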
func (pw *PageWriter) AddPage(offset int64, data []byte) {
	glog.V(4).Infof("%v AddPage [%d, %d) streaming:%v", pw.f.fullpath(), offset, offset+int64(len(data)), pw.writerPattern.IsStreamingMode())

	chunkIndex := offset / pw.chunkSize
	for i := chunkIndex; len(data) > 0; i++ {
		writeSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset)
		pw.addToOneChunk(i, offset, data[:writeSize])
		offset += writeSize
		data = data[writeSize:]
	}
}
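
// addToOneChunk routes a single chunk-aligned piece. Chunk 0 always goes
// to the random writer; later chunks go to the stream writer only while
// the observed write pattern is sequential.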
func (pw *PageWriter) addToOneChunk(chunkIndex, offset int64, data []byte) {
	if chunkIndex > 0 {
		if pw.writerPattern.IsStreamingMode() && pw.streamWriter != nil {
			pw.streamWriter.AddPage(offset, data)
			return
		}
	}
	pw.randomWriter.AddPage(offset, data)
}
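
// FlushData resets the pattern detector and flushes both writers,
// returning early if the stream writer fails.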
func (pw *PageWriter) FlushData() error {
	pw.writerPattern.Reset()
	if pw.streamWriter != nil {
		if err := pw.streamWriter.FlushData(); err != nil {
			return err
		}
	}
	return pw.randomWriter.FlushData()
}
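
// ReadDirtyDataAt overlays any unflushed dirty data onto data, chunk by
// chunk, consulting both writers; maxStop is the highest offset
// (exclusive) reached by dirty data, as reported by the underlying writers.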
func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64) (maxStop int64) {
	glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.f.fullpath(), offset, offset+int64(len(data)))

	originalData := data
	originalOffset := offset

	chunkIndex := offset / pw.chunkSize
	for i := chunkIndex; len(data) > 0; i++ {
		readSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset)
		if pw.streamWriter != nil {
			m1 := pw.streamWriter.ReadDirtyDataAt(data[:readSize], offset)
			maxStop = max(maxStop, m1)
		}
		m2 := pw.randomWriter.ReadDirtyDataAt(data[:readSize], offset)
		maxStop = max(maxStop, m2)
		offset += readSize
		data = data[readSize:]
	}

	page_writer.CheckByteZero(fmt.Sprintf("page writer read [%d,%d) of size %d", originalOffset, originalOffset+int64(len(originalData)), pw.f.entry.Attributes.FileSize), originalData, 0, maxStop-originalOffset)

	return
}
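
// GetStorageOptions returns the collection and replication settings of
// whichever writer is currently handling writes.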
func (pw *PageWriter) GetStorageOptions() (collection, replication string) {
	if pw.writerPattern.IsStreamingMode() && pw.streamWriter != nil {
		return pw.streamWriter.GetStorageOptions()
	}
	return pw.randomWriter.GetStorageOptions()
}
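
// Destroy releases the resources held by both writers, such as the
// random writer's temp file.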
func (pw *PageWriter) Destroy() {
	if pw.streamWriter != nil {
		pw.streamWriter.Destroy()
	}
	pw.randomWriter.Destroy()
}
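
// int64 helpers; Go versions before 1.21 have no built-in min/max.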
func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}