dirty_page.go

package filesys

import (
	"bytes"
	"io"
	"runtime"
	"sync"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util/log"
)

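// ContinuousDirtyPages buffers the dirty (not yet flushed) writes of one open
// file as continuous byte intervals and saves them to storage as file chunks.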
type ContinuousDirtyPages struct {
	intervals              *ContinuousIntervals
	f                      *File
	writeWaitGroup         sync.WaitGroup
	chunkAddLock           sync.Mutex
	chunkSaveErrChan       chan error
	chunkSaveErrChanClosed bool
	lastErr                error
	collection             string
	replication            string
}

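// newDirtyPages creates the dirty-page buffer for one open file and starts a
// goroutine that drains chunkSaveErrChan, keeping the most recent non-nil
// error in lastErr.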
func newDirtyPages(file *File) *ContinuousDirtyPages {
	dirtyPages := &ContinuousDirtyPages{
		intervals:        &ContinuousIntervals{},
		f:                file,
		chunkSaveErrChan: make(chan error, runtime.NumCPU()),
	}
	go func() {
		for t := range dirtyPages.chunkSaveErrChan {
			if t != nil {
				dirtyPages.lastErr = t
			}
		}
	}()
	return dirtyPages
}

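// AddPage buffers one write at the given offset. Data larger than the chunk
// size limit is flushed to storage right away; otherwise it is merged into the
// in-memory intervals, and the largest interval is saved once the buffered
// total reaches the chunk size limit.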
func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) {

	log.Tracef("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)

	if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
		// this is more than what buffer can hold.
		pages.flushAndSave(offset, data)
		// the data is already persisted; do not buffer it again
		return
	}

	pages.intervals.AddInterval(data, offset)

	if pages.intervals.TotalSize() >= pages.f.wfs.option.ChunkSizeLimit {
		pages.saveExistingLargestPageToStorage()
	}

	return
}

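// flushAndSave writes out everything currently buffered, then saves the new
// data directly to storage as its own chunk, bypassing the in-memory buffer.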
func (pages *ContinuousDirtyPages) flushAndSave(offset int64, data []byte) {

	// flush existing
	pages.saveExistingPagesToStorage()

	// flush the new page
	pages.saveToStorage(bytes.NewReader(data), offset, int64(len(data)))

	return
}

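// saveExistingPagesToStorage drains the buffer by repeatedly saving the
// largest remaining interval until nothing is left.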
func (pages *ContinuousDirtyPages) saveExistingPagesToStorage() {
	for pages.saveExistingLargestPageToStorage() {
	}
}

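// saveExistingLargestPageToStorage removes the largest continuous interval
// from the buffer and saves it as a chunk, capped at the current file size.
// It reports whether any data was written out.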
func (pages *ContinuousDirtyPages) saveExistingLargestPageToStorage() (hasSavedData bool) {

	maxList := pages.intervals.RemoveLargestIntervalLinkedList()
	if maxList == nil {
		return false
	}

	fileSize := int64(pages.f.entry.Attributes.FileSize)

	chunkSize := min(maxList.Size(), fileSize-maxList.Offset())
	if chunkSize == 0 {
		return false
	}

	pages.saveToStorage(maxList.ToReader(), maxList.Offset(), chunkSize)

	return true
}

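// saveToStorage asynchronously uploads size bytes from reader as a chunk at
// the given file offset, using the shared concurrentWriters pool when one is
// configured or a dedicated goroutine otherwise. Upload errors are sent to
// chunkSaveErrChan; on success the new chunk is added to the file's chunk list.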
func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64, size int64) {

	errChanSize := pages.f.wfs.option.ConcurrentWriters
	if errChanSize == 0 {
		errChanSize = runtime.NumCPU()
	}
	if pages.chunkSaveErrChanClosed {
		// re-create the error channel if an earlier flush closed it
		pages.chunkSaveErrChan = make(chan error, errChanSize)
		pages.chunkSaveErrChanClosed = false
	}

	mtime := time.Now().UnixNano()
	pages.writeWaitGroup.Add(1)
	writer := func() {
		defer pages.writeWaitGroup.Done()

		reader = io.LimitReader(reader, size)
		chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
		if err != nil {
			log.Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
			pages.chunkSaveErrChan <- err
			return
		}
		chunk.Mtime = mtime
		pages.collection, pages.replication = collection, replication
		pages.chunkAddLock.Lock()
		defer pages.chunkAddLock.Unlock()
		pages.f.addChunks([]*filer_pb.FileChunk{chunk})
		log.Tracef("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
	}

	if pages.f.wfs.concurrentWriters != nil {
		pages.f.wfs.concurrentWriters.Execute(writer)
	} else {
		go writer()
	}
}

func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}

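// ReadDirtyDataAt copies any buffered dirty data overlapping startOffset into
// data, so readers can observe writes that have not reached storage yet, and
// returns the largest stop offset that was filled.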
func (pages *ContinuousDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) {
	return pages.intervals.ReadDataAt(data, startOffset)
}