upload_pipeline.go

package page_writer

import (
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
// LogicChunkIndex identifies a fixed-size chunk by its position within one file.
type LogicChunkIndex int

// UploadPipeline buffers writes for a single file, seals full chunks, and
// uploads them in the background. The chunk maps are guarded by chunksLock.
type UploadPipeline struct {
	uploaderCount      int32
	uploaderCountCond  *sync.Cond
	filepath           util.FullPath
	ChunkSize          int64
	uploaders          *util.LimitedConcurrentExecutor
	saveToStorageFn    SaveToStorageFunc
	writableChunkLimit int
	swapFile           *SwapFile
	chunksLock         sync.Mutex
	writableChunks     map[LogicChunkIndex]PageChunk
	sealedChunks       map[LogicChunkIndex]*SealedChunk
	activeReadChunks   map[LogicChunkIndex]int
	readerCountCond    *sync.Cond
}
type SealedChunk struct {
	chunk            PageChunk
	referenceCounter int // track uploading or reading processes
}

func (sc *SealedChunk) FreeReference(messageOnFree string) {
	sc.referenceCounter--
	if sc.referenceCounter == 0 {
		glog.V(4).Infof("Free sealed chunk: %s", messageOnFree)
		sc.chunk.FreeResource()
	}
}
func NewUploadPipeline(writers *util.LimitedConcurrentExecutor, chunkSize int64, saveToStorageFn SaveToStorageFunc, bufferChunkLimit int, swapFileDir string) *UploadPipeline {
	t := &UploadPipeline{
		ChunkSize:          chunkSize,
		writableChunks:     make(map[LogicChunkIndex]PageChunk),
		sealedChunks:       make(map[LogicChunkIndex]*SealedChunk),
		uploaders:          writers,
		uploaderCountCond:  sync.NewCond(&sync.Mutex{}),
		saveToStorageFn:    saveToStorageFn,
		activeReadChunks:   make(map[LogicChunkIndex]int),
		writableChunkLimit: bufferChunkLimit,
		swapFile:           NewSwapFile(swapFileDir, chunkSize),
	}
	t.readerCountCond = sync.NewCond(&t.chunksLock)
	return t
}
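
// Usage sketch (illustrative only, not part of the original file): a caller
// typically creates one pipeline per open file handle. The concurrency
// limit, chunk size, and saveFn below are assumed example values.
//
//	executor := util.NewLimitedConcurrentExecutor(16)
//	pipeline := NewUploadPipeline(executor, 2*1024*1024, saveFn, 4, os.TempDir())
//	pipeline.SaveDataAt(data, 0, true, time.Now().UnixNano())
//	pipeline.FlushAll() // seal and upload everything still buffered
//	pipeline.Shutdown() // release the swap file and remaining sealed chunks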
func (up *UploadPipeline) SaveDataAt(p []byte, off int64, isSequential bool, tsNs int64) (n int) {
	up.chunksLock.Lock()
	defer up.chunksLock.Unlock()

	logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)

	pageChunk, found := up.writableChunks[logicChunkIndex]
	if !found {
		if len(up.writableChunks) > up.writableChunkLimit {
			// this file's writable chunks exceed the per-file buffer limit: seal the fullest chunk
			candidateChunkIndex, fullness := LogicChunkIndex(-1), int64(0)
			for lci, mc := range up.writableChunks {
				chunkFullness := mc.WrittenSize()
				if fullness < chunkFullness {
					candidateChunkIndex = lci
					fullness = chunkFullness
				}
			}
			/* // this algo generates too many chunks
			candidateChunkIndex, lowestActivityScore := LogicChunkIndex(-1), int64(math.MaxInt64)
			for wci, wc := range up.writableChunks {
				activityScore := wc.ActivityScore()
				if lowestActivityScore >= activityScore {
					if lowestActivityScore == activityScore {
						chunkFullness := wc.WrittenSize()
						if fullness < chunkFullness {
							candidateChunkIndex = lci
							fullness = chunkFullness
						}
					}
					candidateChunkIndex = wci
					lowestActivityScore = activityScore
				}
			}
			*/
			up.moveToSealed(up.writableChunks[candidateChunkIndex], candidateChunkIndex)
			// fmt.Printf("flush chunk %d with %d bytes written\n", logicChunkIndex, fullness)
		}
		// sequential writes get memory chunks while the process-wide mem chunk count
		// stays under 4x the per-file limit; otherwise spill to the swap file
		// fmt.Printf("isSequential:%v len(up.writableChunks):%v memChunkCounter:%v", isSequential, len(up.writableChunks), memChunkCounter)
		if isSequential &&
			len(up.writableChunks) < up.writableChunkLimit &&
			atomic.LoadInt64(&memChunkCounter) < 4*int64(up.writableChunkLimit) {
			pageChunk = NewMemChunk(logicChunkIndex, up.ChunkSize)
			// fmt.Printf(" create mem chunk %d\n", logicChunkIndex)
		} else {
			pageChunk = up.swapFile.NewSwapFileChunk(logicChunkIndex)
			// fmt.Printf(" create file chunk %d\n", logicChunkIndex)
		}
		up.writableChunks[logicChunkIndex] = pageChunk
	}
	//if _, foundSealed := up.sealedChunks[logicChunkIndex]; foundSealed {
	//	println("found already sealed chunk", logicChunkIndex)
	//}
	//if _, foundReading := up.activeReadChunks[logicChunkIndex]; foundReading {
	//	println("found active read chunk", logicChunkIndex)
	//}
	n = pageChunk.WriteDataAt(p, off, tsNs)
	up.maybeMoveToSealed(pageChunk, logicChunkIndex)

	return
}
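
// Worked example (illustrative): with ChunkSize = 2*1024*1024, a write at
// offset 5*1024*1024 maps to LogicChunkIndex(5 MiB / 2 MiB) = 2, i.e. the
// third 2 MiB chunk of the file.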
func (up *UploadPipeline) MaybeReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64) {
	logicChunkIndex := LogicChunkIndex(off / up.ChunkSize)

	up.chunksLock.Lock()
	defer func() {
		up.readerCountCond.Signal()
		up.chunksLock.Unlock()
	}()

	// read from sealed chunks first
	sealedChunk, found := up.sealedChunks[logicChunkIndex]
	if found {
		maxStop = sealedChunk.chunk.ReadDataAt(p, off, tsNs)
		glog.V(4).Infof("%s read sealed memchunk [%d,%d)", up.filepath, off, maxStop)
	}

	// read from writable chunks last
	writableChunk, found := up.writableChunks[logicChunkIndex]
	if !found {
		return
	}
	writableMaxStop := writableChunk.ReadDataAt(p, off, tsNs)
	glog.V(4).Infof("%s read writable memchunk [%d,%d)", up.filepath, off, writableMaxStop)
	maxStop = max(maxStop, writableMaxStop)

	return
}
func (up *UploadPipeline) FlushAll() {
	up.flushChunks()
	up.waitForCurrentWritersToComplete()
}

func (up *UploadPipeline) flushChunks() {
	up.chunksLock.Lock()
	defer up.chunksLock.Unlock()
	for logicChunkIndex, memChunk := range up.writableChunks {
		up.moveToSealed(memChunk, logicChunkIndex)
	}
}
func (up *UploadPipeline) maybeMoveToSealed(memChunk PageChunk, logicChunkIndex LogicChunkIndex) {
	if memChunk.IsComplete() {
		up.moveToSealed(memChunk, logicChunkIndex)
	}
}
func (up *UploadPipeline) moveToSealed(memChunk PageChunk, logicChunkIndex LogicChunkIndex) {
	atomic.AddInt32(&up.uploaderCount, 1)
	glog.V(4).Infof("%s uploaderCount %d ++> %d", up.filepath, up.uploaderCount-1, up.uploaderCount)

	if oldMemChunk, found := up.sealedChunks[logicChunkIndex]; found {
		oldMemChunk.FreeReference(fmt.Sprintf("%s replace chunk %d", up.filepath, logicChunkIndex))
	}
	sealedChunk := &SealedChunk{
		chunk:            memChunk,
		referenceCounter: 1, // default 1 is for uploading process
	}
	up.sealedChunks[logicChunkIndex] = sealedChunk
	delete(up.writableChunks, logicChunkIndex)

	// unlock before submitting the uploading jobs
	up.chunksLock.Unlock()
	up.uploaders.Execute(func() {
		// first save the sealed chunk content to storage
		sealedChunk.chunk.SaveContent(up.saveToStorageFn)

		// notify waiting processes
		atomic.AddInt32(&up.uploaderCount, -1)
		glog.V(4).Infof("%s uploaderCount %d --> %d", up.filepath, up.uploaderCount+1, up.uploaderCount)
		// Broadcast while holding the lock, so a goroutine that is between
		// checking uploaderCount and calling Wait cannot miss this wakeup.
		up.uploaderCountCond.L.Lock()
		up.uploaderCountCond.Broadcast()
		up.uploaderCountCond.L.Unlock()

		// wait for readers
		up.chunksLock.Lock()
		defer up.chunksLock.Unlock()
		for up.IsLocked(logicChunkIndex) {
			up.readerCountCond.Wait()
		}

		// then remove from sealed chunks
		delete(up.sealedChunks, logicChunkIndex)
		sealedChunk.FreeReference(fmt.Sprintf("%s finished uploading chunk %d", up.filepath, logicChunkIndex))
	})
	up.chunksLock.Lock()
}
func (up *UploadPipeline) Shutdown() {
	up.swapFile.FreeResource()

	up.chunksLock.Lock()
	defer up.chunksLock.Unlock()
	for logicChunkIndex, sealedChunk := range up.sealedChunks {
		sealedChunk.FreeReference(fmt.Sprintf("%s uploadpipeline shutdown chunk %d", up.filepath, logicChunkIndex))
	}
}
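
// Sketch of a saveFn callback (illustrative; SaveToStorageFunc is declared
// elsewhere in this package, and the exact signature below is an assumption,
// as is the myStore destination):
//
//	saveFn := func(reader io.Reader, offset int64, size int64, modifiedTsNs int64, cleanupFn func()) {
//		buf := make([]byte, size)
//		if _, err := io.ReadFull(reader, buf); err == nil {
//			// hypothetical destination; in SeaweedFS this would upload the
//			// chunk to a volume server and record the resulting file chunk
//			myStore.Put(offset, buf)
//		}
//		cleanupFn() // let the pipeline release the chunk's resources
//	}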