package filesys

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/filesys/page_writer"
	"github.com/chrislusf/seaweedfs/weed/glog"
)
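
// PageWriter routes dirty page writes to one of two backing implementations:
// a streaming writer for sequential writes, or a random-access writer for
// everything else, based on the pattern tracked by writerPattern.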
type PageWriter struct {
	f             *File
	collection    string
	replication   string
	chunkSize     int64
	writerPattern *WriterPattern
	randomWriter  page_writer.DirtyPages
	streamWriter  page_writer.DirtyPages
}
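
// assert that *PageWriter satisfies the page_writer.DirtyPages interface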
var (
	_ = page_writer.DirtyPages(&PageWriter{})
)
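
// newPageWriter creates a PageWriter for the given file, wiring up both the
// random-access and streaming dirty-page implementations.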
func newPageWriter(file *File, chunkSize int64) *PageWriter {
	pw := &PageWriter{
		f:             file,
		chunkSize:     chunkSize,
		writerPattern: NewWriterPattern(chunkSize),
		randomWriter:  newTempFileDirtyPages(file, chunkSize),
		streamWriter:  newStreamDirtyPages(file, chunkSize),
		//streamWriter: newContinuousDirtyPages(file),
		//streamWriter: nil,
	}
	return pw
}
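
// AddPage accepts dirty data at the given file offset, splitting it on chunk
// boundaries so that each piece is handed to the chunk that contains it.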
func (pw *PageWriter) AddPage(offset int64, data []byte) {
	glog.V(4).Infof("%v AddPage [%d, %d) streaming:%v", pw.f.fullpath(), offset, offset+int64(len(data)), pw.writerPattern.IsStreamingMode())

	chunkIndex := offset / pw.chunkSize
	for i := chunkIndex; len(data) > 0; i++ {
		writeSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset)
		pw.addToOneChunk(i, offset, data[:writeSize])
		offset += writeSize
		data = data[writeSize:]
	}
}
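
// addToOneChunk routes one chunk-sized piece of data: chunks after the first
// go to the streaming writer while in streaming mode; everything else falls
// back to the random-access writer.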
func (pw *PageWriter) addToOneChunk(chunkIndex, offset int64, data []byte) {
	if chunkIndex > 0 {
		if pw.writerPattern.IsStreamingMode() && pw.streamWriter != nil {
			pw.streamWriter.AddPage(offset, data)
			return
		}
	}
	pw.randomWriter.AddPage(offset, data)
}
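
// FlushData resets the write pattern tracker and flushes both writers,
// streaming first, then random.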
func (pw *PageWriter) FlushData() error {
	pw.writerPattern.Reset()
	if pw.streamWriter != nil {
		if err := pw.streamWriter.FlushData(); err != nil {
			return err
		}
	}
	return pw.randomWriter.FlushData()
}
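
// ReadDirtyDataAt fills data with any dirty bytes held by either writer for
// the range starting at offset, returning the furthest end offset covered.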
func (pw *PageWriter) ReadDirtyDataAt(data []byte, offset int64) (maxStop int64) {
	glog.V(4).Infof("ReadDirtyDataAt %v [%d, %d)", pw.f.fullpath(), offset, offset+int64(len(data)))

	originalData := data
	originalOffset := offset

	chunkIndex := offset / pw.chunkSize
	for i := chunkIndex; len(data) > 0; i++ {
		readSize := min(int64(len(data)), (i+1)*pw.chunkSize-offset)
		if pw.streamWriter != nil {
			m1 := pw.streamWriter.ReadDirtyDataAt(data[:readSize], offset)
			maxStop = max(maxStop, m1)
		}
		m2 := pw.randomWriter.ReadDirtyDataAt(data[:readSize], offset)
		maxStop = max(maxStop, m2)
		offset += readSize
		data = data[readSize:]
	}

	page_writer.CheckByteZero(fmt.Sprintf("page writer read [%d,%d) of size %d", originalOffset, originalOffset+int64(len(originalData)), pw.f.entry.Attributes.FileSize), originalData, 0, maxStop-originalOffset)

	return
}
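
// GetStorageOptions returns the collection and replication settings from the
// streaming writer in streaming mode, otherwise from the random-access writer.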
func (pw *PageWriter) GetStorageOptions() (collection, replication string) {
	if pw.writerPattern.IsStreamingMode() && pw.streamWriter != nil {
		return pw.streamWriter.GetStorageOptions()
	}
	return pw.randomWriter.GetStorageOptions()
}
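
// Destroy releases the resources held by both writers.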
func (pw *PageWriter) Destroy() {
	if pw.streamWriter != nil {
		pw.streamWriter.Destroy()
	}
	pw.randomWriter.Destroy()
}
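
// max returns the larger of two int64 values.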
func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}
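
// min returns the smaller of two int64 values.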
func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}