// filechunks.go — chunk visibility, merging, and view helpers for the filer package.
  1. package filer
  2. import (
  3. "bytes"
  4. "encoding/hex"
  5. "fmt"
  6. "github.com/chrislusf/seaweedfs/weed/wdclient"
  7. "math"
  8. "sort"
  9. "sync"
  10. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  11. "github.com/chrislusf/seaweedfs/weed/util"
  12. )
  13. func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
  14. for _, c := range chunks {
  15. t := uint64(c.Offset + int64(c.Size))
  16. if size < t {
  17. size = t
  18. }
  19. }
  20. return
  21. }
  22. func FileSize2(entry *Entry) (size uint64) {
  23. return maxUint64(TotalSize(entry.Chunks), entry.Attr.FileSize)
  24. }
  25. func FileSize(entry *filer_pb.Entry) (size uint64) {
  26. return maxUint64(TotalSize(entry.Chunks), entry.Attributes.FileSize)
  27. }
  28. func ETag(entry *filer_pb.Entry) (etag string) {
  29. if entry.Attributes == nil || entry.Attributes.Md5 == nil {
  30. return ETagChunks(entry.Chunks)
  31. }
  32. return fmt.Sprintf("%x", entry.Attributes.Md5)
  33. }
  34. func ETagEntry(entry *Entry) (etag string) {
  35. if entry.Attr.Md5 == nil {
  36. return ETagChunks(entry.Chunks)
  37. }
  38. return fmt.Sprintf("%x", entry.Attr.Md5)
  39. }
  40. func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
  41. if len(chunks) == 1 {
  42. return chunks[0].ETag
  43. }
  44. md5_digests := [][]byte{}
  45. for _, c := range chunks {
  46. md5_decoded, _ := hex.DecodeString(c.ETag)
  47. md5_digests = append(md5_digests, md5_decoded)
  48. }
  49. return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
  50. }
  51. func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
  52. visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
  53. fileIds := make(map[string]bool)
  54. for _, interval := range visibles {
  55. fileIds[interval.fileId] = true
  56. }
  57. for _, chunk := range chunks {
  58. if _, found := fileIds[chunk.GetFileIdString()]; found {
  59. compacted = append(compacted, chunk)
  60. } else {
  61. garbage = append(garbage, chunk)
  62. }
  63. }
  64. return
  65. }
  66. func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
  67. aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as)
  68. if aErr != nil {
  69. return nil, aErr
  70. }
  71. bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs)
  72. if bErr != nil {
  73. return nil, bErr
  74. }
  75. delta = append(delta, DoMinusChunks(aData, bData)...)
  76. delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
  77. return
  78. }
  79. func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
  80. fileIds := make(map[string]bool)
  81. for _, interval := range bs {
  82. fileIds[interval.GetFileIdString()] = true
  83. }
  84. for _, chunk := range as {
  85. if _, found := fileIds[chunk.GetFileIdString()]; !found {
  86. delta = append(delta, chunk)
  87. }
  88. }
  89. return
  90. }
// ChunkView describes the portion of a single chunk that is visible
// within a requested byte range of the file.
type ChunkView struct {
	FileId      string
	Offset      int64  // start of the viewed data inside the chunk
	Size        uint64 // number of visible bytes taken from the chunk
	LogicOffset int64  // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
	ChunkSize   uint64 // total size of the backing chunk
	CipherKey   []byte
	IsGzipped   bool
}
// IsFullChunk reports whether this view covers the entire backing chunk.
func (cv *ChunkView) IsFullChunk() bool {
	return cv.Size == cv.ChunkSize
}
// ViewFromChunks resolves the chunks into non-overlapping visible intervals
// and returns the chunk views covering [offset, offset+size).
// The resolve error is discarded here; the result is best-effort.
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
	visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks)
	return ViewFromVisibleIntervals(visibles, offset, size)
}
  107. func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
  108. stop := offset + size
  109. if size == math.MaxInt64 {
  110. stop = math.MaxInt64
  111. }
  112. if stop < offset {
  113. stop = math.MaxInt64
  114. }
  115. for _, chunk := range visibles {
  116. chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
  117. if chunkStart < chunkStop {
  118. views = append(views, &ChunkView{
  119. FileId: chunk.fileId,
  120. Offset: chunkStart - chunk.start + chunk.chunkOffset,
  121. Size: uint64(chunkStop - chunkStart),
  122. LogicOffset: chunkStart,
  123. ChunkSize: chunk.chunkSize,
  124. CipherKey: chunk.cipherKey,
  125. IsGzipped: chunk.isGzipped,
  126. })
  127. }
  128. }
  129. return views
  130. }
// logPrintf is a debugging aid for dumping interval lists; the body is kept
// commented out so the call sites can stay in place with zero runtime cost.
func logPrintf(name string, visibles []VisibleInterval) {
	/*
		glog.V(0).Infof("%s len %d", name, len(visibles))
		for _, v := range visibles {
			glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
		}
	*/
}
// bufPool recycles VisibleInterval scratch values.
// NOTE(review): no use of bufPool is visible in this file — confirm it is
// referenced elsewhere before removing.
var bufPool = sync.Pool{
	New: func() interface{} {
		return new(VisibleInterval)
	},
}
// MergeIntoVisibles overlays one chunk onto a list of non-overlapping,
// start-sorted visible intervals and returns the updated list, still
// non-overlapping and sorted. The new chunk wins every overlap — callers
// feed chunks in ascending Mtime order so later writes occlude earlier ones.
func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {

	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)

	length := len(visibles)
	if length == 0 {
		// first chunk: visible in full
		return append(visibles, newV)
	}
	last := visibles[length-1]
	if last.stop <= chunk.Offset {
		// fast path: the chunk starts at or past the current end — append only
		return append(visibles, newV)
	}

	logPrintf(" before", visibles)
	// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
	chunkStop := chunk.Offset + int64(chunk.Size)
	for _, v := range visibles {
		if v.start < chunk.Offset && chunk.Offset < v.stop {
			// chunk cuts into v from the right: keep v's left remainder
			t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
			// glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
		}
		if v.start < chunkStop && chunkStop < v.stop {
			// chunk cuts into v from the left: keep v's right remainder,
			// advancing chunkOffset by the number of bytes now occluded
			t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
			// glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
		}
		if chunkStop <= v.start || v.stop <= chunk.Offset {
			// v does not touch the chunk at all: keep it unchanged
			newVisibles = append(newVisibles, v)
			// glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
		}
	}
	newVisibles = append(newVisibles, newV)

	logPrintf(" append", newVisibles)

	// newV was appended at the tail; rotate it leftwards (insertion-sort
	// style) into its place so the list stays sorted by start offset
	for i := len(newVisibles) - 1; i >= 0; i-- {
		if i > 0 && newV.start < newVisibles[i-1].start {
			newVisibles[i] = newVisibles[i-1]
		} else {
			newVisibles[i] = newV
			break
		}
	}

	logPrintf(" sorted", newVisibles)

	return newVisibles
}
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
// If the file chunk content is a chunk manifest
// NOTE(review): when ResolveChunkManifest fails, err is returned but the
// (possibly partial) chunk list is still sorted and merged below, so callers
// that ignore err receive a best-effort view — confirm this is intended.
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (visibles []VisibleInterval, err error) {

	chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks)

	// sort ascending by Mtime (ties broken by file key) so that newer chunks
	// are merged later and therefore win overlaps in MergeIntoVisibles
	sort.Slice(chunks, func(i, j int) bool {
		if chunks[i].Mtime == chunks[j].Mtime {
			filer_pb.EnsureFid(chunks[i])
			filer_pb.EnsureFid(chunks[j])
			// NOTE(review): returning true when either Fid is nil is not a
			// strict ordering; acceptable only if nil Fids are rare/absent
			if chunks[i].Fid == nil || chunks[j].Fid == nil {
				return true
			}
			return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey
		}
		return chunks[i].Mtime < chunks[j].Mtime // keep this to make tests run
	})

	for _, chunk := range chunks {

		// glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
		visibles = MergeIntoVisibles(visibles, chunk)

		logPrintf("add", visibles)

	}

	return
}
// find non-overlapping visible intervals
// visible interval map to one file chunk
type VisibleInterval struct {
	start        int64  // start of the interval within the file (inclusive)
	stop         int64  // end of the interval within the file (exclusive)
	modifiedTime int64
	fileId       string
	chunkOffset  int64  // where this interval begins inside its backing chunk
	chunkSize    uint64 // full size of the backing chunk
	cipherKey    []byte
	isGzipped    bool
}
  220. func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
  221. return VisibleInterval{
  222. start: start,
  223. stop: stop,
  224. fileId: fileId,
  225. modifiedTime: modifiedTime,
  226. chunkOffset: chunkOffset, // the starting position in the chunk
  227. chunkSize: chunkSize,
  228. cipherKey: cipherKey,
  229. isGzipped: isGzipped,
  230. }
  231. }
  232. func min(x, y int64) int64 {
  233. if x <= y {
  234. return x
  235. }
  236. return y
  237. }
  238. func max(x, y int64) int64 {
  239. if x <= y {
  240. return y
  241. }
  242. return x
  243. }