filechunks.go 8.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328
  1. package filer
import (
	"bytes"
	"fmt"
	"math"
	"sort"
	"sync"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/util"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)
  12. func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
  13. for _, c := range chunks {
  14. t := uint64(c.Offset + int64(c.Size))
  15. if size < t {
  16. size = t
  17. }
  18. }
  19. return
  20. }
  21. func FileSize(entry *filer_pb.Entry) (size uint64) {
  22. return maxUint64(TotalSize(entry.Chunks), entry.Attributes.FileSize)
  23. }
  24. func ETag(entry *filer_pb.Entry) (etag string) {
  25. if entry.Attributes == nil || entry.Attributes.Md5 == nil {
  26. return ETagChunks(entry.Chunks)
  27. }
  28. return fmt.Sprintf("%x", entry.Attributes.Md5)
  29. }
  30. func ETagEntry(entry *Entry) (etag string) {
  31. if entry.Attr.Md5 == nil {
  32. return ETagChunks(entry.Chunks)
  33. }
  34. return fmt.Sprintf("%x", entry.Attr.Md5)
  35. }
  36. func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
  37. if len(chunks) == 1 {
  38. return fmt.Sprintf("%x", util.Base64Md5ToBytes(chunks[0].ETag))
  39. }
  40. md5_digests := [][]byte{}
  41. for _, c := range chunks {
  42. md5_digests = append(md5_digests, util.Base64Md5ToBytes(c.ETag))
  43. }
  44. return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
  45. }
  46. func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
  47. visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64)
  48. fileIds := make(map[string]bool)
  49. for _, interval := range visibles {
  50. fileIds[interval.fileId] = true
  51. }
  52. for _, chunk := range chunks {
  53. if _, found := fileIds[chunk.GetFileIdString()]; found {
  54. compacted = append(compacted, chunk)
  55. } else {
  56. garbage = append(garbage, chunk)
  57. }
  58. }
  59. return
  60. }
  61. func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
  62. aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64)
  63. if aErr != nil {
  64. return nil, aErr
  65. }
  66. bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs, 0, math.MaxInt64)
  67. if bErr != nil {
  68. return nil, bErr
  69. }
  70. delta = append(delta, DoMinusChunks(aData, bData)...)
  71. delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
  72. return
  73. }
  74. func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
  75. fileIds := make(map[string]bool)
  76. for _, interval := range bs {
  77. fileIds[interval.GetFileIdString()] = true
  78. }
  79. for _, chunk := range as {
  80. if _, found := fileIds[chunk.GetFileIdString()]; !found {
  81. delta = append(delta, chunk)
  82. }
  83. }
  84. return
  85. }
  86. func DoMinusChunksBySourceFileId(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
  87. fileIds := make(map[string]bool)
  88. for _, interval := range bs {
  89. fileIds[interval.GetFileIdString()] = true
  90. }
  91. for _, chunk := range as {
  92. if _, found := fileIds[chunk.GetSourceFileId()]; !found {
  93. delta = append(delta, chunk)
  94. }
  95. }
  96. return
  97. }
// ChunkView describes one contiguous piece of file data served from a
// single chunk: bytes [Offset, Offset+Size) of chunk FileId, appearing
// at LogicOffset in the logical file.
type ChunkView struct {
	FileId      string // id of the chunk that backs this view
	Offset      int64  // starting offset of this view within the chunk
	Size        uint64 // number of bytes covered by this view
	LogicOffset int64  // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
	ChunkSize   uint64 // total size of the underlying chunk
	CipherKey   []byte // chunk encryption key; nil when not encrypted
	IsGzipped   bool   // whether the chunk content is compressed
}
  107. func (cv *ChunkView) IsFullChunk() bool {
  108. return cv.Size == cv.ChunkSize
  109. }
  110. func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
  111. visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size)
  112. return ViewFromVisibleIntervals(visibles, offset, size)
  113. }
  114. func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
  115. stop := offset + size
  116. if size == math.MaxInt64 {
  117. stop = math.MaxInt64
  118. }
  119. if stop < offset {
  120. stop = math.MaxInt64
  121. }
  122. for _, chunk := range visibles {
  123. chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
  124. if chunkStart < chunkStop {
  125. views = append(views, &ChunkView{
  126. FileId: chunk.fileId,
  127. Offset: chunkStart - chunk.start + chunk.chunkOffset,
  128. Size: uint64(chunkStop - chunkStart),
  129. LogicOffset: chunkStart,
  130. ChunkSize: chunk.chunkSize,
  131. CipherKey: chunk.cipherKey,
  132. IsGzipped: chunk.isGzipped,
  133. })
  134. }
  135. }
  136. return views
  137. }
// logPrintf is a debugging hook that would dump a labeled list of visible
// intervals. The body is intentionally commented out so the merge path
// stays silent; re-enable the glog lines below when tracing interval
// merging.
func logPrintf(name string, visibles []VisibleInterval) {

	/*
		glog.V(0).Infof("%s len %d", name, len(visibles))
		for _, v := range visibles {
			glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
		}
	*/
}
// bufPool pools VisibleInterval values to cut allocations.
// NOTE(review): despite the name it holds *VisibleInterval, not byte
// buffers, and no Get/Put call is visible in this file — confirm it is
// actually used elsewhere before keeping it.
var bufPool = sync.Pool{
	New: func() interface{} {
		return new(VisibleInterval)
	},
}
// MergeIntoVisibles merges one chunk (treated as newer than everything
// already in visibles) into a start-sorted list of non-overlapping
// visible intervals, returning the updated list. Intervals partially
// covered by the chunk are split; fully covered ones are dropped.
func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {

	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Mtime, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)

	length := len(visibles)
	if length == 0 {
		// First chunk: trivially the only visible interval.
		return append(visibles, newV)
	}
	last := visibles[length-1]
	if last.stop <= chunk.Offset {
		// Fast path: chunk starts at or beyond the current end of the
		// file, so nothing is overwritten — append keeps sort order.
		return append(visibles, newV)
	}

	logPrintf(" before", visibles)
	// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
	chunkStop := chunk.Offset + int64(chunk.Size)
	for _, v := range visibles {
		if v.start < chunk.Offset && chunk.Offset < v.stop {
			// New chunk starts inside v: keep v's left remainder.
			t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
			// glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
		}
		if v.start < chunkStop && chunkStop < v.stop {
			// New chunk ends inside v: keep v's right remainder, with
			// chunkOffset advanced past the overwritten prefix.
			t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
			// glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
		}
		if chunkStop <= v.start || v.stop <= chunk.Offset {
			// v does not overlap the new chunk at all: keep it as is.
			// (Fully covered intervals match none of the three cases
			// and are dropped.)
			newVisibles = append(newVisibles, v)
			// glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
		}
	}
	newVisibles = append(newVisibles, newV)

	logPrintf(" append", newVisibles)

	// Restore sort order by start: only newV (appended last) can be out
	// of place, so one right-shifting insertion pass suffices.
	for i := len(newVisibles) - 1; i >= 0; i-- {
		if i > 0 && newV.start < newVisibles[i-1].start {
			newVisibles[i] = newVisibles[i-1]
		} else {
			newVisibles[i] = newV
			break
		}
	}

	logPrintf(" sorted", newVisibles)

	return newVisibles
}
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
// If the file chunk content is a chunk manifest
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) {

	// Expand any chunk-manifest chunks into their underlying data chunks.
	chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)

	// New implementation: compute the visible intervals in a single pass.
	visibles2 := readResolvedChunks(chunks)

	// NOTE(review): `if true` short-circuits here, so everything below is
	// dead code — the legacy sort+merge implementation retained only for
	// cross-checking. TODO: delete the legacy path (and the then-unused
	// `sort` import) once readResolvedChunks is fully trusted.
	if true {
		return visibles2, err
	}

	// Legacy path: sort chunks by mtime (file key breaks ties) and merge
	// them one by one into a list of visible intervals.
	sort.Slice(chunks, func(i, j int) bool {
		if chunks[i].Mtime == chunks[j].Mtime {
			filer_pb.EnsureFid(chunks[i])
			filer_pb.EnsureFid(chunks[j])
			if chunks[i].Fid == nil || chunks[j].Fid == nil {
				return true
			}
			return chunks[i].Fid.FileKey < chunks[j].Fid.FileKey
		}
		return chunks[i].Mtime < chunks[j].Mtime // keep this to make tests run
	})

	for _, chunk := range chunks {
		// glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
		visibles = MergeIntoVisibles(visibles, chunk)
		logPrintf("add", visibles)
	}

	// Cross-check the legacy result against the new implementation and
	// report any divergence (unreachable while `if true` above holds).
	if len(visibles) != len(visibles2) {
		fmt.Printf("different visibles size %d : %d\n", len(visibles), len(visibles2))
	} else {
		for i := 0; i < len(visibles); i++ {
			checkDifference(visibles[i], visibles2[i])
		}
	}

	return
}
  226. func checkDifference(x, y VisibleInterval) {
  227. if x.start != y.start ||
  228. x.stop != y.stop ||
  229. x.fileId != y.fileId ||
  230. x.modifiedTime != y.modifiedTime {
  231. fmt.Printf("different visible %+v : %+v\n", x, y)
  232. }
  233. }
// find non-overlapping visible intervals
// visible interval map to one file chunk

// VisibleInterval is one non-overlapping, currently-visible slice of the
// logical file: bytes [start, stop) of the file, served by a single chunk.
type VisibleInterval struct {
	start        int64  // inclusive start offset in the logical file
	stop         int64  // exclusive end offset in the logical file
	modifiedTime int64  // mtime of the chunk supplying this data
	fileId       string // id of the chunk serving this interval
	chunkOffset  int64  // where this interval's data begins within the chunk
	chunkSize    uint64 // total size of the underlying chunk
	cipherKey    []byte // chunk encryption key; nil when not encrypted
	isGzipped    bool   // whether the chunk content is compressed
}
  246. func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
  247. return VisibleInterval{
  248. start: start,
  249. stop: stop,
  250. fileId: fileId,
  251. modifiedTime: modifiedTime,
  252. chunkOffset: chunkOffset, // the starting position in the chunk
  253. chunkSize: chunkSize,
  254. cipherKey: cipherKey,
  255. isGzipped: isGzipped,
  256. }
  257. }
  258. func min(x, y int64) int64 {
  259. if x <= y {
  260. return x
  261. }
  262. return y
  263. }
  264. func max(x, y int64) int64 {
  265. if x <= y {
  266. return y
  267. }
  268. return x
  269. }