filechunks.go 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337
  1. package filer
  2. import (
  3. "bytes"
  4. "fmt"
  5. "github.com/seaweedfs/seaweedfs/weed/wdclient"
  6. "golang.org/x/exp/slices"
  7. "math"
  8. "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
  9. "github.com/seaweedfs/seaweedfs/weed/util"
  10. )
  11. func TotalSize(chunks []*filer_pb.FileChunk) (size uint64) {
  12. for _, c := range chunks {
  13. t := uint64(c.Offset + int64(c.Size))
  14. if size < t {
  15. size = t
  16. }
  17. }
  18. return
  19. }
  20. func FileSize(entry *filer_pb.Entry) (size uint64) {
  21. if entry == nil || entry.Attributes == nil {
  22. return 0
  23. }
  24. fileSize := entry.Attributes.FileSize
  25. if entry.RemoteEntry != nil {
  26. if entry.RemoteEntry.RemoteMtime > entry.Attributes.Mtime {
  27. fileSize = maxUint64(fileSize, uint64(entry.RemoteEntry.RemoteSize))
  28. }
  29. }
  30. return maxUint64(TotalSize(entry.GetChunks()), fileSize)
  31. }
  32. func ETag(entry *filer_pb.Entry) (etag string) {
  33. if entry.Attributes == nil || entry.Attributes.Md5 == nil {
  34. return ETagChunks(entry.GetChunks())
  35. }
  36. return fmt.Sprintf("%x", entry.Attributes.Md5)
  37. }
  38. func ETagEntry(entry *Entry) (etag string) {
  39. if entry.IsInRemoteOnly() {
  40. return entry.Remote.RemoteETag
  41. }
  42. if entry.Attr.Md5 == nil {
  43. return ETagChunks(entry.GetChunks())
  44. }
  45. return fmt.Sprintf("%x", entry.Attr.Md5)
  46. }
  47. func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
  48. if len(chunks) == 1 {
  49. return fmt.Sprintf("%x", util.Base64Md5ToBytes(chunks[0].ETag))
  50. }
  51. var md5Digests [][]byte
  52. for _, c := range chunks {
  53. md5Digests = append(md5Digests, util.Base64Md5ToBytes(c.ETag))
  54. }
  55. return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5Digests, nil)), len(chunks))
  56. }
  57. func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
  58. visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64)
  59. fileIds := make(map[string]bool)
  60. for _, interval := range visibles {
  61. fileIds[interval.fileId] = true
  62. }
  63. for _, chunk := range chunks {
  64. if _, found := fileIds[chunk.GetFileIdString()]; found {
  65. compacted = append(compacted, chunk)
  66. } else {
  67. garbage = append(garbage, chunk)
  68. }
  69. }
  70. return
  71. }
  72. func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
  73. aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64)
  74. if aErr != nil {
  75. return nil, aErr
  76. }
  77. bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs, 0, math.MaxInt64)
  78. if bErr != nil {
  79. return nil, bErr
  80. }
  81. delta = append(delta, DoMinusChunks(aData, bData)...)
  82. delta = append(delta, DoMinusChunks(aMeta, bMeta)...)
  83. return
  84. }
  85. func DoMinusChunks(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
  86. fileIds := make(map[string]bool)
  87. for _, interval := range bs {
  88. fileIds[interval.GetFileIdString()] = true
  89. }
  90. for _, chunk := range as {
  91. if _, found := fileIds[chunk.GetFileIdString()]; !found {
  92. delta = append(delta, chunk)
  93. }
  94. }
  95. return
  96. }
  97. func DoMinusChunksBySourceFileId(as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk) {
  98. fileIds := make(map[string]bool)
  99. for _, interval := range bs {
  100. fileIds[interval.GetFileIdString()] = true
  101. fileIds[interval.GetSourceFileId()] = true
  102. }
  103. for _, chunk := range as {
  104. _, sourceFileIdFound := fileIds[chunk.GetSourceFileId()]
  105. _, fileIdFound := fileIds[chunk.GetFileId()]
  106. if !sourceFileIdFound && !fileIdFound {
  107. delta = append(delta, chunk)
  108. }
  109. }
  110. return
  111. }
// ChunkView describes one contiguous readable slice of a stored chunk:
// the portion of FileId's data at [Offset, Offset+Size) that appears at
// LogicOffset in the logical file.
type ChunkView struct {
	FileId      string // volume server file id of the backing chunk
	Offset      int64  // start position of this view inside the chunk data
	Size        uint64 // number of bytes of the chunk covered by this view
	LogicOffset int64  // actual offset in the file, for the data specified via [offset, offset+size) in current chunk
	ChunkSize   uint64 // total size of the backing chunk; lets IsFullChunk detect whole-chunk views
	CipherKey   []byte // non-empty when the chunk content is encrypted
	IsGzipped   bool   // whether the chunk content is compressed
}
  121. func (cv *ChunkView) IsFullChunk() bool {
  122. return cv.Size == cv.ChunkSize
  123. }
  124. func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (views []*ChunkView) {
  125. visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size)
  126. return ViewFromVisibleIntervals(visibles, offset, size)
  127. }
  128. func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int64) (views []*ChunkView) {
  129. stop := offset + size
  130. if size == math.MaxInt64 {
  131. stop = math.MaxInt64
  132. }
  133. if stop < offset {
  134. stop = math.MaxInt64
  135. }
  136. for _, chunk := range visibles {
  137. chunkStart, chunkStop := max(offset, chunk.start), min(stop, chunk.stop)
  138. if chunkStart < chunkStop {
  139. views = append(views, &ChunkView{
  140. FileId: chunk.fileId,
  141. Offset: chunkStart - chunk.start + chunk.chunkOffset,
  142. Size: uint64(chunkStop - chunkStart),
  143. LogicOffset: chunkStart,
  144. ChunkSize: chunk.chunkSize,
  145. CipherKey: chunk.cipherKey,
  146. IsGzipped: chunk.isGzipped,
  147. })
  148. }
  149. }
  150. return views
  151. }
// logPrintf is a debugging hook for dumping a list of visible intervals;
// its body is intentionally commented out to keep production output quiet.
// Re-enable the glog lines below when tracing interval merges.
func logPrintf(name string, visibles []VisibleInterval) {
	/*
		glog.V(0).Infof("%s len %d", name, len(visibles))
		for _, v := range visibles {
			glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
		}
	*/
}
// MergeIntoVisibles merges one chunk (treated as newer than everything in
// visibles) into a start-sorted list of non-overlapping visible intervals
// and returns the updated list. The new chunk shadows any overlapping
// portions of existing intervals, which are split around it.
func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (newVisibles []VisibleInterval) {
	newV := newVisibleInterval(chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.ModifiedTsNs, 0, chunk.Size, chunk.CipherKey, chunk.IsCompressed)

	length := len(visibles)
	if length == 0 {
		return append(visibles, newV)
	}
	last := visibles[length-1]
	if last.stop <= chunk.Offset {
		// Fast path: the chunk begins at or past the current end of the
		// file, so it can be appended without splitting anything.
		return append(visibles, newV)
	}

	logPrintf(" before", visibles)
	// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
	chunkStop := chunk.Offset + int64(chunk.Size)
	for _, v := range visibles {
		if v.start < chunk.Offset && chunk.Offset < v.stop {
			// The new chunk cuts into v from the right: keep v's left remainder.
			t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTsNs, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
			// glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
		}
		if v.start < chunkStop && chunkStop < v.stop {
			// The new chunk cuts into v from the left: keep v's right
			// remainder, advancing chunkOffset past the shadowed bytes.
			t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTsNs, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
			newVisibles = append(newVisibles, t)
			// glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
		}
		if chunkStop <= v.start || v.stop <= chunk.Offset {
			// v does not overlap the new chunk at all: keep it unchanged.
			// (Fully-covered intervals match none of the three conditions
			// and are dropped.)
			newVisibles = append(newVisibles, v)
			// glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
		}
	}
	newVisibles = append(newVisibles, newV)

	logPrintf(" append", newVisibles)

	// Insertion-sort step: shift newV (currently at the tail) leftwards
	// until the list is ordered by start offset again.
	for i := len(newVisibles) - 1; i >= 0; i-- {
		if i > 0 && newV.start < newVisibles[i-1].start {
			newVisibles[i] = newVisibles[i-1]
		} else {
			newVisibles[i] = newV
			break
		}
	}

	logPrintf(" sorted", newVisibles)

	return newVisibles
}
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
// If the file chunk content is a chunk manifest
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) {

	// Expand any chunk-manifest chunks into their constituent data chunks.
	chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)
	if err != nil {
		return
	}

	visibles2 := readResolvedChunks(chunks)

	// NOTE(review): this `if true` always returns here, making everything
	// below unreachable. The legacy sort/merge path and its comparison
	// against visibles2 appear to be kept only as a cross-check of the two
	// implementations; removing the dead code would also orphan the
	// `slices` import and checkDifference, so it is left in place.
	if true {
		return visibles2, err
	}

	// --- dead code below: legacy implementation kept for comparison ---

	// Sort by modification time, breaking ties by file key so the merge
	// order is deterministic.
	slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool {
		if a.ModifiedTsNs == b.ModifiedTsNs {
			filer_pb.EnsureFid(a)
			filer_pb.EnsureFid(b)
			if a.Fid == nil || b.Fid == nil {
				return true
			}
			return a.Fid.FileKey < b.Fid.FileKey
		}
		return a.ModifiedTsNs < b.ModifiedTsNs
	})

	for _, chunk := range chunks {

		// glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
		visibles = MergeIntoVisibles(visibles, chunk)

		logPrintf("add", visibles)

	}

	// Cross-check the legacy result against readResolvedChunks.
	if len(visibles) != len(visibles2) {
		fmt.Printf("different visibles size %d : %d\n", len(visibles), len(visibles2))
	} else {
		for i := 0; i < len(visibles); i++ {
			checkDifference(visibles[i], visibles2[i])
		}
	}

	return
}
  238. func checkDifference(x, y VisibleInterval) {
  239. if x.start != y.start ||
  240. x.stop != y.stop ||
  241. x.fileId != y.fileId ||
  242. x.modifiedTsNs != y.modifiedTsNs {
  243. fmt.Printf("different visible %+v : %+v\n", x, y)
  244. }
  245. }
// find non-overlapping visible intervals
// visible interval map to one file chunk
type VisibleInterval struct {
	start        int64  // inclusive start offset in the logical file
	stop         int64  // exclusive end offset in the logical file
	modifiedTsNs int64  // modification timestamp of the backing chunk, in nanoseconds
	fileId       string // volume server file id of the backing chunk
	chunkOffset  int64  // offset within the chunk where this interval's data begins
	chunkSize    uint64 // total size of the backing chunk
	cipherKey    []byte // non-empty when the chunk content is encrypted
	isGzipped    bool   // whether the chunk content is compressed
}
  258. func newVisibleInterval(start, stop int64, fileId string, modifiedTime int64, chunkOffset int64, chunkSize uint64, cipherKey []byte, isGzipped bool) VisibleInterval {
  259. return VisibleInterval{
  260. start: start,
  261. stop: stop,
  262. fileId: fileId,
  263. modifiedTsNs: modifiedTime,
  264. chunkOffset: chunkOffset, // the starting position in the chunk
  265. chunkSize: chunkSize,
  266. cipherKey: cipherKey,
  267. isGzipped: isGzipped,
  268. }
  269. }
  270. func min(x, y int64) int64 {
  271. if x <= y {
  272. return x
  273. }
  274. return y
  275. }
  276. func max(x, y int64) int64 {
  277. if x <= y {
  278. return y
  279. }
  280. return x
  281. }