// filechunks_test.go
  1. package filer
  2. import (
  3. "fmt"
  4. "log"
  5. "math"
  6. "math/rand"
  7. "strconv"
  8. "testing"
  9. "github.com/stretchr/testify/assert"
  10. "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
  11. )
  12. func TestCompactFileChunks(t *testing.T) {
  13. chunks := []*filer_pb.FileChunk{
  14. {Offset: 10, Size: 100, FileId: "abc", ModifiedTsNs: 50},
  15. {Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100},
  16. {Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200},
  17. {Offset: 110, Size: 200, FileId: "jkl", ModifiedTsNs: 300},
  18. }
  19. compacted, garbage := CompactFileChunks(nil, chunks)
  20. if len(compacted) != 3 {
  21. t.Fatalf("unexpected compacted: %d", len(compacted))
  22. }
  23. if len(garbage) != 1 {
  24. t.Fatalf("unexpected garbage: %d", len(garbage))
  25. }
  26. }
  27. func TestCompactFileChunks2(t *testing.T) {
  28. chunks := []*filer_pb.FileChunk{
  29. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50},
  30. {Offset: 100, Size: 100, FileId: "def", ModifiedTsNs: 100},
  31. {Offset: 200, Size: 100, FileId: "ghi", ModifiedTsNs: 200},
  32. {Offset: 0, Size: 100, FileId: "abcf", ModifiedTsNs: 300},
  33. {Offset: 50, Size: 100, FileId: "fhfh", ModifiedTsNs: 400},
  34. {Offset: 100, Size: 100, FileId: "yuyu", ModifiedTsNs: 500},
  35. }
  36. k := 3
  37. for n := 0; n < k; n++ {
  38. chunks = append(chunks, &filer_pb.FileChunk{
  39. Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n),
  40. })
  41. chunks = append(chunks, &filer_pb.FileChunk{
  42. Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k),
  43. })
  44. }
  45. compacted, garbage := CompactFileChunks(nil, chunks)
  46. if len(compacted) != 4 {
  47. t.Fatalf("unexpected compacted: %d", len(compacted))
  48. }
  49. if len(garbage) != 8 {
  50. t.Fatalf("unexpected garbage: %d", len(garbage))
  51. }
  52. }
  53. func TestRandomFileChunksCompact(t *testing.T) {
  54. data := make([]byte, 1024)
  55. var chunks []*filer_pb.FileChunk
  56. for i := 0; i < 15; i++ {
  57. start, stop := rand.Intn(len(data)), rand.Intn(len(data))
  58. if start > stop {
  59. start, stop = stop, start
  60. }
  61. if start+16 < stop {
  62. stop = start + 16
  63. }
  64. chunk := &filer_pb.FileChunk{
  65. FileId: strconv.Itoa(i),
  66. Offset: int64(start),
  67. Size: uint64(stop - start),
  68. ModifiedTsNs: int64(i),
  69. Fid: &filer_pb.FileId{FileKey: uint64(i)},
  70. }
  71. chunks = append(chunks, chunk)
  72. for x := start; x < stop; x++ {
  73. data[x] = byte(i)
  74. }
  75. }
  76. visibles, _ := NonOverlappingVisibleIntervals(nil, chunks, 0, math.MaxInt64)
  77. for visible := visibles.Front(); visible != nil; visible = visible.Next {
  78. v := visible.Value
  79. for x := v.start; x < v.stop; x++ {
  80. assert.Equal(t, strconv.Itoa(int(data[x])), v.fileId)
  81. }
  82. }
  83. }
  84. func TestIntervalMerging(t *testing.T) {
  85. testcases := []struct {
  86. Chunks []*filer_pb.FileChunk
  87. Expected []*VisibleInterval
  88. }{
  89. // case 0: normal
  90. {
  91. Chunks: []*filer_pb.FileChunk{
  92. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  93. {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
  94. {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
  95. },
  96. Expected: []*VisibleInterval{
  97. {start: 0, stop: 100, fileId: "abc"},
  98. {start: 100, stop: 200, fileId: "asdf"},
  99. {start: 200, stop: 300, fileId: "fsad"},
  100. },
  101. },
  102. // case 1: updates overwrite full chunks
  103. {
  104. Chunks: []*filer_pb.FileChunk{
  105. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  106. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  107. },
  108. Expected: []*VisibleInterval{
  109. {start: 0, stop: 200, fileId: "asdf"},
  110. },
  111. },
  112. // case 2: updates overwrite part of previous chunks
  113. {
  114. Chunks: []*filer_pb.FileChunk{
  115. {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
  116. {Offset: 0, Size: 70, FileId: "b", ModifiedTsNs: 134},
  117. },
  118. Expected: []*VisibleInterval{
  119. {start: 0, stop: 70, fileId: "b"},
  120. {start: 70, stop: 100, fileId: "a", offsetInChunk: 70},
  121. },
  122. },
  123. // case 3: updates overwrite full chunks
  124. {
  125. Chunks: []*filer_pb.FileChunk{
  126. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  127. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  128. {Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
  129. },
  130. Expected: []*VisibleInterval{
  131. {start: 0, stop: 50, fileId: "asdf"},
  132. {start: 50, stop: 300, fileId: "xxxx"},
  133. },
  134. },
  135. // case 4: updates far away from prev chunks
  136. {
  137. Chunks: []*filer_pb.FileChunk{
  138. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  139. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  140. {Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
  141. },
  142. Expected: []*VisibleInterval{
  143. {start: 0, stop: 200, fileId: "asdf"},
  144. {start: 250, stop: 500, fileId: "xxxx"},
  145. },
  146. },
  147. // case 5: updates overwrite full chunks
  148. {
  149. Chunks: []*filer_pb.FileChunk{
  150. {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
  151. {Offset: 0, Size: 200, FileId: "d", ModifiedTsNs: 184},
  152. {Offset: 70, Size: 150, FileId: "c", ModifiedTsNs: 143},
  153. {Offset: 80, Size: 100, FileId: "b", ModifiedTsNs: 134},
  154. },
  155. Expected: []*VisibleInterval{
  156. {start: 0, stop: 200, fileId: "d"},
  157. {start: 200, stop: 220, fileId: "c", offsetInChunk: 130},
  158. },
  159. },
  160. // case 6: same updates
  161. {
  162. Chunks: []*filer_pb.FileChunk{
  163. {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123},
  164. {Offset: 0, Size: 100, FileId: "axf", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124},
  165. {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125},
  166. },
  167. Expected: []*VisibleInterval{
  168. {start: 0, stop: 100, fileId: "xyz"},
  169. },
  170. },
  171. // case 7: real updates
  172. {
  173. Chunks: []*filer_pb.FileChunk{
  174. {Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", ModifiedTsNs: 123},
  175. {Offset: 0, Size: 3145728, FileId: "3,029565bf3092", ModifiedTsNs: 130},
  176. {Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", ModifiedTsNs: 140},
  177. {Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", ModifiedTsNs: 150},
  178. {Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", ModifiedTsNs: 160},
  179. {Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", ModifiedTsNs: 170},
  180. },
  181. Expected: []*VisibleInterval{
  182. {start: 0, stop: 2097152, fileId: "3,029565bf3092"},
  183. {start: 2097152, stop: 5242880, fileId: "6,029632f47ae2"},
  184. {start: 5242880, stop: 8388608, fileId: "2,029734c5aa10"},
  185. {start: 8388608, stop: 11534336, fileId: "5,02982f80de50"},
  186. {start: 11534336, stop: 14376529, fileId: "7,0299ad723803"},
  187. },
  188. },
  189. // case 8: real bug
  190. {
  191. Chunks: []*filer_pb.FileChunk{
  192. {Offset: 0, Size: 77824, FileId: "4,0b3df938e301", ModifiedTsNs: 123},
  193. {Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", ModifiedTsNs: 130},
  194. {Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", ModifiedTsNs: 140},
  195. {Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", ModifiedTsNs: 150},
  196. {Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", ModifiedTsNs: 160},
  197. },
  198. Expected: []*VisibleInterval{
  199. {start: 0, stop: 77824, fileId: "4,0b3df938e301"},
  200. {start: 77824, stop: 208896, fileId: "4,0b3f0c7202f0"},
  201. {start: 208896, stop: 339968, fileId: "2,0b4031a72689"},
  202. {start: 339968, stop: 471040, fileId: "3,0b416a557362"},
  203. {start: 471040, stop: 472225, fileId: "6,0b3e0650019c"},
  204. },
  205. },
  206. }
  207. for i, testcase := range testcases {
  208. log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
  209. intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks, 0, math.MaxInt64)
  210. x := -1
  211. for visible := intervals.Front(); visible != nil; visible = visible.Next {
  212. x++
  213. interval := visible.Value
  214. log.Printf("test case %d, interval start=%d, stop=%d, fileId=%s",
  215. i, interval.start, interval.stop, interval.fileId)
  216. }
  217. x = -1
  218. for visible := intervals.Front(); visible != nil; visible = visible.Next {
  219. x++
  220. interval := visible.Value
  221. if interval.start != testcase.Expected[x].start {
  222. t.Fatalf("failed on test case %d, interval %d, start %d, expect %d",
  223. i, x, interval.start, testcase.Expected[x].start)
  224. }
  225. if interval.stop != testcase.Expected[x].stop {
  226. t.Fatalf("failed on test case %d, interval %d, stop %d, expect %d",
  227. i, x, interval.stop, testcase.Expected[x].stop)
  228. }
  229. if interval.fileId != testcase.Expected[x].fileId {
  230. t.Fatalf("failed on test case %d, interval %d, chunkId %s, expect %s",
  231. i, x, interval.fileId, testcase.Expected[x].fileId)
  232. }
  233. if interval.offsetInChunk != testcase.Expected[x].offsetInChunk {
  234. t.Fatalf("failed on test case %d, interval %d, offsetInChunk %d, expect %d",
  235. i, x, interval.offsetInChunk, testcase.Expected[x].offsetInChunk)
  236. }
  237. }
  238. if intervals.Len() != len(testcase.Expected) {
  239. t.Fatalf("failed to compact test case %d, len %d expected %d", i, intervals.Len(), len(testcase.Expected))
  240. }
  241. }
  242. }
  243. func TestChunksReading(t *testing.T) {
  244. testcases := []struct {
  245. Chunks []*filer_pb.FileChunk
  246. Offset int64
  247. Size int64
  248. Expected []*ChunkView
  249. }{
  250. // case 0: normal
  251. {
  252. Chunks: []*filer_pb.FileChunk{
  253. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  254. {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
  255. {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
  256. },
  257. Offset: 0,
  258. Size: 250,
  259. Expected: []*ChunkView{
  260. {OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0},
  261. {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100},
  262. {OffsetInChunk: 0, ViewSize: 50, FileId: "fsad", ViewOffset: 200},
  263. },
  264. },
  265. // case 1: updates overwrite full chunks
  266. {
  267. Chunks: []*filer_pb.FileChunk{
  268. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  269. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  270. },
  271. Offset: 50,
  272. Size: 100,
  273. Expected: []*ChunkView{
  274. {OffsetInChunk: 50, ViewSize: 100, FileId: "asdf", ViewOffset: 50},
  275. },
  276. },
  277. // case 2: updates overwrite part of previous chunks
  278. {
  279. Chunks: []*filer_pb.FileChunk{
  280. {Offset: 3, Size: 100, FileId: "a", ModifiedTsNs: 123},
  281. {Offset: 10, Size: 50, FileId: "b", ModifiedTsNs: 134},
  282. },
  283. Offset: 30,
  284. Size: 40,
  285. Expected: []*ChunkView{
  286. {OffsetInChunk: 20, ViewSize: 30, FileId: "b", ViewOffset: 30},
  287. {OffsetInChunk: 57, ViewSize: 10, FileId: "a", ViewOffset: 60},
  288. },
  289. },
  290. // case 3: updates overwrite full chunks
  291. {
  292. Chunks: []*filer_pb.FileChunk{
  293. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  294. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  295. {Offset: 50, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
  296. },
  297. Offset: 0,
  298. Size: 200,
  299. Expected: []*ChunkView{
  300. {OffsetInChunk: 0, ViewSize: 50, FileId: "asdf", ViewOffset: 0},
  301. {OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 50},
  302. },
  303. },
  304. // case 4: updates far away from prev chunks
  305. {
  306. Chunks: []*filer_pb.FileChunk{
  307. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  308. {Offset: 0, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  309. {Offset: 250, Size: 250, FileId: "xxxx", ModifiedTsNs: 154},
  310. },
  311. Offset: 0,
  312. Size: 400,
  313. Expected: []*ChunkView{
  314. {OffsetInChunk: 0, ViewSize: 200, FileId: "asdf", ViewOffset: 0},
  315. {OffsetInChunk: 0, ViewSize: 150, FileId: "xxxx", ViewOffset: 250},
  316. },
  317. },
  318. // case 5: updates overwrite full chunks
  319. {
  320. Chunks: []*filer_pb.FileChunk{
  321. {Offset: 0, Size: 100, FileId: "a", ModifiedTsNs: 123},
  322. {Offset: 0, Size: 200, FileId: "c", ModifiedTsNs: 184},
  323. {Offset: 70, Size: 150, FileId: "b", ModifiedTsNs: 143},
  324. {Offset: 80, Size: 100, FileId: "xxxx", ModifiedTsNs: 134},
  325. },
  326. Offset: 0,
  327. Size: 220,
  328. Expected: []*ChunkView{
  329. {OffsetInChunk: 0, ViewSize: 200, FileId: "c", ViewOffset: 0},
  330. {OffsetInChunk: 130, ViewSize: 20, FileId: "b", ViewOffset: 200},
  331. },
  332. },
  333. // case 6: same updates
  334. {
  335. Chunks: []*filer_pb.FileChunk{
  336. {Offset: 0, Size: 100, FileId: "abc", Fid: &filer_pb.FileId{FileKey: 1}, ModifiedTsNs: 123},
  337. {Offset: 0, Size: 100, FileId: "def", Fid: &filer_pb.FileId{FileKey: 2}, ModifiedTsNs: 124},
  338. {Offset: 0, Size: 100, FileId: "xyz", Fid: &filer_pb.FileId{FileKey: 3}, ModifiedTsNs: 125},
  339. },
  340. Offset: 0,
  341. Size: 100,
  342. Expected: []*ChunkView{
  343. {OffsetInChunk: 0, ViewSize: 100, FileId: "xyz", ViewOffset: 0},
  344. },
  345. },
  346. // case 7: edge cases
  347. {
  348. Chunks: []*filer_pb.FileChunk{
  349. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  350. {Offset: 100, Size: 100, FileId: "asdf", ModifiedTsNs: 134},
  351. {Offset: 200, Size: 100, FileId: "fsad", ModifiedTsNs: 353},
  352. },
  353. Offset: 0,
  354. Size: 200,
  355. Expected: []*ChunkView{
  356. {OffsetInChunk: 0, ViewSize: 100, FileId: "abc", ViewOffset: 0},
  357. {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 100},
  358. },
  359. },
  360. // case 8: edge cases
  361. {
  362. Chunks: []*filer_pb.FileChunk{
  363. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 123},
  364. {Offset: 90, Size: 200, FileId: "asdf", ModifiedTsNs: 134},
  365. {Offset: 190, Size: 300, FileId: "fsad", ModifiedTsNs: 353},
  366. },
  367. Offset: 0,
  368. Size: 300,
  369. Expected: []*ChunkView{
  370. {OffsetInChunk: 0, ViewSize: 90, FileId: "abc", ViewOffset: 0},
  371. {OffsetInChunk: 0, ViewSize: 100, FileId: "asdf", ViewOffset: 90},
  372. {OffsetInChunk: 0, ViewSize: 110, FileId: "fsad", ViewOffset: 190},
  373. },
  374. },
  375. // case 9: edge cases
  376. {
  377. Chunks: []*filer_pb.FileChunk{
  378. {Offset: 0, Size: 43175947, FileId: "2,111fc2cbfac1", ModifiedTsNs: 1},
  379. {Offset: 43175936, Size: 52981771 - 43175936, FileId: "2,112a36ea7f85", ModifiedTsNs: 2},
  380. {Offset: 52981760, Size: 72564747 - 52981760, FileId: "4,112d5f31c5e7", ModifiedTsNs: 3},
  381. {Offset: 72564736, Size: 133255179 - 72564736, FileId: "1,113245f0cdb6", ModifiedTsNs: 4},
  382. {Offset: 133255168, Size: 137269259 - 133255168, FileId: "3,1141a70733b5", ModifiedTsNs: 5},
  383. {Offset: 137269248, Size: 153578836 - 137269248, FileId: "1,114201d5bbdb", ModifiedTsNs: 6},
  384. },
  385. Offset: 0,
  386. Size: 153578836,
  387. Expected: []*ChunkView{
  388. {OffsetInChunk: 0, ViewSize: 43175936, FileId: "2,111fc2cbfac1", ViewOffset: 0},
  389. {OffsetInChunk: 0, ViewSize: 52981760 - 43175936, FileId: "2,112a36ea7f85", ViewOffset: 43175936},
  390. {OffsetInChunk: 0, ViewSize: 72564736 - 52981760, FileId: "4,112d5f31c5e7", ViewOffset: 52981760},
  391. {OffsetInChunk: 0, ViewSize: 133255168 - 72564736, FileId: "1,113245f0cdb6", ViewOffset: 72564736},
  392. {OffsetInChunk: 0, ViewSize: 137269248 - 133255168, FileId: "3,1141a70733b5", ViewOffset: 133255168},
  393. {OffsetInChunk: 0, ViewSize: 153578836 - 137269248, FileId: "1,114201d5bbdb", ViewOffset: 137269248},
  394. },
  395. },
  396. }
  397. for i, testcase := range testcases {
  398. if i != 2 {
  399. // continue
  400. }
  401. log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
  402. chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size)
  403. x := -1
  404. for c := chunks.Front(); c != nil; c = c.Next {
  405. x++
  406. chunk := c.Value
  407. log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
  408. i, x, chunk.OffsetInChunk, chunk.ViewSize, chunk.FileId)
  409. if chunk.OffsetInChunk != testcase.Expected[x].OffsetInChunk {
  410. t.Fatalf("failed on read case %d, chunk %s, Offset %d, expect %d",
  411. i, chunk.FileId, chunk.OffsetInChunk, testcase.Expected[x].OffsetInChunk)
  412. }
  413. if chunk.ViewSize != testcase.Expected[x].ViewSize {
  414. t.Fatalf("failed on read case %d, chunk %s, ViewSize %d, expect %d",
  415. i, chunk.FileId, chunk.ViewSize, testcase.Expected[x].ViewSize)
  416. }
  417. if chunk.FileId != testcase.Expected[x].FileId {
  418. t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
  419. i, x, chunk.FileId, testcase.Expected[x].FileId)
  420. }
  421. if chunk.ViewOffset != testcase.Expected[x].ViewOffset {
  422. t.Fatalf("failed on read case %d, chunk %d, ViewOffset %d, expect %d",
  423. i, x, chunk.ViewOffset, testcase.Expected[x].ViewOffset)
  424. }
  425. }
  426. if chunks.Len() != len(testcase.Expected) {
  427. t.Fatalf("failed to read test case %d, len %d expected %d", i, chunks.Len(), len(testcase.Expected))
  428. }
  429. }
  430. }
  431. func BenchmarkCompactFileChunks(b *testing.B) {
  432. var chunks []*filer_pb.FileChunk
  433. k := 1024
  434. for n := 0; n < k; n++ {
  435. chunks = append(chunks, &filer_pb.FileChunk{
  436. Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), ModifiedTsNs: int64(n),
  437. })
  438. chunks = append(chunks, &filer_pb.FileChunk{
  439. Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), ModifiedTsNs: int64(n + k),
  440. })
  441. }
  442. for n := 0; n < b.N; n++ {
  443. CompactFileChunks(nil, chunks)
  444. }
  445. }
  446. func addVisibleInterval(visibles *IntervalList[*VisibleInterval], x *VisibleInterval) {
  447. visibles.AppendInterval(&Interval[*VisibleInterval]{
  448. StartOffset: x.start,
  449. StopOffset: x.stop,
  450. TsNs: x.modifiedTsNs,
  451. Value: x,
  452. })
  453. }
  454. func TestViewFromVisibleIntervals(t *testing.T) {
  455. visibles := NewIntervalList[*VisibleInterval]()
  456. addVisibleInterval(visibles, &VisibleInterval{
  457. start: 0,
  458. stop: 25,
  459. fileId: "fid1",
  460. })
  461. addVisibleInterval(visibles, &VisibleInterval{
  462. start: 4096,
  463. stop: 8192,
  464. fileId: "fid2",
  465. })
  466. addVisibleInterval(visibles, &VisibleInterval{
  467. start: 16384,
  468. stop: 18551,
  469. fileId: "fid3",
  470. })
  471. views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
  472. if views.Len() != visibles.Len() {
  473. assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
  474. }
  475. }
  476. func TestViewFromVisibleIntervals2(t *testing.T) {
  477. visibles := NewIntervalList[*VisibleInterval]()
  478. addVisibleInterval(visibles, &VisibleInterval{
  479. start: 344064,
  480. stop: 348160,
  481. fileId: "fid1",
  482. })
  483. addVisibleInterval(visibles, &VisibleInterval{
  484. start: 348160,
  485. stop: 356352,
  486. fileId: "fid2",
  487. })
  488. views := ViewFromVisibleIntervals(visibles, 0, math.MaxInt32)
  489. if views.Len() != visibles.Len() {
  490. assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
  491. }
  492. }
  493. func TestViewFromVisibleIntervals3(t *testing.T) {
  494. visibles := NewIntervalList[*VisibleInterval]()
  495. addVisibleInterval(visibles, &VisibleInterval{
  496. start: 1000,
  497. stop: 2000,
  498. fileId: "fid1",
  499. })
  500. addVisibleInterval(visibles, &VisibleInterval{
  501. start: 3000,
  502. stop: 4000,
  503. fileId: "fid2",
  504. })
  505. views := ViewFromVisibleIntervals(visibles, 1700, 1500)
  506. if views.Len() != visibles.Len() {
  507. assert.Equal(t, visibles.Len(), views.Len(), "ViewFromVisibleIntervals error")
  508. }
  509. }
  510. func TestCompactFileChunks3(t *testing.T) {
  511. chunks := []*filer_pb.FileChunk{
  512. {Offset: 0, Size: 100, FileId: "abc", ModifiedTsNs: 50},
  513. {Offset: 100, Size: 100, FileId: "ghi", ModifiedTsNs: 50},
  514. {Offset: 200, Size: 100, FileId: "jkl", ModifiedTsNs: 100},
  515. {Offset: 300, Size: 100, FileId: "def", ModifiedTsNs: 200},
  516. }
  517. compacted, _ := CompactFileChunks(nil, chunks)
  518. if len(compacted) != 4 {
  519. t.Fatalf("unexpected compacted: %d", len(compacted))
  520. }
  521. }