package filer2

import (
	"fmt"
	"log"
	"testing"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
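// TestCompactFileChunks covers the basic compaction rule: "ghi" (bytes
// 200-300, Mtime 200) is fully shadowed by the newer "jkl" (bytes 110-310,
// Mtime 300), so it is the only garbage chunk. "abc" keeps bytes 10-100 (its
// 100-110 tail is shadowed by the newer "def") and "def" keeps bytes 100-110,
// so both survive compaction along with "jkl".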
func TestCompactFileChunks(t *testing.T) {
	chunks := []*filer_pb.FileChunk{
		{Offset: 10, Size: 100, FileId: "abc", Mtime: 50},
		{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
		{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
		{Offset: 110, Size: 200, FileId: "jkl", Mtime: 300},
	}

	compacted, garbage := CompactFileChunks(chunks)

	if len(compacted) != 3 {
		t.Fatalf("unexpected compacted: %d", len(compacted))
	}
	if len(garbage) != 1 {
		t.Fatalf("unexpected garbage: %d", len(garbage))
	}
}
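// TestCompactFileChunks2 layers six synthetic chunks (Mtimes 0-5) under six
// newer ones (Mtimes 50-500). Every appended chunk is older than whatever it
// overlaps, so after compaction only "abcf", "fhfh", "yuyu", and the
// untouched "ghi" stay visible; the other 8 of the 12 chunks are garbage.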
func TestCompactFileChunks2(t *testing.T) {
	chunks := []*filer_pb.FileChunk{
		{Offset: 0, Size: 100, FileId: "abc", Mtime: 50},
		{Offset: 100, Size: 100, FileId: "def", Mtime: 100},
		{Offset: 200, Size: 100, FileId: "ghi", Mtime: 200},
		{Offset: 0, Size: 100, FileId: "abcf", Mtime: 300},
		{Offset: 50, Size: 100, FileId: "fhfh", Mtime: 400},
		{Offset: 100, Size: 100, FileId: "yuyu", Mtime: 500},
	}

	k := 3
	for n := 0; n < k; n++ {
		chunks = append(chunks, &filer_pb.FileChunk{
			Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n),
		})
		chunks = append(chunks, &filer_pb.FileChunk{
			Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k),
		})
	}

	compacted, garbage := CompactFileChunks(chunks)

	if len(compacted) != 4 {
		t.Fatalf("unexpected compacted: %d", len(compacted))
	}
	if len(garbage) != 8 {
		t.Fatalf("unexpected garbage: %d", len(garbage))
	}
}
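// TestIntervalMerging checks that NonOverlappingVisibleIntervals flattens a
// chunk list into a sorted sequence of visible intervals, where the chunk
// with the highest Mtime wins on any overlapping byte range and holes in
// coverage are preserved.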
func TestIntervalMerging(t *testing.T) {

	testcases := []struct {
		Chunks   []*filer_pb.FileChunk
		Expected []*VisibleInterval
	}{
		// case 0: normal, non-overlapping chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 100, fileId: "abc"},
				{start: 100, stop: 200, fileId: "asdf"},
				{start: 200, stop: 300, fileId: "fsad"},
			},
		},
		// case 1: update fully overwrites an earlier chunk
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
			},
		},
		// case 2: update overwrites part of an earlier chunk
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 50, fileId: "asdf"},
				{start: 50, stop: 100, fileId: "abc"},
			},
		},
		// case 3: chained updates, the newest partially overwriting the previous one
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 50, fileId: "asdf"},
				{start: 50, stop: 300, fileId: "xxxx"},
			},
		},
		// case 4: update far away from previous chunks leaves a hole
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
				{start: 250, stop: 500, fileId: "xxxx"},
			},
		},
		// case 5: chunks arrive with out-of-order mtimes; the newest mtime still wins
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
				{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
				{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 200, fileId: "asdf"},
				{start: 200, stop: 220, fileId: "abc"},
			},
		},
		// case 6: identical duplicate chunks collapse into one interval
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 100, fileId: "abc"},
			},
		},
		// case 7: real updates
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 2097152, FileId: "7,0294cbb9892b", Mtime: 123},
				{Offset: 0, Size: 3145728, FileId: "3,029565bf3092", Mtime: 130},
				{Offset: 2097152, Size: 3145728, FileId: "6,029632f47ae2", Mtime: 140},
				{Offset: 5242880, Size: 3145728, FileId: "2,029734c5aa10", Mtime: 150},
				{Offset: 8388608, Size: 3145728, FileId: "5,02982f80de50", Mtime: 160},
				{Offset: 11534336, Size: 2842193, FileId: "7,0299ad723803", Mtime: 170},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 2097152, fileId: "3,029565bf3092"},
				{start: 2097152, stop: 5242880, fileId: "6,029632f47ae2"},
				{start: 5242880, stop: 8388608, fileId: "2,029734c5aa10"},
				{start: 8388608, stop: 11534336, fileId: "5,02982f80de50"},
				{start: 11534336, stop: 14376529, fileId: "7,0299ad723803"},
			},
		},
		// case 8: regression test from a real bug
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 77824, FileId: "4,0b3df938e301", Mtime: 123},
				{Offset: 471040, Size: 472225 - 471040, FileId: "6,0b3e0650019c", Mtime: 130},
				{Offset: 77824, Size: 208896 - 77824, FileId: "4,0b3f0c7202f0", Mtime: 140},
				{Offset: 208896, Size: 339968 - 208896, FileId: "2,0b4031a72689", Mtime: 150},
				{Offset: 339968, Size: 471040 - 339968, FileId: "3,0b416a557362", Mtime: 160},
			},
			Expected: []*VisibleInterval{
				{start: 0, stop: 77824, fileId: "4,0b3df938e301"},
				{start: 77824, stop: 208896, fileId: "4,0b3f0c7202f0"},
				{start: 208896, stop: 339968, fileId: "2,0b4031a72689"},
				{start: 339968, stop: 471040, fileId: "3,0b416a557362"},
				{start: 471040, stop: 472225, fileId: "6,0b3e0650019c"},
			},
		},
	}

	for i, testcase := range testcases {
		log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
		intervals := NonOverlappingVisibleIntervals(testcase.Chunks)
		for x, interval := range intervals {
			log.Printf("test case %d, interval %d, start=%d, stop=%d, fileId=%s",
				i, x, interval.start, interval.stop, interval.fileId)
		}
		// check the count first, so an unexpected extra interval fails the
		// test cleanly instead of panicking on the Expected index below
		if len(intervals) != len(testcase.Expected) {
			t.Fatalf("failed to compact test case %d, len %d expected %d", i, len(intervals), len(testcase.Expected))
		}
		for x, interval := range intervals {
			if interval.start != testcase.Expected[x].start {
				t.Fatalf("failed on test case %d, interval %d, start %d, expect %d",
					i, x, interval.start, testcase.Expected[x].start)
			}
			if interval.stop != testcase.Expected[x].stop {
				t.Fatalf("failed on test case %d, interval %d, stop %d, expect %d",
					i, x, interval.stop, testcase.Expected[x].stop)
			}
			if interval.fileId != testcase.Expected[x].fileId {
				t.Fatalf("failed on test case %d, interval %d, fileId %s, expect %s",
					i, x, interval.fileId, testcase.Expected[x].fileId)
			}
		}
	}
}
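// For intuition, one way to derive the non-overlapping view is to replay the
// chunks in Mtime order, carving older spans wherever a newer chunk lands.
// The sketch below is illustrative only (hypothetical "span"/"visibleSpans"
// names; it is not this package's actual NonOverlappingVisibleIntervals, and
// it would need a "sort" import):
//
//	type span struct {
//		start, stop int64
//		fileId      string
//	}
//
//	func visibleSpans(chunks []*filer_pb.FileChunk) []span {
//		sorted := append([]*filer_pb.FileChunk(nil), chunks...)
//		sort.Slice(sorted, func(i, j int) bool { return sorted[i].Mtime < sorted[j].Mtime })
//		var view []span
//		for _, c := range sorted {
//			lo, hi := c.Offset, c.Offset+int64(c.Size)
//			var next []span
//			for _, s := range view {
//				if s.stop <= lo || s.start >= hi {
//					next = append(next, s) // no overlap: keep untouched
//					continue
//				}
//				if s.start < lo {
//					next = append(next, span{s.start, lo, s.fileId}) // left remainder
//				}
//				if s.stop > hi {
//					next = append(next, span{hi, s.stop, s.fileId}) // right remainder
//				}
//			}
//			view = append(next, span{lo, hi, c.FileId})
//		}
//		sort.Slice(view, func(i, j int) bool { return view[i].start < view[j].start })
//		return view
//	}

// TestChunksReading verifies ViewFromChunks: given a logical read range, it
// should return the chunk slices to fetch, each carrying the offset inside
// the visible slice, the number of bytes to take, and the logical file
// offset they map to.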
func TestChunksReading(t *testing.T) {

	testcases := []struct {
		Chunks   []*filer_pb.FileChunk
		Offset   int64
		Size     int
		Expected []*ChunkView
	}{
		// case 0: normal, non-overlapping chunks
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Offset: 0,
			Size:   250,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
				{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
				{Offset: 0, Size: 50, FileId: "fsad", LogicOffset: 200},
			},
		},
		// case 1: update fully overwrites an earlier chunk
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
			},
			Offset: 50,
			Size:   100,
			Expected: []*ChunkView{
				{Offset: 50, Size: 100, FileId: "asdf", LogicOffset: 50},
			},
		},
		// case 2: update overwrites part of an earlier chunk
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 50, FileId: "asdf", Mtime: 134},
			},
			Offset: 25,
			Size:   50,
			Expected: []*ChunkView{
				{Offset: 25, Size: 25, FileId: "asdf", LogicOffset: 25},
				{Offset: 0, Size: 25, FileId: "abc", LogicOffset: 50},
			},
		},
		// case 3: chained updates, the newest partially overwriting the previous one
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 50, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Offset: 0,
			Size:   200,
			Expected: []*ChunkView{
				{Offset: 0, Size: 50, FileId: "asdf", LogicOffset: 0},
				{Offset: 0, Size: 150, FileId: "xxxx", LogicOffset: 50},
			},
		},
		// case 4: update far away from previous chunks leaves a hole
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 134},
				{Offset: 250, Size: 250, FileId: "xxxx", Mtime: 154},
			},
			Offset: 0,
			Size:   400,
			Expected: []*ChunkView{
				{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
				// no view for "xxxx": the hole at bytes 200-250 stops the
				// read, since missing intervals should not happen
			},
		},
		// case 5: chunks arrive with out-of-order mtimes; the newest mtime still wins
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 200, FileId: "asdf", Mtime: 184},
				{Offset: 70, Size: 150, FileId: "abc", Mtime: 143},
				{Offset: 80, Size: 100, FileId: "xxxx", Mtime: 134},
			},
			Offset: 0,
			Size:   220,
			Expected: []*ChunkView{
				{Offset: 0, Size: 200, FileId: "asdf", LogicOffset: 0},
				{Offset: 0, Size: 20, FileId: "abc", LogicOffset: 200},
			},
		},
		// case 6: identical duplicate chunks collapse into one view
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
			},
			Offset: 0,
			Size:   100,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
			},
		},
		// case 7: edge case, read ends exactly at a chunk boundary
		{
			Chunks: []*filer_pb.FileChunk{
				{Offset: 0, Size: 100, FileId: "abc", Mtime: 123},
				{Offset: 100, Size: 100, FileId: "asdf", Mtime: 134},
				{Offset: 200, Size: 100, FileId: "fsad", Mtime: 353},
			},
			Offset: 0,
			Size:   200,
			Expected: []*ChunkView{
				{Offset: 0, Size: 100, FileId: "abc", LogicOffset: 0},
				{Offset: 0, Size: 100, FileId: "asdf", LogicOffset: 100},
			},
		},
	}

	for i, testcase := range testcases {
		log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
		chunks := ViewFromChunks(testcase.Chunks, testcase.Offset, testcase.Size)
		// check the count first, so an unexpected extra view fails the test
		// cleanly instead of panicking on the Expected index below
		if len(chunks) != len(testcase.Expected) {
			t.Fatalf("failed to read test case %d, len %d expected %d", i, len(chunks), len(testcase.Expected))
		}
		for x, chunk := range chunks {
			log.Printf("read case %d, chunk %d, offset=%d, size=%d, fileId=%s",
				i, x, chunk.Offset, chunk.Size, chunk.FileId)
			if chunk.Offset != testcase.Expected[x].Offset {
				t.Fatalf("failed on read case %d, chunk %d, Offset %d, expect %d",
					i, x, chunk.Offset, testcase.Expected[x].Offset)
			}
			if chunk.Size != testcase.Expected[x].Size {
				t.Fatalf("failed on read case %d, chunk %d, Size %d, expect %d",
					i, x, chunk.Size, testcase.Expected[x].Size)
			}
			if chunk.FileId != testcase.Expected[x].FileId {
				t.Fatalf("failed on read case %d, chunk %d, FileId %s, expect %s",
					i, x, chunk.FileId, testcase.Expected[x].FileId)
			}
			if chunk.LogicOffset != testcase.Expected[x].LogicOffset {
				t.Fatalf("failed on read case %d, chunk %d, LogicOffset %d, expect %d",
					i, x, chunk.LogicOffset, testcase.Expected[x].LogicOffset)
			}
		}
	}
}
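// Slicing the visible view into ChunkViews is then a single pass that clips
// each span against the requested [offset, offset+size) range and stops at
// the first hole (compare case 4 above, where the gap at byte 200 truncates
// the read). Again a hedged sketch with hypothetical names, building on the
// visibleSpans sketch above rather than this package's actual ViewFromChunks:
//
//	func sliceView(spans []span, offset int64, size int) (views []*ChunkView) {
//		stop, next := offset+int64(size), offset
//		for _, s := range spans {
//			if s.start >= stop {
//				break // past the requested range
//			}
//			if s.stop <= next {
//				continue // before the requested range
//			}
//			if s.start > next {
//				break // hole in the file: nothing can serve these bytes
//			}
//			hi := s.stop
//			if hi > stop {
//				hi = stop
//			}
//			views = append(views, &ChunkView{
//				FileId:      s.fileId,
//				Offset:      next - s.start, // position within the visible slice
//				Size:        uint64(hi - next),
//				LogicOffset: next,
//			})
//			next = hi
//		}
//		return views
//	}

// BenchmarkCompactFileChunks measures compaction over 2048 chunks: 1024 laid
// out back to back at stride 100, plus 1024 newer ones overlapping them at
// half stride.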
func BenchmarkCompactFileChunks(b *testing.B) {
	var chunks []*filer_pb.FileChunk
	k := 1024
	for n := 0; n < k; n++ {
		chunks = append(chunks, &filer_pb.FileChunk{
			Offset: int64(n * 100), Size: 100, FileId: fmt.Sprintf("fileId%d", n), Mtime: int64(n),
		})
		chunks = append(chunks, &filer_pb.FileChunk{
			Offset: int64(n * 50), Size: 100, FileId: fmt.Sprintf("fileId%d", n+k), Mtime: int64(n + k),
		})
	}

	b.ResetTimer() // exclude the chunk setup above from the measurement

	for n := 0; n < b.N; n++ {
		CompactFileChunks(chunks)
	}
}
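// To run just this file's tests and benchmark (assuming the standard
// repository layout, where this package lives under weed/filer2):
//
//	go test -run 'TestCompactFileChunks|TestIntervalMerging|TestChunksReading' ./weed/filer2/
//	go test -bench BenchmarkCompactFileChunks -run '^$' ./weed/filer2/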