
refactor filer_pb.Entry and filer.Entry to use GetChunks()

so that a lock can later be taken when reading chunks
chrislu committed 2 years ago · commit 70a4c98b00
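
The point of the refactor: once every reader of Entry.Chunks goes through a single GetChunks() accessor, a lock can later be taken inside that one method instead of at dozens of call sites. Below is a minimal, self-contained Go sketch of where this is headed (the FileChunk stub and the RWMutex are illustrative assumptions; this commit itself only adds the plain accessor):

package main

import (
	"fmt"
	"sync"
)

// FileChunk stands in for filer_pb.FileChunk to keep the sketch self-contained.
type FileChunk struct {
	FileId string
	Size   uint64
}

// Entry mirrors the relevant part of filer.Entry; the mutex is an
// assumption about the planned follow-up change, not part of this commit.
type Entry struct {
	chunksLock sync.RWMutex
	Chunks     []*FileChunk
}

// GetChunks is the accessor all call sites now route through; taking the
// read lock here would protect every reader of Chunks at once.
func (entry *Entry) GetChunks() []*FileChunk {
	entry.chunksLock.RLock()
	defer entry.chunksLock.RUnlock()
	return entry.Chunks
}

func main() {
	e := &Entry{Chunks: []*FileChunk{{FileId: "3,01637037d6", Size: 1024}}}
	for i, chunk := range e.GetChunks() {
		fmt.Printf("chunk %d: %s (%d bytes)\n", i+1, chunk.FileId, chunk.Size)
	}
}

The hunks that follow are the mechanical part of this change: each direct read such as entry.Chunks or len(entry.Chunks) becomes entry.GetChunks() or len(entry.GetChunks()).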

+ 1 - 1
unmaintained/see_meta/see_meta.go

@@ -59,7 +59,7 @@ func walkMetaFile(dst *os.File) error {
 		}
 
 		fmt.Fprintf(os.Stdout, "file %s %v\n", util.FullPath(fullEntry.Dir).Child(fullEntry.Entry.Name), fullEntry.Entry.Attributes.String())
-		for i, chunk := range fullEntry.Entry.Chunks {
+		for i, chunk := range fullEntry.Entry.GetChunks() {
 			fmt.Fprintf(os.Stdout, "  chunk: %d %v %d,%x%08x\n", i+1, chunk, chunk.Fid.VolumeId, chunk.Fid.FileKey, chunk.Fid.Cookie)
 		}
 

+ 1 - 1
weed/command/filer_cat.go

@@ -114,7 +114,7 @@ func runFilerCat(cmd *Command, args []string) bool {
 
 		filerCat.filerClient = client
 
-		return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, int64(filer.FileSize(respLookupEntry.Entry)))
+		return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.GetChunks(), 0, int64(filer.FileSize(respLookupEntry.Entry)))
 
 	})
 

+ 1 - 1
weed/filer/abstract_sql/abstract_sql_store.go

@@ -158,7 +158,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 

+ 2 - 2
weed/filer/arangodb/arangodb_store.go

@@ -157,7 +157,7 @@ func (store *ArangodbStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 	model := &Model{
@@ -196,7 +196,7 @@ func (store *ArangodbStore) UpdateEntry(ctx context.Context, entry *filer.Entry)
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 	model := &Model{

+ 1 - 1
weed/filer/cassandra/cassandra_store.go

@@ -100,7 +100,7 @@ func (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry
 		return fmt.Errorf("encode %s: %s", entry.FullPath, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = util.MaybeGzipData(meta)
 	}
 

+ 6 - 2
weed/filer/entry.go

@@ -46,7 +46,7 @@ type Entry struct {
 }
 
 func (entry *Entry) Size() uint64 {
-	return maxUint64(maxUint64(TotalSize(entry.Chunks), entry.FileSize), uint64(len(entry.Content)))
+	return maxUint64(maxUint64(TotalSize(entry.GetChunks()), entry.FileSize), uint64(len(entry.Content)))
 }
 
 func (entry *Entry) Timestamp() time.Time {
@@ -91,7 +91,7 @@ func (entry *Entry) ToExistingProtoEntry(message *filer_pb.Entry) {
 	}
 	message.IsDirectory = entry.IsDirectory()
 	message.Attributes = EntryAttributeToPb(entry)
-	message.Chunks = entry.Chunks
+	message.Chunks = entry.GetChunks()
 	message.Extended = entry.Extended
 	message.HardLinkId = entry.HardLinkId
 	message.HardLinkCounter = entry.HardLinkCounter
@@ -123,6 +123,10 @@ func (entry *Entry) ToProtoFullEntry() *filer_pb.FullEntry {
 	}
 }
 
+func (entry *Entry) GetChunks() []*filer_pb.FileChunk {
+	return entry.Chunks
+}
+
 func FromPbEntry(dir string, entry *filer_pb.Entry) *Entry {
 	t := &Entry{}
 	t.FullPath = util.NewFullPath(dir, entry.Name)

+ 1 - 1
weed/filer/etcd/etcd_store.go

@@ -82,7 +82,7 @@ func (store *EtcdStore) InsertEntry(ctx context.Context, entry *filer.Entry) (er
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	if len(entry.Chunks) > filer.CountEntryChunksForGzip {
+	if len(entry.GetChunks()) > filer.CountEntryChunksForGzip {
 		meta = weed_util.MaybeGzipData(meta)
 	}
 

+ 3 - 3
weed/filer/filechunks.go

@@ -31,19 +31,19 @@ func FileSize(entry *filer_pb.Entry) (size uint64) {
 			fileSize = maxUint64(fileSize, uint64(entry.RemoteEntry.RemoteSize))
 		}
 	}
-	return maxUint64(TotalSize(entry.Chunks), fileSize)
+	return maxUint64(TotalSize(entry.GetChunks()), fileSize)
 }
 
 func ETag(entry *filer_pb.Entry) (etag string) {
 	if entry.Attributes == nil || entry.Attributes.Md5 == nil {
-		return ETagChunks(entry.Chunks)
+		return ETagChunks(entry.GetChunks())
 	}
 	return fmt.Sprintf("%x", entry.Attributes.Md5)
 }
 
 func ETagEntry(entry *Entry) (etag string) {
 	if entry.Attr.Md5 == nil {
-		return ETagChunks(entry.Chunks)
+		return ETagChunks(entry.GetChunks())
 	}
 	return fmt.Sprintf("%x", entry.Attr.Md5)
 }

+ 1 - 1
weed/filer/filer_conf.go

@@ -75,7 +75,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
 		return fc.LoadFromBytes(entry.Content)
 	}
 
-	return fc.loadFromChunks(filer, entry.Content, entry.Chunks, entry.Size())
+	return fc.loadFromChunks(filer, entry.Content, entry.GetChunks(), entry.Size())
 }
 
 func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk, size uint64) (err error) {

+ 2 - 2
weed/filer/filer_delete_entry.go

@@ -48,7 +48,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 	}
 
 	if shouldDeleteChunks && !isDeleteCollection {
-		f.DirectDeleteChunks(entry.Chunks)
+		f.DirectDeleteChunks(entry.GetChunks())
 	}
 
 	// delete the file or folder
@@ -93,7 +93,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 						// hard link chunk data are deleted separately
 						err = onHardLinkIdsFn([]HardLinkId{sub.HardLinkId})
 					} else {
-						err = onChunksFn(sub.Chunks)
+						err = onChunksFn(sub.GetChunks())
 					}
 				}
 				if err != nil && !ignoreRecursiveError {

Some files were not shown because too many files changed in this diff