
go fmt and fix some typo

bingoohuang · 6 years ago · commit ab6be025d7

+ 3 - 3
unmaintained/fix_dat/fix_dat.go

@@ -77,13 +77,13 @@ func iterateEntries(datFile, idxFile *os.File, visitNeedle func(n *storage.Needl
 	readerOffset += int64(count)
 
 	// start to read dat file
-	superblock, err := storage.ReadSuperBlock(datFile)
+	superBlock, err := storage.ReadSuperBlock(datFile)
 	if err != nil {
 		fmt.Printf("cannot read dat file super block: %v", err)
 		return
 	}
-	offset := int64(superblock.BlockSize())
-	version := superblock.Version()
+	offset := int64(superBlock.BlockSize())
+	version := superBlock.Version()
 	n, rest, err := storage.ReadNeedleHeader(datFile, version, offset)
 	if err != nil {
 		fmt.Printf("cannot read needle header: %v", err)

+ 1 - 1
unmaintained/repeated_vacuum/repeated_vacuum.go

@@ -29,7 +29,7 @@ func main() {
 		rand.Read(data)
 		reader := bytes.NewReader(data)
 
-		targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url,assignResult.Fid)
+		targetUrl := fmt.Sprintf("http://%s/%s", assignResult.Url, assignResult.Fid)
 
 		_, err = operation.Upload(targetUrl, fmt.Sprintf("test%d", i), reader, false, "", nil, "")
 		if err != nil {

+ 1 - 1
weed/command/backup.go

@@ -38,7 +38,7 @@ var cmdBackup = &Command{
 	This will help to backup future new volumes.
 	
 	Usually backing up is just copying the .dat (and .idx) files.
-	But it's tricky to incremententally copy the differences.
+	But it's tricky to incrementally copy the differences.
 	
 	The complexity comes when there are multiple addition, deletion and compaction.
 	This tool will handle them correctly and efficiently, avoiding unnecessary data transporation.

+ 6 - 6
weed/command/benchmark.go

@@ -45,7 +45,7 @@ var (
 )
 
 func init() {
-	cmdBenchmark.Run = runbenchmark // break init cycle
+	cmdBenchmark.Run = runBenchmark // break init cycle
 	cmdBenchmark.IsDebug = cmdBenchmark.Flag.Bool("debug", false, "verbose debug information")
 	b.masters = cmdBenchmark.Flag.String("master", "localhost:9333", "SeaweedFS master location")
 	b.concurrency = cmdBenchmark.Flag.Int("c", 16, "number of concurrent write or read processes")
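
The runbenchmark → runBenchmark rename keeps the `// break init cycle` comment intact; for readers unfamiliar with that pattern, here is a minimal, self-contained sketch (simplified types, not the real weed/command ones) of why Run is assigned inside init() rather than in the composite literal:

package main

import "fmt"

// Command is a cut-down stand-in for the command type used in weed/command.
type Command struct {
	UsageLine string
	Run       func(cmd *Command, args []string) bool
}

var cmdBenchmark = &Command{
	UsageLine: "benchmark",
	// Writing `Run: runBenchmark` here would be rejected with an
	// "initialization cycle" error, because runBenchmark refers back to
	// cmdBenchmark below.
}

func init() {
	cmdBenchmark.Run = runBenchmark // assigning at init time breaks the cycle
}

func runBenchmark(cmd *Command, args []string) bool {
	fmt.Println(cmdBenchmark.UsageLine) // the back-reference that causes the cycle
	return true
}

func main() {
	cmdBenchmark.Run(cmdBenchmark, nil)
}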
@@ -101,7 +101,7 @@ var (
 	readStats  *stats
 )
 
-func runbenchmark(cmd *Command, args []string) bool {
+func runBenchmark(cmd *Command, args []string) bool {
 	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
 	if *b.maxCpu < 1 {
 		*b.maxCpu = runtime.NumCPU()
@@ -121,17 +121,17 @@ func runbenchmark(cmd *Command, args []string) bool {
 	masterClient.WaitUntilConnected()
 
 	if *b.write {
-		bench_write()
+		benchWrite()
 	}
 
 	if *b.read {
-		bench_read()
+		benchRead()
 	}
 
 	return true
 }
 
-func bench_write() {
+func benchWrite() {
 	fileIdLineChan := make(chan string)
 	finishChan := make(chan bool)
 	writeStats = newStats(*b.concurrency)
@@ -158,7 +158,7 @@ func bench_write() {
 	writeStats.printStats()
 }
 
-func bench_read() {
+func benchRead() {
 	fileIdLineChan := make(chan string)
 	finishChan := make(chan bool)
 	readStats = newStats(*b.concurrency)

+ 2 - 2
weed/command/filer_copy.go

@@ -315,7 +315,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string,
 
 func detectMimeType(f *os.File) string {
 	head := make([]byte, 512)
-	f.Seek(0, 0)
+	f.Seek(0, io.SeekStart)
 	n, err := f.Read(head)
 	if err == io.EOF {
 		return ""
@@ -324,7 +324,7 @@ func detectMimeType(f *os.File) string {
 		fmt.Printf("read head of %v: %v\n", f.Name(), err)
 		return "application/octet-stream"
 	}
-	f.Seek(0, 0)
+	f.Seek(0, io.SeekStart)
 	mimeType := http.DetectContentType(head[:n])
 	return mimeType
 }
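
Switching the whence argument from the bare literal 0 to io.SeekStart is purely a readability change; the io package defines SeekStart, SeekCurrent and SeekEnd for this parameter. A minimal sketch of the same rewind-and-sniff pattern, using a hypothetical file name:

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	f, err := os.Open("example.bin") // hypothetical file, for illustration only
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// http.DetectContentType looks at no more than the first 512 bytes.
	head := make([]byte, 512)
	n, err := f.Read(head)
	if err != nil && err != io.EOF {
		fmt.Println(err)
		return
	}

	// io.SeekStart, io.SeekCurrent and io.SeekEnd name the whence argument;
	// rewinding leaves the file positioned for the real read that follows.
	if _, err := f.Seek(0, io.SeekStart); err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(http.DetectContentType(head[:n]))
}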

+ 1 - 1
weed/command/filer_export.go

@@ -14,7 +14,7 @@ func init() {
 }
 
 var cmdFilerExport = &Command{
-	UsageLine: "filer.export -sourceStore=mysql -targetStroe=cassandra",
+	UsageLine: "filer.export -sourceStore=mysql -targetStore=cassandra",
 	Short:     "export meta data in filer store",
 	Long: `Iterate the file tree and export all metadata out
 

+ 1 - 1
weed/filer2/redis/universal_redis_store.go

@@ -25,7 +25,7 @@ func (store *UniversalRedisStore) InsertEntry(entry *filer2.Entry) (err error) {
 		return fmt.Errorf("encoding %s %+v: %v", entry.FullPath, entry.Attr, err)
 	}
 
-	_, err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec) * time.Second).Result()
+	_, err = store.Client.Set(string(entry.FullPath), value, time.Duration(entry.TtlSec)*time.Second).Result()
 
 	if err != nil {
 		return fmt.Errorf("persisting %s : %v", entry.FullPath, err)
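
The only change here is gofmt's spacing, but the expression itself is worth noting: time.Duration is an int64 nanosecond count, so an integer TTL in seconds has to be converted before it can be multiplied by time.Second. A standalone sketch with a made-up TTL value:

package main

import (
	"fmt"
	"time"
)

func main() {
	ttlSec := 90 // made-up TTL in seconds, standing in for entry.TtlSec

	// The conversion is required: ttlSec * time.Second does not compile
	// once ttlSec has type int, because the operand types don't match.
	ttl := time.Duration(ttlSec) * time.Second

	fmt.Println(ttl) // 1m30s
}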

+ 2 - 2
weed/filesys/dir_link.go

@@ -6,10 +6,10 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/seaweedfs/fuse"
-	"github.com/seaweedfs/fuse/fs"
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/seaweedfs/fuse"
+	"github.com/seaweedfs/fuse/fs"
 )
 
 var _ = fs.NodeSymlinker(&Dir{})

+ 1 - 1
weed/filesys/dir_rename.go

@@ -1,9 +1,9 @@
 package filesys
 
 import (
+	"context"
 	"github.com/seaweedfs/fuse"
 	"github.com/seaweedfs/fuse/fs"
-	"context"
 	"math"
 	"path/filepath"
 

+ 4 - 4
weed/filesys/wfs.go

@@ -58,7 +58,7 @@ type statsCache struct {
 func NewSeaweedFileSystem(option *Option) *WFS {
 	wfs := &WFS{
 		option:                    option,
-		listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(1024*8).ItemsToPrune(100)),
+		listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(1024 * 8).ItemsToPrune(100)),
 		pathToHandleIndex:         make(map[string]int),
 		bufPool: sync.Pool{
 			New: func() interface{} {
@@ -174,11 +174,11 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
 	resp.Blocks = totalDiskSize / blockSize
 
 	// Compute the number of used blocks
-	numblocks := uint64(usedDiskSize / blockSize)
+	numBlocks := uint64(usedDiskSize / blockSize)
 
 	// Report the number of free and available blocks for the block size
-	resp.Bfree = resp.Blocks - numblocks
-	resp.Bavail = resp.Blocks - numblocks
+	resp.Bfree = resp.Blocks - numBlocks
+	resp.Bavail = resp.Blocks - numBlocks
 	resp.Bsize = uint32(blockSize)
 
 	// Report the total number of possible files in the file system (and those free)
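
The renamed numBlocks is simply the used-space counterpart of resp.Blocks. As a worked example of the same accounting, with made-up sizes and a 512-byte block size assumed purely for illustration:

package main

import "fmt"

func main() {
	// Hypothetical figures, only to illustrate the Statfs arithmetic above.
	const blockSize uint64 = 512
	totalDiskSize := uint64(1 << 30)  // 1 GiB of capacity
	usedDiskSize := uint64(256 << 20) // 256 MiB already used

	blocks := totalDiskSize / blockSize   // 2097152 total blocks
	numBlocks := usedDiskSize / blockSize // 524288 used blocks

	fmt.Println(blocks - numBlocks) // 1572864 blocks reported as free/available
}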

Some files were not shown because too many files changed in this diff