// filer.go
  1. package filer
  2. import (
  3. "context"
  4. "fmt"
  5. "github.com/chrislusf/seaweedfs/weed/cluster"
  6. "github.com/chrislusf/seaweedfs/weed/pb"
  7. "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
  8. "os"
  9. "strings"
  10. "time"
  11. "google.golang.org/grpc"
  12. "github.com/chrislusf/seaweedfs/weed/glog"
  13. "github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
  14. "github.com/chrislusf/seaweedfs/weed/util"
  15. "github.com/chrislusf/seaweedfs/weed/util/log_buffer"
  16. "github.com/chrislusf/seaweedfs/weed/wdclient"
  17. )
const (
	// LogFlushInterval is how often the local metadata log buffer is flushed.
	LogFlushInterval = time.Minute
	// PaginationSize is the page size used when listing directory entries.
	PaginationSize = 1024
	// FilerStoreId is the KV key under which the store's random signature is persisted.
	FilerStoreId = "filer.store.id"
)

var (
	// OS_UID and OS_GID are the uid/gid of the filer process itself,
	// used as the owner of the synthetic Root entry.
	OS_UID = uint32(os.Getuid())
	OS_GID = uint32(os.Getgid())
)
// Filer ties together the metadata store, master-cluster connectivity, and the
// local metadata change log for one filer server instance.
type Filer struct {
	Store               VirtualFilerStore
	MasterClient        *wdclient.MasterClient
	fileIdDeletionQueue *util.UnboundedQueue // file ids queued for async deletion, drained by loopProcessingDeletion
	GrpcDialOption      grpc.DialOption
	DirBucketsPath      string   // root directory under which buckets live — NOTE(review): inferred from name, verify
	FsyncBuckets        []string // buckets that should fsync on write — NOTE(review): inferred from name, verify
	buckets             *FilerBuckets
	Cipher              bool
	LocalMetaLogBuffer  *log_buffer.LogBuffer // local metadata events, flushed via logFlushFunc every LogFlushInterval
	metaLogCollection   string                // collection used when persisting the meta log
	metaLogReplication  string                // replication setting used when persisting the meta log
	MetaAggregator      *MetaAggregator       // aggregates metadata changes from peer filers; set by AggregateFromPeers
	Signature           int32                 // random id identifying this store, persisted under FilerStoreId
	FilerConf           *FilerConf
	RemoteStorage       *FilerRemoteStorage
	UniqueFileId        uint32 // seeded randomly at startup — NOTE(review): usage not visible in this chunk
}
  45. func NewFiler(masters []pb.ServerAddress, grpcDialOption grpc.DialOption,
  46. filerHost pb.ServerAddress, collection string, replication string, dataCenter string, notifyFn func()) *Filer {
  47. f := &Filer{
  48. MasterClient: wdclient.NewMasterClient(grpcDialOption, cluster.FilerType, filerHost, dataCenter, masters),
  49. fileIdDeletionQueue: util.NewUnboundedQueue(),
  50. GrpcDialOption: grpcDialOption,
  51. FilerConf: NewFilerConf(),
  52. RemoteStorage: NewFilerRemoteStorage(),
  53. UniqueFileId: uint32(util.RandomInt32()),
  54. }
  55. f.LocalMetaLogBuffer = log_buffer.NewLogBuffer("local", LogFlushInterval, f.logFlushFunc, notifyFn)
  56. f.metaLogCollection = collection
  57. f.metaLogReplication = replication
  58. go f.loopProcessingDeletion()
  59. return f
  60. }
  61. func (f *Filer) AggregateFromPeers(self pb.ServerAddress) {
  62. f.MetaAggregator = NewMetaAggregator(f, self, f.GrpcDialOption)
  63. f.MasterClient.OnPeerUpdate = f.MetaAggregator.OnPeerUpdate
  64. for _, peerUpdate := range f.ListExistingPeerUpdates() {
  65. f.MetaAggregator.OnPeerUpdate(peerUpdate)
  66. }
  67. }
  68. func (f *Filer) ListExistingPeerUpdates() (existingNodes []*master_pb.ClusterNodeUpdate) {
  69. if grpcErr := pb.WithMasterClient(false, f.MasterClient.GetMaster(), f.GrpcDialOption, func(client master_pb.SeaweedClient) error {
  70. resp, err := client.ListClusterNodes(context.Background(), &master_pb.ListClusterNodesRequest{
  71. ClientType: cluster.FilerType,
  72. })
  73. glog.V(0).Infof("the cluster has %d filers\n", len(resp.ClusterNodes))
  74. for _, node := range resp.ClusterNodes {
  75. existingNodes = append(existingNodes, &master_pb.ClusterNodeUpdate{
  76. NodeType: cluster.FilerType,
  77. Address: node.Address,
  78. IsLeader: node.IsLeader,
  79. IsAdd: true,
  80. })
  81. }
  82. return err
  83. }); grpcErr != nil {
  84. glog.V(0).Infof("connect to %s: %v", f.MasterClient.GetMaster(), grpcErr)
  85. }
  86. return
  87. }
  88. func (f *Filer) SetStore(store FilerStore) {
  89. f.Store = NewFilerStoreWrapper(store)
  90. f.setOrLoadFilerStoreSignature(store)
  91. }
  92. func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
  93. storeIdBytes, err := store.KvGet(context.Background(), []byte(FilerStoreId))
  94. if err == ErrKvNotFound || err == nil && len(storeIdBytes) == 0 {
  95. f.Signature = util.RandomInt32()
  96. storeIdBytes = make([]byte, 4)
  97. util.Uint32toBytes(storeIdBytes, uint32(f.Signature))
  98. if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil {
  99. glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
  100. }
  101. glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
  102. } else if err == nil && len(storeIdBytes) == 4 {
  103. f.Signature = int32(util.BytesToUint32(storeIdBytes))
  104. glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
  105. } else {
  106. glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
  107. }
  108. }
  109. func (f *Filer) GetStore() (store FilerStore) {
  110. return f.Store
  111. }
  112. func (fs *Filer) GetMaster() pb.ServerAddress {
  113. return fs.MasterClient.GetMaster()
  114. }
  115. func (fs *Filer) KeepMasterClientConnected() {
  116. fs.MasterClient.KeepConnectedToMaster()
  117. }
// BeginTransaction starts a store-level transaction. The returned context must
// be passed to subsequent store calls and to CommitTransaction/RollbackTransaction.
func (f *Filer) BeginTransaction(ctx context.Context) (context.Context, error) {
	return f.Store.BeginTransaction(ctx)
}

// CommitTransaction commits the transaction carried by ctx.
func (f *Filer) CommitTransaction(ctx context.Context) error {
	return f.Store.CommitTransaction(ctx)
}

// RollbackTransaction aborts the transaction carried by ctx.
func (f *Filer) RollbackTransaction(ctx context.Context) error {
	return f.Store.RollbackTransaction(ctx)
}
// CreateEntry inserts a new entry at entry.FullPath, creating any missing
// parent directories first, or updates the entry if one already exists.
// o_excl makes the call fail with an EEXIST error when the path exists.
// isFromOtherCluster and signatures are forwarded to the update event so
// subscribers can de-duplicate replicated changes.
func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFromOtherCluster bool, signatures []int32) error {

	if string(entry.FullPath) == "/" {
		return nil // the root always exists; nothing to create
	}

	// Lookup error is deliberately ignored: a nil oldEntry means "insert".
	oldEntry, _ := f.FindEntry(ctx, entry.FullPath)

	/*
		if !hasWritePermission(lastDirectoryEntry, entry) {
			glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
				lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
			return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
		}
	*/

	if oldEntry == nil {
		// Ensure every ancestor directory exists before inserting.
		dirParts := strings.Split(string(entry.FullPath), "/")
		if err := f.ensureParentDirecotryEntry(ctx, entry, dirParts, len(dirParts)-1, isFromOtherCluster); err != nil {
			return err
		}

		glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
		if err := f.Store.InsertEntry(ctx, entry); err != nil {
			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
		}
	} else {
		if o_excl {
			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
		}
		glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
			glog.Errorf("update entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
		}
	}

	// Bucket bookkeeping — presumably registers entry if it is a bucket
	// directory; see maybeAddBucket for the actual condition.
	f.maybeAddBucket(entry)
	f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)

	// Release old chunks that the new entry version no longer references.
	f.deleteChunksIfNotNew(oldEntry, entry)

	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)

	return nil
}
// ensureParentDirecotryEntry recursively makes sure every ancestor directory
// of entry exists, creating missing ones top-down with ownership and mode
// derived from entry. dirParts are the "/"-split components of entry.FullPath;
// level is the index of the deepest component this call is responsible for.
// NOTE(review): "Direcotry" is a typo for "Directory"; kept because renaming
// would break the caller in CreateEntry.
func (f *Filer) ensureParentDirecotryEntry(ctx context.Context, entry *Entry, dirParts []string, level int, isFromOtherCluster bool) (err error) {

	if level == 0 {
		return nil // reached the root, which always exists
	}

	dirPath := "/" + util.Join(dirParts[:level]...)
	// fmt.Printf("%d directory: %+v\n", i, dirPath)

	// check the store directly; lookup error is ignored because a nil
	// dirEntry below simply means "missing, create it"
	glog.V(4).Infof("find uncached directory: %s", dirPath)
	dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))

	// no such existing directory
	if dirEntry == nil {

		// ensure parent directory
		if err = f.ensureParentDirecotryEntry(ctx, entry, dirParts, level-1, isFromOtherCluster); err != nil {
			return err
		}

		// create the directory
		now := time.Now()

		dirEntry = &Entry{
			FullPath: util.FullPath(dirPath),
			Attr: Attr{
				Mtime:  now,
				Crtime: now,
				// inherit the entry's mode, forcing the directory bit
				// and the execute (search) bits
				Mode:        os.ModeDir | entry.Mode | 0111,
				Uid:         entry.Uid,
				Gid:         entry.Gid,
				Collection:  entry.Collection,
				Replication: entry.Replication,
				UserName:    entry.UserName,
				GroupNames:  entry.GroupNames,
			},
		}

		glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
		mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
		if mkdirErr != nil {
			// Tolerate a concurrent mkdir: only fail when the directory
			// still does not exist after the failed insert.
			if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
				glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
				return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
			}
		} else {
			f.maybeAddBucket(dirEntry)
			f.NotifyUpdateEvent(ctx, nil, dirEntry, false, isFromOtherCluster, nil)
		}

	} else if !dirEntry.IsDirectory() {
		// A regular file already occupies the would-be directory path.
		glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
		return fmt.Errorf("%s is a file", dirPath)
	}

	return nil
}
  214. func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
  215. if oldEntry != nil {
  216. entry.Attr.Crtime = oldEntry.Attr.Crtime
  217. if oldEntry.IsDirectory() && !entry.IsDirectory() {
  218. glog.Errorf("existing %s is a directory", oldEntry.FullPath)
  219. return fmt.Errorf("existing %s is a directory", oldEntry.FullPath)
  220. }
  221. if !oldEntry.IsDirectory() && entry.IsDirectory() {
  222. glog.Errorf("existing %s is a file", oldEntry.FullPath)
  223. return fmt.Errorf("existing %s is a file", oldEntry.FullPath)
  224. }
  225. }
  226. return f.Store.UpdateEntry(ctx, entry)
  227. }
var (
	// Root is the synthetic entry for "/". It is never persisted: FindEntry
	// returns it directly for the root path, so the root directory always
	// exists. Its Mtime/Crtime are fixed at process start, and it is owned
	// by the filer process's uid/gid.
	Root = &Entry{
		FullPath: "/",
		Attr: Attr{
			Mtime:  time.Now(),
			Crtime: time.Now(),
			Mode:   os.ModeDir | 0755,
			Uid:    OS_UID,
			Gid:    OS_GID,
		},
	}
)
// FindEntry looks up the entry at p. The root path always resolves to the
// shared Root entry. An entry whose TTL has elapsed is lazily deleted from
// the store and reported as filer_pb.ErrNotFound.
func (f *Filer) FindEntry(ctx context.Context, p util.FullPath) (entry *Entry, err error) {

	if string(p) == "/" {
		return Root, nil
	}
	entry, err = f.Store.FindEntry(ctx, p)
	if entry != nil && entry.TtlSec > 0 {
		if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
			// best-effort purge on access; the deletion error is
			// intentionally ignored
			f.Store.DeleteOneEntry(ctx, entry)
			return nil, filer_pb.ErrNotFound
		}
	}
	return
}
  253. func (f *Filer) doListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, eachEntryFunc ListEachEntryFunc) (expiredCount int64, lastFileName string, err error) {
  254. lastFileName, err = f.Store.ListDirectoryPrefixedEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
  255. select {
  256. case <-ctx.Done():
  257. return false
  258. default:
  259. if entry.TtlSec > 0 {
  260. if entry.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {
  261. f.Store.DeleteOneEntry(ctx, entry)
  262. expiredCount++
  263. return true
  264. }
  265. }
  266. return eachEntryFunc(entry)
  267. }
  268. })
  269. if err != nil {
  270. return expiredCount, lastFileName, err
  271. }
  272. return
  273. }
// Shutdown stops the local metadata log buffer and then shuts down the
// underlying store.
func (f *Filer) Shutdown() {
	f.LocalMetaLogBuffer.Shutdown()
	f.Store.Shutdown()
}