123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186 |
- package filer
- import (
- "bytes"
- "context"
- "fmt"
- "github.com/seaweedfs/seaweedfs/weed/pb"
- "github.com/seaweedfs/seaweedfs/weed/wdclient"
- "google.golang.org/grpc"
- "io"
- "github.com/seaweedfs/seaweedfs/weed/glog"
- "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
- "github.com/seaweedfs/seaweedfs/weed/util"
- "github.com/viant/ptrie"
- jsonpb "google.golang.org/protobuf/encoding/protojson"
- )
// Well-known paths under which the filer stores its own configuration
// entries (they live in the filer namespace like regular files).
const (
	DirectoryEtcRoot      = "/etc/"
	DirectoryEtcSeaweedFS = "/etc/seaweedfs"
	DirectoryEtcRemote    = "/etc/remote"
	// FilerConfName is the entry name holding the JSON-encoded filer_pb.FilerConf.
	FilerConfName = "filer.conf"
	// IAM identity and policy documents used by the S3 gateway.
	IamConfigDirectory = "/etc/iam"
	IamIdentityFile    = "identity.json"
	IamPoliciesFile    = "policies.json"
)
// FilerConf holds per-path-prefix storage rules (collection, replication,
// ttl, ...) parsed from /etc/seaweedfs/filer.conf. Rules are keyed by
// location prefix in a prefix trie so the longest/most specific matches
// can be merged for a given path.
type FilerConf struct {
	rules ptrie.Trie[*filer_pb.FilerConf_PathConf]
}
- func ReadFilerConf(filerGrpcAddress pb.ServerAddress, grpcDialOption grpc.DialOption, masterClient *wdclient.MasterClient) (*FilerConf, error) {
- var buf bytes.Buffer
- if err := pb.WithGrpcFilerClient(false, 0, filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
- if masterClient != nil {
- return ReadEntry(masterClient, client, DirectoryEtcSeaweedFS, FilerConfName, &buf)
- } else {
- content, err := ReadInsideFiler(client, DirectoryEtcSeaweedFS, FilerConfName)
- buf = *bytes.NewBuffer(content)
- return err
- }
- }); err != nil && err != filer_pb.ErrNotFound {
- return nil, fmt.Errorf("read %s/%s: %v", DirectoryEtcSeaweedFS, FilerConfName, err)
- }
- fc := NewFilerConf()
- if buf.Len() > 0 {
- if err := fc.LoadFromBytes(buf.Bytes()); err != nil {
- return nil, fmt.Errorf("parse %s/%s: %v", DirectoryEtcSeaweedFS, FilerConfName, err)
- }
- }
- return fc, nil
- }
- func NewFilerConf() (fc *FilerConf) {
- fc = &FilerConf{
- rules: ptrie.New[*filer_pb.FilerConf_PathConf](),
- }
- return fc
- }
- func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
- filerConfPath := util.NewFullPath(DirectoryEtcSeaweedFS, FilerConfName)
- entry, err := filer.FindEntry(context.Background(), filerConfPath)
- if err != nil {
- if err == filer_pb.ErrNotFound {
- return nil
- }
- glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
- return
- }
- if len(entry.Content) > 0 {
- return fc.LoadFromBytes(entry.Content)
- }
- return fc.loadFromChunks(filer, entry.Content, entry.GetChunks(), entry.Size())
- }
- func (fc *FilerConf) loadFromChunks(filer *Filer, content []byte, chunks []*filer_pb.FileChunk, size uint64) (err error) {
- if len(content) == 0 {
- content, err = filer.readEntry(chunks, size)
- if err != nil {
- glog.Errorf("read filer conf content: %v", err)
- return
- }
- }
- return fc.LoadFromBytes(content)
- }
- func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
- conf := &filer_pb.FilerConf{}
- if err := jsonpb.Unmarshal(data, conf); err != nil {
- return err
- }
- return fc.doLoadConf(conf)
- }
- func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
- for _, location := range conf.Locations {
- err = fc.AddLocationConf(location)
- if err != nil {
- // this is not recoverable
- return nil
- }
- }
- return nil
- }
- func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
- err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
- if err != nil {
- glog.Errorf("put location prefix: %v", err)
- }
- return
- }
- func (fc *FilerConf) DeleteLocationConf(locationPrefix string) {
- rules := ptrie.New[*filer_pb.FilerConf_PathConf]()
- fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool {
- if string(key) == locationPrefix {
- return true
- }
- key = bytes.Clone(key)
- _ = rules.Put(key, value)
- return true
- })
- fc.rules = rules
- return
- }
- func (fc *FilerConf) MatchStorageRule(path string) (pathConf *filer_pb.FilerConf_PathConf) {
- pathConf = &filer_pb.FilerConf_PathConf{}
- fc.rules.MatchPrefix([]byte(path), func(key []byte, value *filer_pb.FilerConf_PathConf) bool {
- mergePathConf(pathConf, value)
- return true
- })
- return pathConf
- }
- func (fc *FilerConf) GetCollectionTtls(collection string) (ttls map[string]string) {
- ttls = make(map[string]string)
- fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool {
- if value.Collection == collection {
- ttls[value.LocationPrefix] = value.GetTtl()
- }
- return true
- })
- return ttls
- }
// mergePathConf overlays b onto a in place: string fields in b override a
// only when non-empty (via util.Nvl, which returns its first non-empty
// argument — NOTE(review): confirmed against weed/util), booleans are
// OR-ed (once set by any matching rule they stay set), and positive
// numeric fields in b replace a's.
func mergePathConf(a, b *filer_pb.FilerConf_PathConf) {
	a.Collection = util.Nvl(b.Collection, a.Collection)
	a.Replication = util.Nvl(b.Replication, a.Replication)
	a.Ttl = util.Nvl(b.Ttl, a.Ttl)
	a.DiskType = util.Nvl(b.DiskType, a.DiskType)
	// a rule requesting fsync wins over one that does not
	a.Fsync = b.Fsync || a.Fsync
	if b.VolumeGrowthCount > 0 {
		a.VolumeGrowthCount = b.VolumeGrowthCount
	}
	a.ReadOnly = b.ReadOnly || a.ReadOnly
	if b.MaxFileNameLength > 0 {
		a.MaxFileNameLength = b.MaxFileNameLength
	}
	a.DataCenter = util.Nvl(b.DataCenter, a.DataCenter)
	a.Rack = util.Nvl(b.Rack, a.Rack)
	a.DataNode = util.Nvl(b.DataNode, a.DataNode)
}
- func (fc *FilerConf) ToProto() *filer_pb.FilerConf {
- m := &filer_pb.FilerConf{}
- fc.rules.Walk(func(key []byte, value *filer_pb.FilerConf_PathConf) bool {
- m.Locations = append(m.Locations, value)
- return true
- })
- return m
- }
// ToText writes the configuration to writer as protobuf text
// (via ProtoToText on the proto form of the rules).
func (fc *FilerConf) ToText(writer io.Writer) error {
	return ProtoToText(writer, fc.ToProto())
}
|