# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file to one of the locations, with descending priority
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml
####################################################
# Customizable filer server options
####################################################

[filer.options]
# With HTTP DELETE, by default the filer checks whether a folder is empty.
# recursive_delete deletes all sub folders and files, similar to "rm -Rf".
recursive_delete = false
####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files
[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files
[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files
[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file
[mysql] # or memsql, tidb
# Expected table schema:
# CREATE TABLE IF NOT EXISTS `filemeta` (
#   `dirhash`   BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
#   `name`      VARCHAR(766) NOT NULL COMMENT 'directory or file name',
#   `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
#   `meta`      LONGBLOB,
#   PRIMARY KEY (`dirhash`, `name`)
# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or adjust the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[mysql2] # or memsql, tidb
enabled = false
# Table-creation statement; %s is replaced with the table name.
createTable = """
  CREATE TABLE IF NOT EXISTS `%s` (
    `dirhash`   BIGINT NOT NULL,
    `name`      VARCHAR(766) NOT NULL,
    `directory` TEXT NOT NULL,
    `meta`      LONGBLOB,
    PRIMARY KEY (`dirhash`, `name`)
  ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or adjust the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
[postgres] # or cockroachdb, YugabyteDB
# Expected table schema:
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or adjust the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
[postgres2]
enabled = false
# Table-creation statement; %s is replaced with the table name.
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert is failing, you can disable upsert or adjust the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
[cassandra]
# Expected table schema:
# CREATE TABLE filemeta (
#   directory varchar,
#   name      varchar,
#   meta      blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
    "localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
# Gocql connection timeout in milliseconds, default: 600
connection_timeout_millisecond = 600
[hbase]
enabled = false
# ZooKeeper quorum used to locate the HBase cluster, e.g. "zk1:2181,zk2:2181"
zkquorum = ""
table = "seaweedfs"
[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis2_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0
[redis_cluster2]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis_lua]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis_lua_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0
[redis_lua_cluster]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
[redis3] # beta
enabled = false
address = "localhost:6379"
password = ""
database = 0
[redis3_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0
[redis_cluster3] # beta
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
[etcd]
enabled = false
# comma-separated list of etcd endpoints
servers = "localhost:2379"
username = ""
password = ""
key_prefix = "seaweedfs."
timeout = "3s"
[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
# 0 uses the driver's default connection pool size
option_pool_size = 0
database = "seaweedfs"
[elastic7]
enabled = false
servers = [
    "http://localhost1:9200",
    "http://localhost2:9200",
    "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# Increasing this value is recommended; make sure the value configured in Elasticsearch is greater than or equal to this one.
index.max_result_window = 10000
[arangodb] # in development, don't use it
enabled = false
db_name = "seaweedfs"
servers = ["http://localhost:8529"] # list of servers to connect to
# only basic auth supported for now
username = ""
password = ""
# skip tls cert validation
insecure_skip_verify = true
[ydb] # https://ydb.tech/
enabled = false
dsn = "grpc://localhost:2136?database=/local"
prefix = "seaweedfs"
useBucketPrefix = true # Fast Bucket Deletion
poolSizeLimit = 50
dialTimeOut = 10
# Authentication is configured with one of the following environment variables:
# YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file> — authenticate with a service account key file at the given path
# YDB_ANONYMOUS_CREDENTIALS="1" — anonymous access, intended for connecting to a testing YDB installation
# YDB_METADATA_CREDENTIALS="1" — use the metadata service to authenticate to YDB from a Yandex Cloud virtual machine or function
# YDB_ACCESS_TOKEN_CREDENTIALS=<access_token> — authenticate to YDB with a short-lived access token (for example, an IAM token)
##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################

[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
[tikv]
enabled = false
# If you have multiple PD addresses, separate them with commas:
# pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
# Enable one-phase commit (1PC)
enable_1pc = false
# Set the CA certificate path
ca_path = ""
# Set the certificate path
cert_path = ""
# Set the private key path
key_path = ""
# The name list used to verify the CN name
verify_cn = ""