# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file to one of the locations, with descending priority
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################

[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
#max_file_name_length = 255

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "./filerldb2" # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3" # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it is using a C wrapper, you need to install rocksdb and build it by yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file

[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS `filemeta` (
#   `dirhash`   BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
#   `name`      VARCHAR(766) NOT NULL COMMENT 'directory or file name',
#   `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
#   `meta`      LONGBLOB,
#   PRIMARY KEY (`dirhash`, `name`)
# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
enabled = false
# dsn will take priority over "hostname, port, username, password, database".
# [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
dsn = "root@tcp(localhost:3306)/seaweedfs?collation=utf8mb4_bin"
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""

[mysql2] # or memsql, tidb
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS `%s` (
    `dirhash`   BIGINT NOT NULL,
    `name`      VARCHAR(766) NOT NULL,
    `directory` TEXT NOT NULL,
    `meta`      LONGBLOB,
    PRIMARY KEY (`dirhash`, `name`)
  ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""

[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""

[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""

[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name varchar,
#   meta blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
    "localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
# Gocql connection timeout, default: 600ms
connection_timeout_millisecond = 600

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis2_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster2]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_lua]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_lua_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_lua_cluster]
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis3] # beta
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis3_sentinel]
enabled = false
addresses = ["172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster3] # beta
enabled = false
addresses = [
    "localhost:30001",
    "localhost:30002",
    "localhost:30003",
    "localhost:30004",
    "localhost:30005",
    "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false

[etcd]
enabled = false
servers = "localhost:2379"
username = ""
password = ""
key_prefix = "seaweedfs."
timeout = "3s"
# Set the CA certificate path
tls_ca_file = ""
# Set the client certificate path
tls_client_crt_file = ""
# Set the client private key path
tls_client_key_file = ""

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
username = ""
password = ""
ssl = false
ssl_ca_file = ""
ssl_cert_file = ""
ssl_key_file = ""
insecure_skip_verify = false
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
    "http://localhost1:9200",
    "http://localhost2:9200",
    "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing the value is recommended; be sure the value in Elastic is greater or equal to the value here
index.max_result_window = 10000

[arangodb] # in development, don't use it
enabled = false
db_name = "seaweedfs"
servers = ["http://localhost:8529"] # list of servers to connect to
# only basic auth supported for now
username = ""
password = ""
# skip tls cert validation
insecure_skip_verify = true

[ydb] # https://ydb.tech/
enabled = false
dsn = "grpc://localhost:2136?database=/local"
prefix = "seaweedfs"
useBucketPrefix = true # Fast Bucket Deletion
poolSizeLimit = 50
dialTimeOut = 10
# Authenticate produced with one of next environment variables:
# YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file> — used service account key file by path
# YDB_ANONYMOUS_CREDENTIALS="1" — used for authenticate with anonymous access. Anonymous access needs for connect to testing YDB installation
# YDB_METADATA_CREDENTIALS="1" — used metadata service for authenticate to YDB from yandex cloud virtual machine or from yandex function
# YDB_ACCESS_TOKEN_CREDENTIALS=<access_token> — used for authenticate to YDB with short-life access token. For example, access token may be IAM token

##########################
##########################
# To add path-specific filer store:
#
# 1. Add a name following the store type separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################

[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1

[tikv]
enabled = false
# If you have many pd address, use ',' split then:
#   pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
# Enable 1PC
enable_1pc = false
# Set the CA certificate path
ca_path = ""
# Set the certificate path
cert_path = ""
# Set the private key path
key_path = ""
# The name list used to verify the cn name
verify_cn = ""