# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, in descending priority:
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################

[filer.options]
# With HTTP DELETE, by default the filer checks whether a folder is empty.
# recursive_delete will delete all sub-folders and files, similar to "rm -Rf"
recursive_delete = false
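# Illustrative example only (assumes the default filer port 8888 and a hypothetical path):
# with recursive_delete = false, deleting a non-empty folder over HTTP needs an explicit flag, e.g.
#   curl -X DELETE "http://localhost:8888/path/to/dir/?recursive=true"
# With recursive_delete = true, a plain DELETE on the folder behaves like "rm -Rf".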

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than the previous leveldb store; recommended.
enabled = true
dir = "./filerldb2"    # directory to store level db files

[leveldb3]
# similar to leveldb2.
# each bucket has its own meta store.
enabled = false
dir = "./filerldb3"    # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it uses a C wrapper, you need to install rocksdb and build it yourself
enabled = false
dir = "./filerrdb"     # directory to store rocksdb files

[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db"  # sqlite db file

[mysql]  # or memsql, tidb
# CREATE TABLE IF NOT EXISTS `filemeta` (
#   `dirhash`   BIGINT NOT NULL COMMENT 'first 64 bits of MD5 hash value of directory field',
#   `name`      VARCHAR(766) NOT NULL COMMENT 'directory or file name',
#   `directory` TEXT NOT NULL COMMENT 'full path to parent directory',
#   `meta`      LONGBLOB,
#   PRIMARY KEY (`dirhash`, `name`)
# ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""              # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if inserts/upserts are failing, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""
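# Note: the "%s" placeholder in upsertQuery is filled in with the table name at runtime.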

[mysql2]  # or memsql, tidb
enabled = false
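# createTable is run with "%s" replaced by the actual table name, which allows dedicated
# tables (for example, one per bucket) to be created automatically.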
createTable = """
  CREATE TABLE IF NOT EXISTS `%s` (
    `dirhash`   BIGINT NOT NULL,
    `name`      VARCHAR(766) NOT NULL,
    `directory` TEXT NOT NULL,
    `meta`      LONGBLOB,
    PRIMARY KEY (`dirhash`, `name`)
  ) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""              # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if inserts/upserts are failing, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (`dirhash`,`name`,`directory`,`meta`) VALUES (?,?,?,?) AS `new` ON DUPLICATE KEY UPDATE `meta` = `new`.`meta`"""

[postgres]  # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres"      # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if inserts/upserts are failing, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
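# Note: "%[1]s" is also the table name; the indexed placeholder lets the same name be reused in the WHERE clause.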

[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres"      # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if inserts/upserts are failing, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""

[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name      varchar,
#   meta      blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
  "localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
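# Illustrative example only (pick directories expected to hold a very large number of children):
#   superLargeDirectories = ["/buckets"]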
# Name of the datacenter local to this filer, used as a host selection fallback.
localDC = ""
# gocql connection timeout, default: 600ms
connection_timeout_millisecond = 600

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis2_sentinel]
enabled = false
addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster2]
enabled = false
addresses = [
  "localhost:30001",
  "localhost:30002",
  "localhost:30003",
  "localhost:30004",
  "localhost:30005",
  "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_lua]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis_lua_sentinel]
enabled = false
addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_lua_cluster]
enabled = false
addresses = [
  "localhost:30001",
  "localhost:30002",
  "localhost:30003",
  "localhost:30004",
  "localhost:30005",
  "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis3]  # beta
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis3_sentinel]
enabled = false
addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster3]  # beta
enabled = false
addresses = [
  "localhost:30001",
  "localhost:30002",
  "localhost:30003",
  "localhost:30004",
  "localhost:30005",
  "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false

[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
  "http://localhost1:9200",
  "http://localhost2:9200",
  "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing this value is recommended; make sure the value configured in Elasticsearch is greater than or equal to the value here
index.max_result_window = 10000
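# Illustrative example only (run against Elasticsearch itself; the index name is hypothetical):
#   curl -X PUT "http://localhost:9200/seaweedfs/_settings" -H 'Content-Type: application/json' \
#        -d '{"index.max_result_window": 10000}'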

[arangodb]  # in development, don't use it
enabled = false
db_name = "seaweedfs"
servers = ["http://localhost:8529"]    # list of servers to connect to
# only basic auth is supported for now
username = ""
password = ""
# skip TLS cert validation
insecure_skip_verify = true

[ydb]  # https://ydb.tech/
enabled = false
dsn = "grpc://localhost:2136?database=/local"
prefix = "seaweedfs"
useBucketPrefix = true     # fast bucket deletion
poolSizeLimit = 50
dialTimeOut = 10
# Authentication is configured with one of the following environment variables:
# YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file> : authenticate with a service account key file at the given path
# YDB_ANONYMOUS_CREDENTIALS="1"               : anonymous access, for connecting to a test YDB installation
# YDB_METADATA_CREDENTIALS="1"                : use the metadata service to authenticate from a Yandex Cloud virtual machine or function
# YDB_ACCESS_TOKEN_CREDENTIALS=<access_token> : authenticate with a short-lived access token (for example, an IAM token)
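# Illustrative example only (anonymous access against a local test YDB):
#   YDB_ANONYMOUS_CREDENTIALS="1" weed filer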

##########################
##########################
# To add a path-specific filer store:
#
# 1. Add a name after the store type, separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true
#
# The following is just using redis as an example
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1

[tikv]
enabled = false
# If you have multiple PD addresses, separate them with ',', e.g.:
#   pdaddrs = "pdhost1:2379, pdhost2:2379, pdhost3:2379"
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
# Enable 1PC
enable_1pc = false
# Set the CA certificate path
ca_path = ""
# Set the certificate path
cert_path = ""
# Set the private key path
key_path = ""
# The name list used to verify the CN name
verify_cn = ""