# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, in descending priority:
# ./filer.toml
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
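
# For example (only restating the commands mentioned above; any additional flags depend on
# your deployment): save this file as ./filer.toml and start a standalone filer with
#   weed filer
# or include the filer in an all-in-one server with
#   weed server -filer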

####################################################
# Customizable filer server options
####################################################

[filer.options]
# With HTTP DELETE, by default the filer checks whether a folder is empty before deleting it.
# Setting recursive_delete = true deletes all sub folders and files, similar to "rm -Rf".
recursive_delete = false
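
# For example (an illustrative request; it assumes the filer listens on its default port 8888
# and that your SeaweedFS version supports the recursive=true query parameter):
#   curl -X DELETE "http://localhost:8888/path/to/dir/?recursive=true"
# deletes a non-empty folder for that one request, even with recursive_delete left at false.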

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than the previous leveldb store, recommended
enabled = true
dir = "./filerldb2" # directory to store level db files

[leveldb3]
# similar to leveldb2
# each bucket has its own meta store
enabled = false
dir = "./filerldb3" # directory to store level db files

[rocksdb]
# local on disk, similar to leveldb
# since it uses a C wrapper, you need to install RocksDB and build SeaweedFS with it yourself
enabled = false
dir = "./filerrdb" # directory to store rocksdb files

[sqlite]
# local on disk, similar to leveldb
enabled = false
dbFile = "./filer.db" # sqlite db file

[mysql] # or memsql, tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT        COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name      VARCHAR(1000) BINARY COMMENT 'directory or file name',
#   directory TEXT          BINARY COMMENT 'full path to parent directory',
#   meta      LONGBLOB,
#   PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert queries fail, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
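
# For example (a hedged sketch; the database name "seaweedfs" is illustrative, not required),
# you could prepare MySQL once using the schema shown in the comment above:
#   CREATE DATABASE IF NOT EXISTS seaweedfs;
#   USE seaweedfs;
#   CREATE TABLE IF NOT EXISTS filemeta (
#     dirhash   BIGINT,
#     name      VARCHAR(1000) BINARY,
#     directory TEXT BINARY,
#     meta      LONGBLOB,
#     PRIMARY KEY (dirhash, name)
#   ) DEFAULT CHARSET=utf8;
# and then set database = "seaweedfs" above.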

[mysql2] # or memsql, tidb
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS `%s` (
    dirhash   BIGINT,
    name      VARCHAR(1000) BINARY,
    directory TEXT BINARY,
    meta      LONGBLOB,
    PRIMARY KEY (dirhash, name)
  ) DEFAULT CHARSET=utf8;
"""
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert queries fail, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""

[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert queries fail, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""
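
# For example (a hedged sketch; the database name "seaweedfs" is illustrative, while the
# section above defaults to the existing "postgres" database), you could prepare the schema
# shown in the comment above from psql:
#   CREATE DATABASE seaweedfs;
#   \c seaweedfs
#   CREATE TABLE IF NOT EXISTS filemeta (
#     dirhash   BIGINT,
#     name      VARCHAR(65535),
#     directory VARCHAR(65535),
#     meta      bytea,
#     PRIMARY KEY (dirhash, name)
#   );
# and then set database = "seaweedfs" above.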

[postgres2]
enabled = false
createTable = """
  CREATE TABLE IF NOT EXISTS "%s" (
    dirhash   BIGINT,
    name      VARCHAR(65535),
    directory VARCHAR(65535),
    meta      bytea,
    PRIMARY KEY (dirhash, name)
  );
"""
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "postgres" # create or use an existing database
schema = ""
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert queries fail, you can disable upsert or adjust the upsert query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4) ON CONFLICT (dirhash,name) DO UPDATE SET meta = EXCLUDED.meta WHERE "%[1]s".meta != EXCLUDED.meta"""

[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name      varchar,
#   meta      blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
  "localhost:9042",
]
username = ""
password = ""
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
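# For example (illustrative only; "/buckets" is just a sample path, not a requirement):
#   superLargeDirectories = ["/buckets"]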
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
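
# For example (a hedged sketch; the replication settings are illustrative and should match
# your cluster), you could create the keyspace named above and the table from the comment
# above in cqlsh:
#   CREATE KEYSPACE IF NOT EXISTS seaweedfs
#     WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};
#   USE seaweedfs;
#   CREATE TABLE filemeta (
#     directory varchar,
#     name      varchar,
#     meta      blob,
#     PRIMARY KEY (directory, name)
#   ) WITH CLUSTERING ORDER BY (name ASC);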

[hbase]
enabled = false
zkquorum = ""
table = "seaweedfs"

[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis2_sentinel]
enabled = false
addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster2]
enabled = false
addresses = [
  "localhost:30001",
  "localhost:30002",
  "localhost:30003",
  "localhost:30004",
  "localhost:30005",
  "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []

[redis3] # beta
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis3_sentinel]
enabled = false
addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
masterName = "master"
username = ""
password = ""
database = 0

[redis_cluster3] # beta
enabled = false
addresses = [
  "localhost:30001",
  "localhost:30002",
  "localhost:30003",
  "localhost:30004",
  "localhost:30005",
  "localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = false
# automatically use the closest Redis server for reads
routeByLatency = false

[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"

[elastic7]
enabled = false
servers = [
  "http://localhost1:9200",
  "http://localhost2:9200",
  "http://localhost3:9200",
]
username = ""
password = ""
sniff_enabled = false
healthcheck_enabled = false
# increasing this value is recommended; be sure the matching setting in Elasticsearch is greater than or equal to the value here
index.max_result_window = 10000
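
# For example (an illustrative request; replace <index> with the index name used by your
# filer and adjust the host), the limit can be raised on the Elasticsearch side with its
# index settings API:
#   curl -X PUT "http://localhost:9200/<index>/_settings" \
#     -H 'Content-Type: application/json' \
#     -d '{"index": {"max_result_window": 10000}}'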

##########################
##########################
# To add a path-specific filer store:
#
# 1. Add a name following the store type, separated by a dot ".". E.g., cassandra.tmp
# 2. Add a location configuration. E.g., location = "/tmp/"
# 3. Copy and customize all other configurations.
#    Make sure they are not the same if using the same store type!
# 4. Set enabled to true.
#
# The following just uses redis as an example.
##########################
[redis2.tmp]
enabled = false
location = "/tmp/"
address = "localhost:6379"
password = ""
database = 1
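
# Another illustrative sketch (the path and directory below are placeholders, not defaults):
# a second store of the same type as [leveldb2] above, serving only "/data/local/". Per
# step 3, it must not reuse the dir of the default [leveldb2] store.
# [leveldb2.local]
# enabled = false
# location = "/data/local/"
# dir = "./filerldb2_local"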