scaffold.go 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418
  1. package command
  2. import (
  3. "io/ioutil"
  4. "path/filepath"
  5. )
// init assigns the Run function after both cmdScaffold and runScaffold are
// declared; a direct reference in the composite literal would create an
// initialization cycle (runScaffold reads flags registered on cmdScaffold).
func init() {
	cmdScaffold.Run = runScaffold // break init cycle
}
  9. var cmdScaffold = &Command{
  10. UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
  11. Short: "generate basic configuration files",
  12. Long: `Generate filer.toml with all possible configurations for you to customize.
  13. The options can also be overwritten by environment variables.
  14. For example, the filer.toml mysql password can be overwritten by environment variable
  15. export WEED_MYSQL_PASSWORD=some_password
  16. Environment variable rules:
  17. * Prefix the variable name with "WEED_"
  18. * Upppercase the reset of variable name.
  19. * Replace '.' with '_'
  20. `,
  21. }
// Command-line flags for "weed scaffold".
var (
	// outputPath: when non-empty, the generated <config>.toml is written into
	// this directory; otherwise the content is printed to stdout.
	outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
	// config selects which example configuration file to generate.
	config = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
)
  26. func runScaffold(cmd *Command, args []string) bool {
  27. content := ""
  28. switch *config {
  29. case "filer":
  30. content = FILER_TOML_EXAMPLE
  31. case "notification":
  32. content = NOTIFICATION_TOML_EXAMPLE
  33. case "replication":
  34. content = REPLICATION_TOML_EXAMPLE
  35. case "security":
  36. content = SECURITY_TOML_EXAMPLE
  37. case "master":
  38. content = MASTER_TOML_EXAMPLE
  39. }
  40. if content == "" {
  41. println("need a valid -config option")
  42. return false
  43. }
  44. if *outputPath != "" {
  45. ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644)
  46. } else {
  47. println(content)
  48. }
  49. return true
  50. }
// Example configuration file contents emitted by "weed scaffold".
// These raw strings are written verbatim to the generated .toml files
// (or printed to stdout), so their text is part of the command's output.
const (
	// FILER_TOML_EXAMPLE: scaffold for filer.toml — filer server options plus
	// one section per supported filer metadata store (leveldb2, mysql,
	// postgres, cassandra, redis, etcd, mongodb).
	FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file to one of the location, with descending priority
# ./filer.toml
# $HOME/.seaweedfs/filer.toml
# /etc/seaweedfs/filer.toml
####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false
# directories under this folder will be automatically creating a separate bucket
buckets_folder = "/buckets"
buckets_fsync = [ # a list of buckets with all write requests fsync=true
"important_bucket",
"should_always_fsync",
]
####################################################
# The following are filer store options
####################################################
[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "." # directory to store level db files
[mysql] # or tidb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) COMMENT 'directory or file name',
# directory TEXT COMMENT 'full path to parent directory',
# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = "" # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
interpolateParams = false
[postgres] # or cockroachdb
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT,
# name VARCHAR(65535),
# directory VARCHAR(65535),
# meta bytea,
# PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = "" # create or use an existing database
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100
[cassandra]
# CREATE TABLE filemeta (
# directory varchar,
# name varchar,
# meta blob,
# PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace="seaweedfs"
hosts=[
"localhost:9042",
]
[redis2]
enabled = false
address = "localhost:6379"
password = ""
database = 0
[redis_cluster2]
enabled = false
addresses = [
"localhost:30001",
"localhost:30002",
"localhost:30003",
"localhost:30004",
"localhost:30005",
"localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = true
# automatically use the closest Redis server for reads
routeByLatency = true
[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"
[mongodb]
enabled = false
uri = "mongodb://localhost:27017"
option_pool_size = 0
database = "seaweedfs"
`

	// NOTIFICATION_TOML_EXAMPLE: scaffold for notification.toml — message
	// queue targets (log, kafka, AWS SQS, Google Pub/Sub, Go CDK pubsub) for
	// publishing filer metadata updates.
	NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file to one of the location, with descending priority
# ./notification.toml
# $HOME/.seaweedfs/notification.toml
# /etc/seaweedfs/notification.toml
####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging perpose and does not work with "weed filer.replicate"
enabled = false
[notification.kafka]
enabled = false
hosts = [
"localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10
[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name
[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = "" # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto created if does not exists
[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`

	// REPLICATION_TOML_EXAMPLE: scaffold for replication.toml — one source
	// (filer) and the supported replication sinks (filer, S3, GCS, Azure,
	// Backblaze) used by "weed filer.replicate".
	REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.replicate"
# Put this file to one of the location, with descending priority
# ./replication.toml
# $HOME/.seaweedfs/replication.toml
# /etc/seaweedfs/replication.toml
[source.filer]
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to notification message queue.
directory = "/buckets"
[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" to this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0
[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/" # destination directory
endpoint = ""
[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/" # destination directory
[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer" # an existing container
directory = "/" # destination directory
[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/" # destination directory
`

	// SECURITY_TOML_EXAMPLE: scaffold for security.toml — JWT signing keys
	// and mutual-TLS cert/key paths for gRPC between components, plus volume
	// server https options.
	SECURITY_TOML_EXAMPLE = `
# Put this file to one of the location, with descending priority
# ./security.toml
# $HOME/.seaweedfs/security.toml
# /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer
# the jwt signing key is read by master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds
# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds
# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PERM files.
# the host name is not checked, so the PERM files can be shared.
[grpc]
ca = ""
[grpc.volume]
cert = ""
key = ""
[grpc.master]
cert = ""
key = ""
[grpc.filer]
cert = ""
key = ""
[grpc.msg_broker]
cert = ""
key = ""
# use this for any place needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""
# volume server https options
# Note: work in progress!
# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
[https.client]
enabled = true
[https.volume]
cert = ""
key = ""
`

	// MASTER_TOML_EXAMPLE: scaffold for master.toml — maintenance scripts,
	// sequencer choice, tiered cloud storage backend, volume growth counts,
	// and replication flags for the master server.
	MASTER_TOML_EXAMPLE = `
# Put this file to one of the location, with descending priority
# ./master.toml
# $HOME/.seaweedfs/master.toml
# /etc/seaweedfs/master.toml
# this file is read by master
[master.maintenance]
# periodically run these scripts are the same as running them from 'weed shell'
scripts = """
lock
ec.encode -fullPercent=95 -quietFor=1h
ec.rebuild -force
ec.balance -force
volume.balance -force
volume.fix.replication
unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution
[master.filer]
default = "localhost:8888" # used by maintenance scripts if the scripts needs to use fs related commands
[master.sequencer]
type = "memory" # Choose [memory|etcd] type for storing the file id sequence
# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"
# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]
[storage.backend.s3.default]
enabled = false
aws_access_key_id = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = "" # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
endpoint = ""
# create this number of logical volumes if no more writable volumes
# count_x means how many copies of data.
# e.g.:
# 000 has only one copy, copy_1
# 010 and 001 has two copies, copy_2
# 011 has only 3 copies, copy_3
[master.volume_growth]
copy_1 = 7 # create 1 x 7 = 7 actual volumes
copy_2 = 6 # create 2 x 6 = 12 actual volumes
copy_3 = 3 # create 3 x 3 = 9 actual volumes
copy_other = 1 # create n x 1 = n actual volumes
# configuration flags for replication
[master.replication]
# any replication counts should be considered minimums. If you specify 010 and
# have 3 different racks, that's still considered writable. Writes will still
# try to replicate to all available volumes. You should only use this option
# if you are doing your own replication or periodic sync of volumes.
treat_replication_as_minimums = false
`
)