scaffold.go

package command

import (
	"io/ioutil"
	"path/filepath"
)

func init() {
	cmdScaffold.Run = runScaffold // break init cycle
}

var cmdScaffold = &Command{
	UsageLine: "scaffold -config=[filer|notification|replication|security|master]",
	Short:     "generate basic configuration files",
	Long: `Generate filer.toml with all possible configurations for you to customize.

	The options can also be overwritten by environment variables.
	For example, the filer.toml mysql password can be overwritten by the environment variable
		export WEED_MYSQL_PASSWORD=some_password

	Environment variable rules:
		* Prefix the variable name with "WEED_"
		* Uppercase the rest of the variable name.
		* Replace '.' with '_'
	`,
}
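
// A minimal sketch of how the rules above map a nested TOML key to an
// environment variable. The key below is taken from the filer.toml example
// later in this file; the resulting variable name simply follows the stated
// rules (prefix with "WEED_", uppercase, replace '.' with '_'):
//
//	[filer.options]
//	recursive_delete = false      # key "filer.options.recursive_delete"
//
//	export WEED_FILER_OPTIONS_RECURSIVE_DELETE=true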

var (
	outputPath = cmdScaffold.Flag.String("output", "", "if not empty, save the configuration file to this directory")
	config     = cmdScaffold.Flag.String("config", "filer", "[filer|notification|replication|security|master] the configuration file to generate")
)

func runScaffold(cmd *Command, args []string) bool {
	content := ""
	switch *config {
	case "filer":
		content = FILER_TOML_EXAMPLE
	case "notification":
		content = NOTIFICATION_TOML_EXAMPLE
	case "replication":
		content = REPLICATION_TOML_EXAMPLE
	case "security":
		content = SECURITY_TOML_EXAMPLE
	case "master":
		content = MASTER_TOML_EXAMPLE
	}
	if content == "" {
		println("need a valid -config option")
		return false
	}

	if *outputPath != "" {
		// write <config>.toml into the requested directory; report failures instead of ignoring them
		if err := ioutil.WriteFile(filepath.Join(*outputPath, *config+".toml"), []byte(content), 0644); err != nil {
			println("failed to write", *config+".toml:", err.Error())
			return false
		}
	} else {
		println(content)
	}
	return true
}
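
// Example invocations (illustrative; the flag names match the definitions
// above, and "weed" is the standard SeaweedFS entry point):
//
//	weed scaffold -config=filer                 # print filer.toml to stdout
//	weed scaffold -config=security -output=.    # write ./security.toml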

const (
	FILER_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used with "weed filer" or "weed server -filer"
# Put this file in one of these locations, in descending priority:
#    ./filer.toml
#    $HOME/.seaweedfs/filer.toml
#    /etc/seaweedfs/filer.toml

####################################################
# Customizable filer server options
####################################################
[filer.options]
# with http DELETE, by default the filer would check whether a folder is empty.
# recursive_delete will delete all sub folders and files, similar to "rm -Rf"
recursive_delete = false

####################################################
# The following are filer store options
####################################################

[leveldb2]
# local on disk, mostly for simple single-machine setup, fairly scalable
# faster than previous leveldb, recommended.
enabled = true
dir = "."                  # directory to store level db files

[mysql]  # or tidb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT        COMMENT 'first 64 bits of MD5 hash value of directory field',
#   name      VARCHAR(1000) COMMENT 'directory or file name',
#   directory TEXT          COMMENT 'full path to parent directory',
#   meta      LONGBLOB,
#   PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
enabled = false
hostname = "localhost"
port = 3306
username = "root"
password = ""
database = ""              # create or use an existing database
connection_max_idle = 2
connection_max_open = 100
interpolateParams = false

[postgres] # or cockroachdb
# CREATE TABLE IF NOT EXISTS filemeta (
#   dirhash   BIGINT,
#   name      VARCHAR(65535),
#   directory VARCHAR(65535),
#   meta      bytea,
#   PRIMARY KEY (dirhash, name)
# );
enabled = false
hostname = "localhost"
port = 5432
username = "postgres"
password = ""
database = ""              # create or use an existing database
sslmode = "disable"
connection_max_idle = 100
connection_max_open = 100

[cassandra]
# CREATE TABLE filemeta (
#   directory varchar,
#   name varchar,
#   meta blob,
#   PRIMARY KEY (directory, name)
# ) WITH CLUSTERING ORDER BY (name ASC);
enabled = false
keyspace = "seaweedfs"
hosts = [
	"localhost:9042",
]

[redis]
enabled = false
address = "localhost:6379"
password = ""
database = 0

[redis_cluster]
enabled = false
addresses = [
	"localhost:30001",
	"localhost:30002",
	"localhost:30003",
	"localhost:30004",
	"localhost:30005",
	"localhost:30006",
]
password = ""
# allows reads from slave servers or the master, but all writes still go to the master
readOnly = true
# automatically use the closest Redis server for reads
routeByLatency = true

[etcd]
enabled = false
servers = "localhost:2379"
timeout = "3s"

[tikv]
enabled = false
pdAddress = "192.168.199.113:2379"
`

	NOTIFICATION_TOML_EXAMPLE = `
# A sample TOML config file for SeaweedFS filer store
# Used by both "weed filer" or "weed server -filer" and "weed filer.replicate"
# Put this file in one of these locations, in descending priority:
#    ./notification.toml
#    $HOME/.seaweedfs/notification.toml
#    /etc/seaweedfs/notification.toml

####################################################
# notification
# send and receive filer updates for each file to an external message queue
####################################################
[notification.log]
# this is only for debugging purposes and does not work with "weed filer.replicate"
enabled = false

[notification.kafka]
enabled = false
hosts = [
	"localhost:9092"
]
topic = "seaweedfs_filer"
offsetFile = "./last.offset"
offsetSaveIntervalSeconds = 10

[notification.aws_sqs]
# experimental, let me know if it works
enabled = false
aws_access_key_id = ""            # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = ""        # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
sqs_queue_name = "my_filer_queue" # an existing queue name

[notification.google_pub_sub]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
project_id = ""                 # an existing project id
topic = "seaweedfs_filer_topic" # a topic, auto-created if it does not exist

[notification.gocdk_pub_sub]
# The Go Cloud Development Kit (https://gocloud.dev).
# PubSub API (https://godoc.org/gocloud.dev/pubsub).
# Supports AWS SNS/SQS, Azure Service Bus, Google PubSub, NATS and RabbitMQ.
enabled = false
# This URL will Dial the RabbitMQ server at the URL in the environment
# variable RABBIT_SERVER_URL and open the exchange "myexchange".
# The exchange must have already been created by some other means, like
# the RabbitMQ management plugin.
topic_url = "rabbit://myexchange"
sub_url = "rabbit://myqueue"
`

	REPLICATION_TOML_EXAMPLE = `
# A sample TOML config file for replicating SeaweedFS filer
# Used with "weed filer.replicate"
# Put this file in one of these locations, in descending priority:
#    ./replication.toml
#    $HOME/.seaweedfs/replication.toml
#    /etc/seaweedfs/replication.toml

[source.filer]
enabled = true
grpcAddress = "localhost:18888"
# all files under this directory tree are replicated.
# this is not a directory on your hard drive, but on your filer.
# i.e., all files with this "prefix" are sent to the notification message queue.
directory = "/buckets"

[sink.filer]
enabled = false
grpcAddress = "localhost:18888"
# all replicated files are under this directory tree
# this is not a directory on your hard drive, but on your filer.
# i.e., all received files will be "prefixed" with this directory.
directory = "/backup"
replication = ""
collection = ""
ttlSec = 0

[sink.s3]
# read credentials doc at https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sessions.html
# default loads credentials from the shared credentials file (~/.aws/credentials).
enabled = false
aws_access_key_id = ""      # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = ""  # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
directory = "/"             # destination directory

[sink.google_cloud_storage]
# read credentials doc at https://cloud.google.com/docs/authentication/getting-started
enabled = false
google_application_credentials = "/path/to/x.json" # path to json credential file
bucket = "your_bucket_seaweedfs" # an existing bucket
directory = "/"                  # destination directory

[sink.azure]
# experimental, let me know if it works
enabled = false
account_name = ""
account_key = ""
container = "mycontainer" # an existing container
directory = "/"           # destination directory

[sink.backblaze]
enabled = false
b2_account_id = ""
b2_master_application_key = ""
bucket = "mybucket" # an existing bucket
directory = "/"     # destination directory
`

	SECURITY_TOML_EXAMPLE = `
# Put this file in one of these locations, in descending priority:
#    ./security.toml
#    $HOME/.seaweedfs/security.toml
#    /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer

# the jwt signing key is read by master and volume server.
# a jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds

# jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds

# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
[grpc]
ca = ""

[grpc.volume]
cert = ""
key = ""

[grpc.master]
cert = ""
key = ""

[grpc.filer]
cert = ""
key = ""

# use this for any place that needs a grpc client
# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
[grpc.client]
cert = ""
key = ""

# volume server https options
# Note: work in progress!
# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
[https.client]
enabled = true

[https.volume]
cert = ""
key = ""
`

	MASTER_TOML_EXAMPLE = `
# Put this file in one of these locations, in descending priority:
#    ./master.toml
#    $HOME/.seaweedfs/master.toml
#    /etc/seaweedfs/master.toml
# this file is read by master

[master.maintenance]
# periodically run these scripts; this is the same as running them from 'weed shell'
scripts = """
	ec.encode -fullPercent=95 -quietFor=1h
	ec.rebuild -force
	ec.balance -force
	volume.balance -force
"""
sleep_minutes = 17 # sleep minutes between each script execution

[master.filer]
default_filer_url = "http://localhost:8888/"

[master.sequencer]
type = "memory" # Choose [memory|etcd] type for storing the file id sequence
# when sequencer.type = etcd, set the listen client urls of the etcd cluster that stores the file id sequence
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"

# configurations for tiered cloud storage
# old volumes are transparently moved to cloud for cost efficiency
[storage.backend]

[storage.backend.s3.default]
enabled = false
aws_access_key_id = ""      # if empty, loads from the shared credentials file (~/.aws/credentials).
aws_secret_access_key = ""  # if empty, loads from the shared credentials file (~/.aws/credentials).
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket

# create this number of logical volumes if no more writable volumes
[master.volume_growth]
count_1 = 7     # create 1 x 7 = 7 actual volumes
count_2 = 6     # create 2 x 6 = 12 actual volumes
count_3 = 3     # create 3 x 3 = 9 actual volumes
count_other = 1 # create n x 1 = n actual volumes
`
)