#!/usr/bin/env python3
#
# schema.py
#
# Used by signature.py via common-dependencies.py to generate a schema file during the PlatformIO build.
# This script can also be run standalone from within the Marlin repo to generate all schema files.
#
import json
import re
from pathlib import Path
  10. def extend_dict(d:dict, k:tuple):
  11. if len(k) >= 1 and k[0] not in d:
  12. d[k[0]] = {}
  13. if len(k) >= 2 and k[1] not in d[k[0]]:
  14. d[k[0]][k[1]] = {}
  15. if len(k) >= 3 and k[2] not in d[k[0]][k[1]]:
  16. d[k[0]][k[1]][k[2]] = {}
  17. grouping_patterns = [
  18. re.compile(r'^([XYZIJKUVW]|[XYZ]2|Z[34]|E[0-7])$'),
  19. re.compile(r'^AXIS\d$'),
  20. re.compile(r'^(MIN|MAX)$'),
  21. re.compile(r'^[0-8]$'),
  22. re.compile(r'^HOTEND[0-7]$'),
  23. re.compile(r'^(HOTENDS|BED|PROBE|COOLER)$'),
  24. re.compile(r'^[XYZIJKUVW]M(IN|AX)$')
  25. ]
  26. # If the indexed part of the option name matches a pattern
  27. # then add it to the dictionary.
  28. def find_grouping(gdict, filekey, sectkey, optkey, pindex):
  29. optparts = optkey.split('_')
  30. if 1 < len(optparts) > pindex:
  31. for patt in grouping_patterns:
  32. if patt.match(optparts[pindex]):
  33. subkey = optparts[pindex]
  34. modkey = '_'.join(optparts)
  35. optparts[pindex] = '*'
  36. wildkey = '_'.join(optparts)
  37. kkey = f'{filekey}|{sectkey}|{wildkey}'
  38. if kkey not in gdict: gdict[kkey] = []
  39. gdict[kkey].append((subkey, modkey))
  40. # Build a list of potential groups. Only those with multiple items will be grouped.
  41. def group_options(schema):
  42. for pindex in range(10, -1, -1):
  43. found_groups = {}
  44. for filekey, f in schema.items():
  45. for sectkey, s in f.items():
  46. for optkey in s:
  47. find_grouping(found_groups, filekey, sectkey, optkey, pindex)
  48. fkeys = [ k for k in found_groups.keys() ]
  49. for kkey in fkeys:
  50. items = found_groups[kkey]
  51. if len(items) > 1:
  52. f, s, w = kkey.split('|')
  53. extend_dict(schema, (f, s, w)) # Add wildcard group to schema
  54. for subkey, optkey in items: # Add all items to wildcard group
  55. schema[f][s][w][subkey] = schema[f][s][optkey] # Move non-wildcard item to wildcard group
  56. del schema[f][s][optkey]
  57. del found_groups[kkey]
  58. # Extract all board names from boards.h
  59. def load_boards():
  60. bpath = Path("Marlin/src/core/boards.h")
  61. if bpath.is_file():
  62. with bpath.open() as bfile:
  63. boards = []
  64. for line in bfile:
  65. if line.startswith("#define BOARD_"):
  66. bname = line.split()[1]
  67. if bname != "BOARD_UNKNOWN": boards.append(bname)
  68. return "['" + "','".join(boards) + "']"
  69. return ''
  70. #
  71. # Extract a schema from the current configuration files
  72. #
  73. def extract():
  74. # Load board names from boards.h
  75. boards = load_boards()
  76. # Parsing states
  77. class Parse:
  78. NORMAL = 0 # No condition yet
  79. BLOCK_COMMENT = 1 # Looking for the end of the block comment
  80. EOL_COMMENT = 2 # EOL comment started, maybe add the next comment?
  81. GET_SENSORS = 3 # Gathering temperature sensor options
  82. ERROR = 9 # Syntax error
  83. # List of files to process, with shorthand
  84. filekey = { 'Configuration.h':'basic', 'Configuration_adv.h':'advanced' }
  85. # A JSON object to store the data
  86. sch_out = { 'basic':{}, 'advanced':{} }
  87. # Regex for #define NAME [VALUE] [COMMENT] with sanitized line
  88. defgrep = re.compile(r'^(//)?\s*(#define)\s+([A-Za-z0-9_]+)\s*(.*?)\s*(//.+)?$')
  89. # Defines to ignore
  90. ignore = ('CONFIGURATION_H_VERSION', 'CONFIGURATION_ADV_H_VERSION', 'CONFIG_EXAMPLES_DIR', 'CONFIG_EXPORT')
  91. # Start with unknown state
  92. state = Parse.NORMAL
  93. # Serial ID
  94. sid = 0
  95. # Loop through files and parse them line by line
  96. for fn, fk in filekey.items():
  97. with Path("Marlin", fn).open() as fileobj:
  98. section = 'none' # Current Settings section
  99. line_number = 0 # Counter for the line number of the file
  100. conditions = [] # Create a condition stack for the current file
  101. comment_buff = [] # A temporary buffer for comments
  102. options_json = '' # A buffer for the most recent options JSON found
  103. eol_options = False # The options came from end of line, so only apply once
  104. join_line = False # A flag that the line should be joined with the previous one
  105. line = '' # A line buffer to handle \ continuation
  106. last_added_ref = None # Reference to the last added item
  107. # Loop through the lines in the file
  108. for the_line in fileobj.readlines():
  109. line_number += 1
  110. # Clean the line for easier parsing
  111. the_line = the_line.strip()
  112. if join_line: # A previous line is being made longer
  113. line += (' ' if line else '') + the_line
  114. else: # Otherwise, start the line anew
  115. line, line_start = the_line, line_number
  116. # If the resulting line ends with a \, don't process now.
  117. # Strip the end off. The next line will be joined with it.
  118. join_line = line.endswith("\\")
  119. if join_line:
  120. line = line[:-1].strip()
  121. continue
  122. else:
  123. line_end = line_number
  124. defmatch = defgrep.match(line)
  125. # Special handling for EOL comments after a #define.
  126. # At this point the #define is already digested and inserted,
  127. # so we have to extend it
  128. if state == Parse.EOL_COMMENT:
  129. # If the line is not a comment, we're done with the EOL comment
  130. if not defmatch and the_line.startswith('//'):
  131. comment_buff.append(the_line[2:].strip())
  132. else:
  133. last_added_ref['comment'] = ' '.join(comment_buff)
  134. comment_buff = []
  135. state = Parse.NORMAL
  136. def use_comment(c, opt, sec, bufref):
  137. if c.startswith(':'): # If the comment starts with : then it has magic JSON
  138. d = c[1:].strip() # Strip the leading :
  139. cbr = c.rindex('}') if d.startswith('{') else c.rindex(']') if d.startswith('[') else 0
  140. if cbr:
  141. opt, cmt = c[1:cbr+1].strip(), c[cbr+1:].strip()
  142. if cmt != '': bufref.append(cmt)
  143. else:
  144. opt = c[1:].strip()
  145. elif c.startswith('@section'): # Start a new section
  146. sec = c[8:].strip()
  147. elif not c.startswith('========'):
  148. bufref.append(c)
  149. return opt, sec
  150. # In a block comment, capture lines up to the end of the comment.
  151. # Assume nothing follows the comment closure.
  152. if state in (Parse.BLOCK_COMMENT, Parse.GET_SENSORS):
  153. endpos = line.find('*/')
  154. if endpos < 0:
  155. cline = line
  156. else:
  157. cline, line = line[:endpos].strip(), line[endpos+2:].strip()
  158. # Temperature sensors are done
  159. if state == Parse.GET_SENSORS:
  160. options_json = f'[ {options_json[:-2]} ]'
  161. state = Parse.NORMAL
  162. # Strip the leading '*' from block comments
  163. if cline.startswith('*'): cline = cline[1:].strip()
  164. # Collect temperature sensors
  165. if state == Parse.GET_SENSORS:
  166. sens = re.match(r'^(-?\d+)\s*:\s*(.+)$', cline)
  167. if sens:
  168. s2 = sens[2].replace("'","''")
  169. options_json += f"{sens[1]}:'{s2}', "
  170. elif state == Parse.BLOCK_COMMENT:
  171. # Look for temperature sensors
  172. if cline == "Temperature sensors available:":
  173. state, cline = Parse.GET_SENSORS, "Temperature Sensors"
  174. options_json, section = use_comment(cline, options_json, section, comment_buff)
  175. # For the normal state we're looking for any non-blank line
  176. elif state == Parse.NORMAL:
  177. # Skip a commented define when evaluating comment opening
  178. st = 2 if re.match(r'^//\s*#define', line) else 0
  179. cpos1 = line.find('/*') # Start a block comment on the line?
  180. cpos2 = line.find('//', st) # Start an end of line comment on the line?
  181. # Only the first comment starter gets evaluated
  182. cpos = -1
  183. if cpos1 != -1 and (cpos1 < cpos2 or cpos2 == -1):
  184. cpos = cpos1
  185. comment_buff = []
  186. state = Parse.BLOCK_COMMENT
  187. eol_options = False
  188. elif cpos2 != -1 and (cpos2 < cpos1 or cpos1 == -1):
  189. cpos = cpos2
  190. # Comment after a define may be continued on the following lines
  191. if defmatch != None and cpos > 10:
  192. state = Parse.EOL_COMMENT
  193. comment_buff = []
  194. # Process the start of a new comment
  195. if cpos != -1:
  196. cline, line = line[cpos+2:].strip(), line[:cpos].strip()
  197. if state == Parse.BLOCK_COMMENT:
  198. # Strip leading '*' from block comments
  199. if cline.startswith('*'): cline = cline[1:].strip()
  200. else:
  201. # Expire end-of-line options after first use
  202. if cline.startswith(':'): eol_options = True
  203. # Buffer a non-empty comment start
  204. if cline != '':
  205. options_json, section = use_comment(cline, options_json, section, comment_buff)
  206. # If the line has nothing before the comment, go to the next line
  207. if line == '':
  208. options_json = ''
  209. continue
  210. # Parenthesize the given expression if needed
  211. def atomize(s):
  212. if s == '' \
  213. or re.match(r'^[A-Za-z0-9_]*(\([^)]+\))?$', s) \
  214. or re.match(r'^[A-Za-z0-9_]+ == \d+?$', s):
  215. return s
  216. return f'({s})'
  217. #
  218. # The conditions stack is an array containing condition-arrays.
  219. # Each condition-array lists the conditions for the current block.
  220. # IF/N/DEF adds a new condition-array to the stack.
  221. # ELSE/ELIF/ENDIF pop the condition-array.
  222. # ELSE/ELIF negate the last item in the popped condition-array.
  223. # ELIF adds a new condition to the end of the array.
  224. # ELSE/ELIF re-push the condition-array.
  225. #
  226. cparts = line.split()
  227. iselif, iselse = cparts[0] == '#elif', cparts[0] == '#else'
  228. if iselif or iselse or cparts[0] == '#endif':
  229. if len(conditions) == 0:
  230. raise Exception(f'no #if block at line {line_number}')
  231. # Pop the last condition-array from the stack
  232. prev = conditions.pop()
  233. if iselif or iselse:
  234. prev[-1] = '!' + prev[-1] # Invert the last condition
  235. if iselif: prev.append(atomize(line[5:].strip()))
  236. conditions.append(prev)
  237. elif cparts[0] == '#if':
  238. conditions.append([ atomize(line[3:].strip()) ])
  239. elif cparts[0] == '#ifdef':
  240. conditions.append([ f'defined({line[6:].strip()})' ])
  241. elif cparts[0] == '#ifndef':
  242. conditions.append([ f'!defined({line[7:].strip()})' ])
  243. # Handle a complete #define line
  244. elif defmatch != None:
  245. # Get the match groups into vars
  246. enabled, define_name, val = defmatch[1] == None, defmatch[3], defmatch[4]
  247. # Increment the serial ID
  248. sid += 1
  249. # Create a new dictionary for the current #define
  250. define_info = {
  251. 'section': section,
  252. 'name': define_name,
  253. 'enabled': enabled,
  254. 'line': line_start,
  255. 'sid': sid
  256. }
  257. # Type is based on the value
  258. if val == '':
  259. value_type = 'switch'
  260. elif re.match(r'^(true|false)$', val):
  261. value_type = 'bool'
  262. val = val == 'true'
  263. elif re.match(r'^[-+]?\s*\d+$', val):
  264. value_type = 'int'
  265. val = int(val)
  266. elif re.match(r'[-+]?\s*(\d+\.|\d*\.\d+)([eE][-+]?\d+)?[fF]?', val):
  267. value_type = 'float'
  268. val = float(val.replace('f',''))
  269. else:
  270. value_type = 'string' if val[0] == '"' \
  271. else 'char' if val[0] == "'" \
  272. else 'state' if re.match(r'^(LOW|HIGH)$', val) \
  273. else 'enum' if re.match(r'^[A-Za-z0-9_]{3,}$', val) \
  274. else 'int[]' if re.match(r'^{(\s*[-+]?\s*\d+\s*(,\s*)?)+}$', val) \
  275. else 'float[]' if re.match(r'^{(\s*[-+]?\s*(\d+\.|\d*\.\d+)([eE][-+]?\d+)?[fF]?\s*(,\s*)?)+}$', val) \
  276. else 'array' if val[0] == '{' \
  277. else ''
  278. if val != '': define_info['value'] = val
  279. if value_type != '': define_info['type'] = value_type
  280. # Join up accumulated conditions with &&
  281. if conditions: define_info['requires'] = ' && '.join(sum(conditions, []))
  282. # If the comment_buff is not empty, add the comment to the info
  283. if comment_buff:
  284. full_comment = '\n'.join(comment_buff)
  285. # An EOL comment will be added later
  286. # The handling could go here instead of above
  287. if state == Parse.EOL_COMMENT:
  288. define_info['comment'] = ''
  289. else:
  290. define_info['comment'] = full_comment
  291. comment_buff = []
  292. # If the comment specifies units, add that to the info
  293. units = re.match(r'^\(([^)]+)\)', full_comment)
  294. if units:
  295. units = units[1]
  296. if units == 's' or units == 'sec': units = 'seconds'
  297. define_info['units'] = units
  298. # Set the options for the current #define
  299. if define_name == "MOTHERBOARD" and boards != '':
  300. define_info['options'] = boards
  301. elif options_json != '':
  302. define_info['options'] = options_json
  303. if eol_options: options_json = ''
  304. # Create section dict if it doesn't exist yet
  305. if section not in sch_out[fk]: sch_out[fk][section] = {}
  306. # If define has already been seen...
  307. if define_name in sch_out[fk][section]:
  308. info = sch_out[fk][section][define_name]
  309. if isinstance(info, dict): info = [ info ] # Convert a single dict into a list
  310. info.append(define_info) # Add to the list
  311. else:
  312. # Add the define dict with name as key
  313. sch_out[fk][section][define_name] = define_info
  314. if state == Parse.EOL_COMMENT:
  315. last_added_ref = define_info
  316. return sch_out
  317. def dump_json(schema:dict, jpath:Path):
  318. with jpath.open('w') as jfile:
  319. json.dump(schema, jfile, ensure_ascii=False, indent=2)
  320. def dump_yaml(schema:dict, ypath:Path):
  321. import yaml
  322. with ypath.open('w') as yfile:
  323. yaml.dump(schema, yfile, default_flow_style=False, width=120, indent=2)
  324. def main():
  325. try:
  326. schema = extract()
  327. except Exception as exc:
  328. print("Error: " + str(exc))
  329. schema = None
  330. if schema:
  331. # Get the first command line argument
  332. import sys
  333. if len(sys.argv) > 1:
  334. arg = sys.argv[1]
  335. else:
  336. arg = 'some'
  337. # JSON schema
  338. if arg in ['some', 'json', 'jsons']:
  339. print("Generating JSON ...")
  340. dump_json(schema, Path('schema.json'))
  341. # JSON schema (wildcard names)
  342. if arg in ['group', 'jsons']:
  343. group_options(schema)
  344. dump_json(schema, Path('schema_grouped.json'))
  345. # YAML
  346. if arg in ['some', 'yml', 'yaml']:
  347. try:
  348. import yaml
  349. except ImportError:
  350. print("Installing YAML module ...")
  351. import subprocess
  352. try:
  353. subprocess.run(['python3', '-m', 'pip', 'install', 'pyyaml'])
  354. import yaml
  355. except:
  356. print("Failed to install YAML module")
  357. return
  358. print("Generating YML ...")
  359. dump_yaml(schema, Path('schema.yml'))
# When run standalone, generate the schema file(s) in the current directory.
if __name__ == '__main__':
    main()