# -*- test-case-name: twisted.words.test.test_xpath -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

# pylint: disable=W9401,W9402

# DO NOT EDIT xpathparser.py!
#
# It is generated from xpathparser.g using Yapps. Make needed changes there.
# This also means that the generated Python may not conform to Twisted's coding
# standards, so it is wrapped in exec to prevent automated checkers from
# complaining.

# HOWTO Generate me:
#
# 1.) Grab a copy of yapps2:
#         https://github.com/smurfix/yapps
#
#     Note: Do NOT use the package in debian/ubuntu as it has incompatible
#     modifications. The original at http://theory.stanford.edu/~amitp/yapps/
#     hasn't been touched since 2003 and has not been updated to work with
#     Python 3.
#
# 2.) Generate the parser from the grammar:
#
#         yapps2 xpathparser.g xpathparser.py.proto
#
# 3.) Edit the output to depend on the embedded runtime, and remove extraneous
#     imports:
#
#         sed -e '/^# Begin/,${/^[^ ].*mport/d}' -e '/^[^#]/s/runtime\.//g' \
#             -e "s/^\(from __future\)/exec(r'''\n\1/" -e"\$a''')" \
#             xpathparser.py.proto > xpathparser.py
  31. """
  32. XPath Parser.
  33. Besides the parser code produced by Yapps, this module also defines the
  34. parse-time exception classes, a scanner class, a base class for parsers
  35. produced by Yapps, and a context class that keeps track of the parse stack.
  36. These have been copied from the Yapps runtime module.
  37. """
exec(r'''
from __future__ import print_function

import sys, re

MIN_WINDOW = 4096
# File lookup window

class SyntaxError(Exception):
    """When we run into an unexpected token, this is the exception to use"""

    def __init__(self, pos=None, msg="Bad Token", context=None):
        Exception.__init__(self)
        self.pos = pos
        self.msg = msg
        self.context = context

    def __str__(self):
        if not self.pos: return 'SyntaxError'
        else: return 'SyntaxError@%s(%s)' % (repr(self.pos), self.msg)


class NoMoreTokens(Exception):
    """Another exception object, for when we run out of tokens"""
    pass

class Token(object):
    """Yapps token.

    This is a container for a scanned token.
    """

    def __init__(self, type, value, pos=None):
        """Initialize a token."""
        self.type = type
        self.value = value
        self.pos = pos

    def __repr__(self):
        output = '<%s: %s' % (self.type, repr(self.value))
        if self.pos:
            output += " @ "
            if self.pos[0]:
                output += "%s:" % self.pos[0]
            if self.pos[1]:
                output += "%d" % self.pos[1]
            if self.pos[2] is not None:
                output += ".%d" % self.pos[2]
        output += ">"
        return output


in_name = 0

class Scanner(object):
    """Yapps scanner.

    The Yapps scanner can work in context sensitive or context
    insensitive modes. The token() method is used to retrieve the
    next token. It takes a restrict set that limits the set of tokens
    it is allowed to return. In context sensitive mode, this restrict
    set guides the scanner. In context insensitive mode, there is no
    restriction (the set is always the full set of tokens).
    """

    def __init__(self, patterns, ignore, input="",
                 file=None, filename=None, stacked=False):
        """Initialize the scanner.

        Parameters:
          patterns : [(terminal, uncompiled regex), ...] or None
          ignore : {terminal:None, ...}
          input : string

        If patterns is None, we assume that the subclass has
        defined self.patterns : [(terminal, compiled regex), ...].
        Note that the patterns parameter expects uncompiled regexes,
        whereas the self.patterns field expects compiled regexes.

        Each value in the 'ignore' dict is either None or a callable,
        which is called with the scanner and the to-be-ignored match
        object; this can be used for include file or comment handling.
        """

        if not filename:
            global in_name
            filename = "<f.%d>" % in_name
            in_name += 1

        self.input = input
        self.ignore = ignore
        self.file = file
        self.filename = filename
        self.pos = 0
        self.del_pos = 0 # skipped
        self.line = 1
        self.del_line = 0 # skipped
        self.col = 0
        self.tokens = []
        self.stack = None
        self.stacked = stacked

        self.last_read_token = None
        self.last_token = None
        self.last_types = None

        if patterns is not None:
            # Compile the regex strings into regex objects
            self.patterns = []
            for terminal, regex in patterns:
                self.patterns.append((terminal, re.compile(regex)))

    def stack_input(self, input="", file=None, filename=None):
        """Temporarily parse from a second file."""

        # Already reading from somewhere else: Go on top of that, please.
        if self.stack:
            # autogenerate a recursion-level-identifying filename
            if not filename:
                filename = 1
            else:
                try:
                    filename += 1
                except TypeError:
                    pass
            # now pass off to the include file
            self.stack.stack_input(input, file, filename)
        else:
            try:
                filename += 0
            except TypeError:
                pass
            else:
                filename = "<str_%d>" % filename

            # self.stack = object.__new__(self.__class__)
            # Scanner.__init__(self.stack, self.patterns, self.ignore,
            #                  input, file, filename, stacked=True)

            # Note that the pattern+ignore are added by the generated
            # scanner code
            self.stack = self.__class__(input, file, filename, stacked=True)
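
    # A hypothetical sketch of the include-file handling mentioned in
    # __init__: an ignore callable for an assumed INCLUDE terminal could
    # switch the scanner to a second file via stack_input, e.g.
    #
    #     def handle_include(scanner, match):
    #         with open(match.group(1)) as f:
    #             scanner.stack_input(f.read(), filename=match.group(1))
    #
    # The XPath grammar below never stacks input; this is illustration only.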

    def get_pos(self):
        """Return a file/line/char tuple."""
        if self.stack: return self.stack.get_pos()

        return (self.filename, self.line + self.del_line, self.col)

#    def __repr__(self):
#        """Print the last few tokens that have been scanned in"""
#        output = ''
#        for t in self.tokens:
#            output += '%s\n' % (repr(t),)
#        return output

    def print_line_with_pointer(self, pos, length=0, out=sys.stderr):
        """Print the line of 'text' that includes position 'p',
        along with a second line with a single caret (^) at position p"""

        file, line, p = pos
        if file != self.filename:
            if self.stack:
                return self.stack.print_line_with_pointer(pos, length=length, out=out)
            print("(%s: not in input buffer)" % file, file=out)
            return

        text = self.input
        p += length - 1 # starts at pos 1

        origline = line
        line -= self.del_line
        spos = 0
        if line > 0:
            while 1:
                line = line - 1
                try:
                    cr = text.index("\n", spos)
                except ValueError:
                    if line:
                        text = ""
                    break
                if line == 0:
                    text = text[spos:cr]
                    break
                spos = cr + 1
        else:
            print("(%s:%d not in input buffer)" % (file, origline), file=out)
            return

        # Now try printing part of the line
        text = text[max(p - 80, 0):p + 80]
        p = p - max(p - 80, 0)

        # Strip to the left
        i = text[:p].rfind('\n')
        j = text[:p].rfind('\r')
        if i < 0 or (0 <= j < i): i = j
        if 0 <= i < p:
            p = p - i - 1
            text = text[i + 1:]

        # Strip to the right
        i = text.find('\n', p)
        j = text.find('\r', p)
        if i < 0 or (0 <= j < i): i = j
        if i >= 0:
            text = text[:i]

        # Now shorten the text
        while len(text) > 70 and p > 60:
            # Cut off 10 chars
            text = "..." + text[10:]
            p = p - 7

        # Now print the string, along with an indicator
        print('> ', text, file=out)
        print('> ', ' ' * p + '^', file=out)

    def grab_input(self):
        """Get more input if possible."""
        if not self.file: return
        if len(self.input) - self.pos >= MIN_WINDOW: return

        data = self.file.read(MIN_WINDOW)
        if data is None or data == "":
            self.file = None

        # Drop bytes from the start, if necessary.
        if self.pos > 2 * MIN_WINDOW:
            self.del_pos += MIN_WINDOW
            self.del_line += self.input[:MIN_WINDOW].count("\n")

            self.pos -= MIN_WINDOW
            self.input = self.input[MIN_WINDOW:] + data
        else:
            self.input = self.input + data

    def getchar(self):
        """Return the next character."""
        self.grab_input()

        c = self.input[self.pos]
        self.pos += 1
        return c

    def token(self, restrict, context=None):
        """Scan for another token."""

        while 1:
            if self.stack:
                try:
                    return self.stack.token(restrict, context)
                except StopIteration:
                    self.stack = None

            # Keep looking for a token, ignoring any in self.ignore
            self.grab_input()

            # special handling for end-of-file
            if self.stacked and self.pos == len(self.input):
                raise StopIteration

            # Search the patterns for the longest match, with earlier
            # tokens in the list having preference
            best_match = -1
            best_pat = '(error)'
            best_m = None
            for p, regexp in self.patterns:
                # First check to see if we're ignoring this token
                if restrict and p not in restrict and p not in self.ignore:
                    continue
                m = regexp.match(self.input, self.pos)
                if m and m.end() - m.start() > best_match:
                    # We got a match that's better than the previous one
                    best_pat = p
                    best_match = m.end() - m.start()
                    best_m = m

            # If we didn't find anything, raise an error
            if best_pat == '(error)' and best_match < 0:
                msg = 'Bad Token'
                if restrict:
                    msg = 'Trying to find one of ' + ', '.join(restrict)
                raise SyntaxError(self.get_pos(), msg, context=context)

            ignore = best_pat in self.ignore
            value = self.input[self.pos:self.pos + best_match]
            if not ignore:
                tok = Token(type=best_pat, value=value, pos=self.get_pos())

            self.pos += best_match

            npos = value.rfind("\n")
            if npos > -1:
                self.col = best_match - npos
                self.line += value.count("\n")
            else:
                self.col += best_match

            # If we found something that isn't to be ignored, return it
            if not ignore:
                if len(self.tokens) >= 10:
                    del self.tokens[0]
                self.tokens.append(tok)
                self.last_read_token = tok
                # print repr(tok)
                return tok
            else:
                ignore = self.ignore[best_pat]
                if ignore:
                    ignore(self, best_m)

    def peek(self, *types, **kw):
        """Returns the token type for lookahead; if there are any args
        then the list of args is the set of token types to allow"""
        context = kw.get("context", None)
        if self.last_token is None:
            self.last_types = types
            self.last_token = self.token(types, context)
        elif self.last_types:
            for t in types:
                if t not in self.last_types:
                    raise NotImplementedError("Unimplemented: restriction set changed")
        return self.last_token.type

    def scan(self, type, **kw):
        """Returns the matched text, and moves to the next token"""
        context = kw.get("context", None)
        if self.last_token is None:
            tok = self.token([type], context)
        else:
            if self.last_types and type not in self.last_types:
                raise NotImplementedError("Unimplemented: restriction set changed")

            tok = self.last_token
            self.last_token = None
        if tok.type != type:
            if not self.last_types: self.last_types = []
            raise SyntaxError(tok.pos,
                    'Trying to find ' + type + ': ' + ', '.join(self.last_types)
                    + ", got " + tok.type, context=context)
        return tok.value

class Parser(object):
    """Base class for Yapps-generated parsers."""

    def __init__(self, scanner):
        self._scanner = scanner

    def _stack(self, input="", file=None, filename=None):
        """Temporarily read from someplace else"""
        self._scanner.stack_input(input, file, filename)
        self._tok = None

    def _peek(self, *types, **kw):
        """Returns the token type for lookahead; if there are any args
        then the list of args is the set of token types to allow"""
        return self._scanner.peek(*types, **kw)

    def _scan(self, type, **kw):
        """Returns the matched text, and moves to the next token"""
        return self._scanner.scan(type, **kw)

class Context(object):
    """Class to represent the parser's call stack.

    Every rule creates a Context that links to its parent rule. The
    contexts can be used for debugging.
    """

    def __init__(self, parent, scanner, rule, args=()):
        """Create a new context.

        Args:
          parent: Context object or None
          scanner: Scanner object
          rule: string (name of the rule)
          args: tuple listing parameters to the rule
        """
        self.parent = parent
        self.scanner = scanner
        self.rule = rule
        self.args = args
        while scanner.stack: scanner = scanner.stack
        self.token = scanner.last_read_token

    def __str__(self):
        output = ''
        if self.parent: output = str(self.parent) + ' > '
        output += self.rule
        return output

def print_error(err, scanner, max_ctx=None):
    """Print error messages, the parser stack, and the input text -- for
    human-readable error messages."""
    # NOTE: this function assumes 80 columns :-(

    # Figure out the line number
    pos = err.pos
    if not pos:
        pos = scanner.get_pos()

    file_name, line_number, column_number = pos
    print('%s:%d:%d: %s' % (file_name, line_number, column_number, err.msg), file=sys.stderr)

    scanner.print_line_with_pointer(pos)

    context = err.context
    token = None
    while context:
        print('while parsing %s%s:' % (context.rule, tuple(context.args)), file=sys.stderr)
        if context.token:
            token = context.token
        if token:
            scanner.print_line_with_pointer(token.pos, length=len(token.value))
        context = context.parent
        if max_ctx:
            max_ctx = max_ctx - 1
            if not max_ctx:
                break

def wrap_error_reporter(parser, rule, *args, **kw):
    try:
        return getattr(parser, rule)(*args, **kw)
    except SyntaxError as e:
        print_error(e, parser._scanner)
    except NoMoreTokens:
        print('Could not complete parsing; stopped around here:', file=sys.stderr)
        print(parser._scanner, file=sys.stderr)

from twisted.words.xish.xpath import AttribValue, BooleanValue, CompareValue
from twisted.words.xish.xpath import Function, IndexValue, LiteralValue
from twisted.words.xish.xpath import _AnyLocation, _Location


# Begin -- grammar generated by Yapps
class XPathParserScanner(Scanner):
    patterns = [
        ('","', re.compile(',')),
        ('"@"', re.compile('@')),
        ('"\\)"', re.compile('\\)')),
        ('"\\("', re.compile('\\(')),
        ('"\\]"', re.compile('\\]')),
        ('"\\["', re.compile('\\[')),
        ('"//"', re.compile('//')),
        ('"/"', re.compile('/')),
        ('\\s+', re.compile('\\s+')),
        ('INDEX', re.compile('[0-9]+')),
        ('WILDCARD', re.compile('\\*')),
        ('IDENTIFIER', re.compile('[a-zA-Z][a-zA-Z0-9_\\-]*')),
        ('ATTRIBUTE', re.compile('\\@[a-zA-Z][a-zA-Z0-9_\\-]*')),
        ('FUNCNAME', re.compile('[a-zA-Z][a-zA-Z0-9_]*')),
        ('CMP_EQ', re.compile('\\=')),
        ('CMP_NE', re.compile('\\!\\=')),
        ('STR_DQ', re.compile('"([^"]|(\\"))*?"')),
        ('STR_SQ', re.compile("'([^']|(\\'))*?'")),
        ('OP_AND', re.compile('and')),
        ('OP_OR', re.compile('or')),
        ('END', re.compile('$')),
    ]

    def __init__(self, str, *args, **kw):
        Scanner.__init__(self, None, {'\\s+': None,}, str, *args, **kw)

class XPathParser(Parser):
    Context = Context

    def XPATH(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'XPATH', [])
        PATH = self.PATH(_context)
        result = PATH; current = result
        while self._peek('END', '"/"', '"//"', context=_context) != 'END':
            PATH = self.PATH(_context)
            current.childLocation = PATH; current = current.childLocation
        END = self._scan('END', context=_context)
        return result
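
    # For instance, parsing '/message/body' through XPATH yields the
    # _Location for 'message', with the _Location for 'body' attached as
    # its childLocation by the loop above.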

    def PATH(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'PATH', [])
        _token = self._peek('"/"', '"//"', context=_context)
        if _token == '"/"':
            self._scan('"/"', context=_context)
            result = _Location()
        else: # == '"//"'
            self._scan('"//"', context=_context)
            result = _AnyLocation()
        _token = self._peek('IDENTIFIER', 'WILDCARD', context=_context)
        if _token == 'IDENTIFIER':
            IDENTIFIER = self._scan('IDENTIFIER', context=_context)
            result.elementName = IDENTIFIER
        else: # == 'WILDCARD'
            WILDCARD = self._scan('WILDCARD', context=_context)
            result.elementName = None
        while self._peek('"\\["', 'END', '"/"', '"//"', context=_context) == '"\\["':
            self._scan('"\\["', context=_context)
            PREDICATE = self.PREDICATE(_context)
            result.predicates.append(PREDICATE)
            self._scan('"\\]"', context=_context)
        return result

    def PREDICATE(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'PREDICATE', [])
        _token = self._peek('INDEX', '"\\("', '"@"', 'FUNCNAME', 'STR_DQ', 'STR_SQ', context=_context)
        if _token != 'INDEX':
            EXPR = self.EXPR(_context)
            return EXPR
        else: # == 'INDEX'
            INDEX = self._scan('INDEX', context=_context)
            return IndexValue(INDEX)

    def EXPR(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'EXPR', [])
        FACTOR = self.FACTOR(_context)
        e = FACTOR
        while self._peek('OP_AND', 'OP_OR', '"\\)"', '"\\]"', context=_context) in ['OP_AND', 'OP_OR']:
            BOOLOP = self.BOOLOP(_context)
            FACTOR = self.FACTOR(_context)
            e = BooleanValue(e, BOOLOP, FACTOR)
        return e

    def BOOLOP(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'BOOLOP', [])
        _token = self._peek('OP_AND', 'OP_OR', context=_context)
        if _token == 'OP_AND':
            OP_AND = self._scan('OP_AND', context=_context)
            return OP_AND
        else: # == 'OP_OR'
            OP_OR = self._scan('OP_OR', context=_context)
            return OP_OR

    def FACTOR(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'FACTOR', [])
        _token = self._peek('"\\("', '"@"', 'FUNCNAME', 'STR_DQ', 'STR_SQ', context=_context)
        if _token != '"\\("':
            TERM = self.TERM(_context)
            return TERM
        else: # == '"\\("'
            self._scan('"\\("', context=_context)
            EXPR = self.EXPR(_context)
            self._scan('"\\)"', context=_context)
            return EXPR

    def TERM(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'TERM', [])
        VALUE = self.VALUE(_context)
        t = VALUE
        if self._peek('CMP_EQ', 'CMP_NE', 'OP_AND', 'OP_OR', '"\\)"', '"\\]"', context=_context) in ['CMP_EQ', 'CMP_NE']:
            CMP = self.CMP(_context)
            VALUE = self.VALUE(_context)
            t = CompareValue(t, CMP, VALUE)
        return t

    def VALUE(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'VALUE', [])
        _token = self._peek('"@"', 'FUNCNAME', 'STR_DQ', 'STR_SQ', context=_context)
        if _token == '"@"':
            self._scan('"@"', context=_context)
            IDENTIFIER = self._scan('IDENTIFIER', context=_context)
            return AttribValue(IDENTIFIER)
        elif _token == 'FUNCNAME':
            FUNCNAME = self._scan('FUNCNAME', context=_context)
            f = Function(FUNCNAME); args = []
            self._scan('"\\("', context=_context)
            if self._peek('"\\)"', '"@"', 'FUNCNAME', '","', 'STR_DQ', 'STR_SQ', context=_context) not in ['"\\)"', '","']:
                VALUE = self.VALUE(_context)
                args.append(VALUE)
                while self._peek('","', '"\\)"', context=_context) == '","':
                    self._scan('","', context=_context)
                    VALUE = self.VALUE(_context)
                    args.append(VALUE)
            self._scan('"\\)"', context=_context)
            f.setParams(*args); return f
        else: # in ['STR_DQ', 'STR_SQ']
            STR = self.STR(_context)
            return LiteralValue(STR[1:len(STR)-1])

    def CMP(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'CMP', [])
        _token = self._peek('CMP_EQ', 'CMP_NE', context=_context)
        if _token == 'CMP_EQ':
            CMP_EQ = self._scan('CMP_EQ', context=_context)
            return CMP_EQ
        else: # == 'CMP_NE'
            CMP_NE = self._scan('CMP_NE', context=_context)
            return CMP_NE

    def STR(self, _parent=None):
        _context = self.Context(_parent, self._scanner, 'STR', [])
        _token = self._peek('STR_DQ', 'STR_SQ', context=_context)
        if _token == 'STR_DQ':
            STR_DQ = self._scan('STR_DQ', context=_context)
            return STR_DQ
        else: # == 'STR_SQ'
            STR_SQ = self._scan('STR_SQ', context=_context)
            return STR_SQ

def parse(rule, text):
    P = XPathParser(XPathParserScanner(text))
    return wrap_error_reporter(P, rule)
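
# Example usage (illustrative): 'XPATH' is the start rule defined above, so
#
#     query = parse('XPATH', '/message/body[@xmlns="jabber:client"]')
#
# returns the head of the parsed location chain on success, or None (after
# printing a diagnostic via print_error) if the text does not parse.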

if __name__ == '__main__':
    from sys import argv, stdin
    if len(argv) >= 2:
        if len(argv) >= 3:
            f = open(argv[2], 'r')
        else:
            f = stdin
        print(parse(argv[1], f.read()))
    else: print('Args: <rule> [<filename>]', file=sys.stderr)
# End -- grammar generated by Yapps

''')