lexers.py 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517
  1. # -*- coding: utf-8 -*-
  2. """
  3. Defines a variety of Pygments lexers for highlighting IPython code.
  4. This includes:
  5. IPythonLexer, IPython3Lexer
  6. Lexers for pure IPython (python + magic/shell commands)
  7. IPythonPartialTracebackLexer, IPythonTracebackLexer
  8. Supports 2.x and 3.x via keyword `python3`. The partial traceback
  9. lexer reads everything but the Python code appearing in a traceback.
  10. The full lexer combines the partial lexer with an IPython lexer.
  11. IPythonConsoleLexer
  12. A lexer for IPython console sessions, with support for tracebacks.
  13. IPyLexer
  14. A friendly lexer which examines the first line of text and from it,
  15. decides whether to use an IPython lexer or an IPython console lexer.
  16. This is probably the only lexer that needs to be explicitly added
  17. to Pygments.
  18. """
  19. #-----------------------------------------------------------------------------
  20. # Copyright (c) 2013, the IPython Development Team.
  21. #
  22. # Distributed under the terms of the Modified BSD License.
  23. #
  24. # The full license is in the file COPYING.txt, distributed with this software.
  25. #-----------------------------------------------------------------------------
  26. # Standard library
  27. import re
  28. # Third party
  29. from pygments.lexers import BashLexer, Python3Lexer
  30. try:
  31. # PythonLexer was renamed to Python2Lexer in pygments 2.5
  32. from pygments.lexers import Python2Lexer
  33. except ImportError:
  34. from pygments.lexers import PythonLexer as Python2Lexer
  35. from pygments.lexer import (
  36. Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
  37. )
  38. from pygments.token import (
  39. Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
  40. )
  41. from pygments.util import get_bool_opt
  42. # Local
# Matches one line at a time, newline included; used to feed the console
# lexer line-by-line.
line_re = re.compile('.*?\n')

__all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer',
           'IPythonPartialTracebackLexer', 'IPythonTracebackLexer',
           'IPythonConsoleLexer', 'IPyLexer']

# Extra token rules prepended to the base Python lexer's 'root' state so that
# IPython-specific syntax (magics, shell escapes, '?' help) is highlighted.
# Order matters: earlier rules win.
ipython_tokens = [
    # Cell magic: %%name followed by the rest of the cell ((?s) lets .* span lines).
    (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
    # %%! cell: the body is lexed as shell code.
    (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
    # Help request on a magic: %name? or %%name??
    (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)),
    # Bare trailing ? / ?? help operator at end of line.
    (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)),
    # Shell-oriented line magics: their argument is shell code.
    (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
                                            using(BashLexer), Text)),
    # Generic line magic: %name args
    (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
    # !! shell escape -- rest of line lexed as shell code.
    (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
    # ! shell escape; (?!=) avoids matching the != operator.
    (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
    # Leading ? / ?? introspection, e.g. ?obj or ??%magic
    (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
    # Trailing ? / ?? introspection, e.g. obj?
    (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
]
  60. def build_ipy_lexer(python3):
  61. """Builds IPython lexers depending on the value of `python3`.
  62. The lexer inherits from an appropriate Python lexer and then adds
  63. information about IPython specific keywords (i.e. magic commands,
  64. shell commands, etc.)
  65. Parameters
  66. ----------
  67. python3 : bool
  68. If `True`, then build an IPython lexer from a Python 3 lexer.
  69. """
  70. # It would be nice to have a single IPython lexer class which takes
  71. # a boolean `python3`. But since there are two Python lexer classes,
  72. # we will also have two IPython lexer classes.
  73. if python3:
  74. PyLexer = Python3Lexer
  75. name = 'IPython3'
  76. aliases = ['ipython3']
  77. doc = """IPython3 Lexer"""
  78. else:
  79. PyLexer = Python2Lexer
  80. name = 'IPython'
  81. aliases = ['ipython2', 'ipython']
  82. doc = """IPython Lexer"""
  83. tokens = PyLexer.tokens.copy()
  84. tokens['root'] = ipython_tokens + tokens['root']
  85. attrs = {'name': name, 'aliases': aliases, 'filenames': [],
  86. '__doc__': doc, 'tokens': tokens}
  87. return type(name, (PyLexer,), attrs)
# Concrete lexer classes produced by the factory above, one per Python major
# version; these are the classes registered with / used by Pygments.
IPython3Lexer = build_ipy_lexer(python3=True)
IPythonLexer = build_ipy_lexer(python3=False)
class IPythonPartialTracebackLexer(RegexLexer):
    """
    Partial lexer for IPython tracebacks.

    Handles all the non-python output. This works for both Python 2.x and 3.x.

    """
    name = 'IPython Partial Traceback'

    # Rule order matters: the first matching regex wins, so the more
    # specific traceback-header patterns come before the generic ones.
    tokens = {
        'root': [
            # Tracebacks for syntax errors have a different style.
            # For both types of tracebacks, we mark the first line with
            # Generic.Traceback. For syntax errors, we mark the filename
            # as we mark the filenames for non-syntax tracebacks.
            #
            # These two regexps define how IPythonConsoleLexer finds a
            # traceback.
            #
            ## Non-syntax traceback: optional ^C then the line of hyphens.
            (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)),
            ## Syntax traceback: the '  File "...", line N' header.
            (r'^(  File)(.*)(, line )(\d+\n)',
             bygroups(Generic.Traceback, Name.Namespace,
                      Generic.Traceback, Literal.Number.Integer)),

            # (Exception Identifier)(Whitespace)(Traceback Message)
            (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)',
             bygroups(Name.Exception, Generic.Whitespace, Text)),
            # (Module/Filename)(Text)(Callee)(Function Signature)
            # Better options for callee and function signature?
            (r'(.*)( in )(.*)(\(.*\)\n)',
             bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)),
            # Regular line: (Whitespace)(Line Number)(Python Code)
            # The code portion is tagged Other so a delegating lexer can
            # hand it to a real Python lexer later.
            (r'(\s*?)(\d+)(.*?\n)',
             bygroups(Generic.Whitespace, Literal.Number.Integer, Other)),
            # Emphasized line: (Arrow)(Line Number)(Python Code)
            # Using Exception token so arrow color matches the Exception.
            (r'(-*>?\s?)(\d+)(.*?\n)',
             bygroups(Name.Exception, Literal.Number.Integer, Other)),
            # (Exception Identifier)(Message)
            (r'(?u)(^[^\d\W]\w*)(:.*?\n)',
             bygroups(Name.Exception, Text)),
            # Tag everything else as Other, will be handled later.
            (r'.*\n', Other),
        ],
    }
  133. class IPythonTracebackLexer(DelegatingLexer):
  134. """
  135. IPython traceback lexer.
  136. For doctests, the tracebacks can be snipped as much as desired with the
  137. exception to the lines that designate a traceback. For non-syntax error
  138. tracebacks, this is the line of hyphens. For syntax error tracebacks,
  139. this is the line which lists the File and line number.
  140. """
  141. # The lexer inherits from DelegatingLexer. The "root" lexer is an
  142. # appropriate IPython lexer, which depends on the value of the boolean
  143. # `python3`. First, we parse with the partial IPython traceback lexer.
  144. # Then, any code marked with the "Other" token is delegated to the root
  145. # lexer.
  146. #
  147. name = 'IPython Traceback'
  148. aliases = ['ipythontb']
  149. def __init__(self, **options):
  150. self.python3 = get_bool_opt(options, 'python3', False)
  151. if self.python3:
  152. self.aliases = ['ipython3tb']
  153. else:
  154. self.aliases = ['ipython2tb', 'ipythontb']
  155. if self.python3:
  156. IPyLexer = IPython3Lexer
  157. else:
  158. IPyLexer = IPythonLexer
  159. DelegatingLexer.__init__(self, IPyLexer,
  160. IPythonPartialTracebackLexer, **options)
class IPythonConsoleLexer(Lexer):
    """
    An IPython console lexer for IPython code-blocks and doctests, such as:

    .. code-block:: rst

        .. code-block:: ipythonconsole

            In [1]: a = 'foo'

            In [2]: a
            Out[2]: 'foo'

            In [3]: print a
            foo

            In [4]: 1 / 0

    Support is also provided for IPython exceptions:

    .. code-block:: rst

        .. code-block:: ipythonconsole

            In [1]: raise Exception
            ---------------------------------------------------------------------------
            Exception                                 Traceback (most recent call last)
            <ipython-input-1-fca2ab0ca76b> in <module>()
            ----> 1 raise Exception

            Exception:

    """
    name = 'IPython console session'
    aliases = ['ipythonconsole']
    mimetypes = ['text/x-ipython-console']

    # The regexps used to determine what is input and what is output.
    # The default prompts for IPython are:
    #
    #     in           = 'In [#]: '
    #     continuation = '   .D.: '
    #     template     = 'Out[#]: '
    #
    # Where '#' is the 'prompt number' or 'execution count' and 'D'
    # D is a number of dots matching the width of the execution count
    #
    in1_regex = r'In \[[0-9]+\]: '
    in2_regex = r'   \.\.+\.: '
    out_regex = r'Out\[[0-9]+\]: '

    #: The regex to determine when a traceback starts.
    ipytb_start = re.compile(r'^(\^C)?(-+\n)|^(  File)(.*)(, line )(\d+\n)')

    def __init__(self, **options):
        """Initialize the IPython console lexer.

        Parameters
        ----------
        python3 : bool
            If `True`, then the console inputs are parsed using a Python 3
            lexer. Otherwise, they are parsed using a Python 2 lexer.
        in1_regex : RegexObject
            The compiled regular expression used to detect the start
            of inputs. Although the IPython configuration setting may have a
            trailing whitespace, do not include it in the regex. If `None`,
            then the default input prompt is assumed.
        in2_regex : RegexObject
            The compiled regular expression used to detect the continuation
            of inputs. Although the IPython configuration setting may have a
            trailing whitespace, do not include it in the regex. If `None`,
            then the default input prompt is assumed.
        out_regex : RegexObject
            The compiled regular expression used to detect outputs. If `None`,
            then the default output prompt is assumed.

        """
        self.python3 = get_bool_opt(options, 'python3', False)
        if self.python3:
            self.aliases = ['ipython3console']
        else:
            self.aliases = ['ipython2console', 'ipythonconsole']

        # Fall back on the class-level defaults when the caller does not
        # supply custom prompt regexes.
        in1_regex = options.get('in1_regex', self.in1_regex)
        in2_regex = options.get('in2_regex', self.in2_regex)
        out_regex = options.get('out_regex', self.out_regex)

        # So that we can work with input and output prompts which have been
        # rstrip'd (possibly by editors) we also need rstrip'd variants. If
        # we do not do this, then such prompts will be tagged as 'output'.
        # The reason can't just use the rstrip'd variants instead is because
        # we want any whitespace associated with the prompt to be inserted
        # with the token. This allows formatted code to be modified so as hide
        # the appearance of prompts, with the whitespace included. One example
        # use of this is in copybutton.js from the standard lib Python docs.
        in1_regex_rstrip = in1_regex.rstrip() + '\n'
        in2_regex_rstrip = in2_regex.rstrip() + '\n'
        out_regex_rstrip = out_regex.rstrip() + '\n'

        # Compile and save them all.
        attrs = ['in1_regex', 'in2_regex', 'out_regex',
                 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip']
        for attr in attrs:
            # Compile each local pattern string and shadow the class-level
            # string attribute with its compiled form on the instance.
            self.__setattr__(attr, re.compile(locals()[attr]))

        Lexer.__init__(self, **options)

        if self.python3:
            pylexer = IPython3Lexer
            tblexer = IPythonTracebackLexer
        else:
            pylexer = IPythonLexer
            tblexer = IPythonTracebackLexer

        self.pylexer = pylexer(**options)
        self.tblexer = tblexer(**options)

        self.reset()

    def reset(self):
        # Lexing state: current mode ('input', 'output', or 'tb'), the
        # absolute offset of the buffer within the full text, the text
        # accumulated for the current mode, and pending prompt insertions.
        self.mode = 'output'
        self.index = 0
        self.buffer = u''
        self.insertions = []

    def buffered_tokens(self):
        """
        Generator of unprocessed tokens after doing insertions and before
        changing to a new state.

        """
        if self.mode == 'output':
            tokens = [(0, Generic.Output, self.buffer)]
        elif self.mode == 'input':
            tokens = self.pylexer.get_tokens_unprocessed(self.buffer)
        else: # traceback
            tokens = self.tblexer.get_tokens_unprocessed(self.buffer)
        for i, t, v in do_insertions(self.insertions, tokens):
            # All token indexes are relative to the buffer.
            yield self.index + i, t, v

        # Clear it all
        self.index += len(self.buffer)
        self.buffer = u''
        self.insertions = []

    def get_mci(self, line):
        """
        Parses the line and returns a 3-tuple: (mode, code, insertion).

        `mode` is the next mode (or state) of the lexer, and is always equal
        to 'input', 'output', or 'tb'.

        `code` is a portion of the line that should be added to the buffer
        corresponding to the next mode and eventually lexed by another lexer.
        For example, `code` could be Python code if `mode` were 'input'.

        `insertion` is a 3-tuple (index, token, text) representing an
        unprocessed "token" that will be inserted into the stream of tokens
        that are created from the buffer once we change modes. This is usually
        the input or output prompt.

        In general, the next mode depends on current mode and on the contents
        of `line`.

        """
        # To reduce the number of regex match checks, we have multiple
        # 'if' blocks instead of 'if-elif' blocks.

        # Check for possible end of input: a bare continuation prompt
        # (nothing after it but whitespace) terminates a multiline input.
        in2_match = self.in2_regex.match(line)
        in2_match_rstrip = self.in2_regex_rstrip.match(line)
        if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \
           in2_match_rstrip:
            end_input = True
        else:
            end_input = False
        if end_input and self.mode != 'tb':
            # Only look for an end of input when not in tb mode.
            # An ellipsis could appear within the traceback.
            mode = 'output'
            code = u''
            insertion = (0, Generic.Prompt, line)
            return mode, code, insertion

        # Check for output prompt
        out_match = self.out_regex.match(line)
        out_match_rstrip = self.out_regex_rstrip.match(line)
        if out_match or out_match_rstrip:
            mode = 'output'
            if out_match:
                idx = out_match.end()
            else:
                idx = out_match_rstrip.end()
            code = line[idx:]
            # Use the 'heading' token for output. We cannot use Generic.Error
            # since it would conflict with exceptions.
            insertion = (0, Generic.Heading, line[:idx])
            return mode, code, insertion

        # Check for input or continuation prompt (non stripped version)
        in1_match = self.in1_regex.match(line)
        if in1_match or (in2_match and self.mode != 'tb'):
            # New input or when not in tb, continued input.
            # We do not check for continued input when in tb since it is
            # allowable to replace a long stack with an ellipsis.
            mode = 'input'
            if in1_match:
                idx = in1_match.end()
            else: # in2_match
                idx = in2_match.end()
            code = line[idx:]
            insertion = (0, Generic.Prompt, line[:idx])
            return mode, code, insertion

        # Check for input or continuation prompt (stripped version)
        in1_match_rstrip = self.in1_regex_rstrip.match(line)
        if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'):
            # New input or when not in tb, continued input.
            # We do not check for continued input when in tb since it is
            # allowable to replace a long stack with an ellipsis.
            mode = 'input'
            if in1_match_rstrip:
                idx = in1_match_rstrip.end()
            else: # in2_match
                idx = in2_match_rstrip.end()
            code = line[idx:]
            insertion = (0, Generic.Prompt, line[:idx])
            return mode, code, insertion

        # Check for traceback
        if self.ipytb_start.match(line):
            mode = 'tb'
            code = line
            insertion = None
            return mode, code, insertion

        # All other stuff...
        if self.mode in ('input', 'output'):
            # We assume all other text is output. Multiline input that
            # does not use the continuation marker cannot be detected.
            # For example, the 3 in the following is clearly output:
            #
            #    In [1]: print 3
            #    3
            #
            # But the following second line is part of the input:
            #
            #    In [2]: while True:
            #        print True
            #
            # In both cases, the 2nd line will be 'output'.
            #
            mode = 'output'
        else:
            mode = 'tb'

        code = line
        insertion = None

        return mode, code, insertion

    def get_tokens_unprocessed(self, text):
        # Drive the mode state machine one line at a time; buffered text is
        # flushed (lexed by the mode-appropriate sub-lexer) on each mode
        # change and once more at the end.
        self.reset()
        for match in line_re.finditer(text):
            line = match.group()
            mode, code, insertion = self.get_mci(line)

            if mode != self.mode:
                # Yield buffered tokens before transitioning to new mode.
                for token in self.buffered_tokens():
                    yield token
                self.mode = mode

            if insertion:
                self.insertions.append((len(self.buffer), [insertion]))
            self.buffer += code

        for token in self.buffered_tokens():
            yield token
class IPyLexer(Lexer):
    r"""
    Primary lexer for all IPython-like code.

    This is a simple helper lexer. If the first line of the text begins with
    "In \[[0-9]+\]:", then the entire text is parsed with an IPython console
    lexer. If not, then the entire text is parsed with an IPython lexer.

    The goal is to reduce the number of lexers that are registered
    with Pygments.

    """
    # NOTE: the docstring is a raw string because it contains regex escapes
    # (\[) that would otherwise be invalid string escape sequences.
    name = 'IPy session'
    aliases = ['ipy']

    def __init__(self, **options):
        self.python3 = get_bool_opt(options, 'python3', False)
        if self.python3:
            self.aliases = ['ipy3']
        else:
            self.aliases = ['ipy2', 'ipy']

        Lexer.__init__(self, **options)

        # Pre-instantiate both delegate lexers; get_tokens_unprocessed picks
        # one per text based on whether a console prompt is present.
        self.IPythonLexer = IPythonLexer(**options)
        self.IPythonConsoleLexer = IPythonConsoleLexer(**options)

    def get_tokens_unprocessed(self, text):
        # Search for the input prompt anywhere...this allows code blocks to
        # begin with comments as well.
        if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
            lex = self.IPythonConsoleLexer
        else:
            lex = self.IPythonLexer
        for token in lex.get_tokens_unprocessed(text):
            yield token