tnt.py

  1. """
  2. pygments.lexers.tnt
  3. ~~~~~~~~~~~~~~~~~~~
  4. Lexer for Typographic Number Theory.
  5. :copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
  6. :license: BSD, see LICENSE for details.
  7. """
  8. import re
  9. from pygments.lexer import Lexer
  10. from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
  11. Punctuation, Error
  12. __all__ = ['TNTLexer']


class TNTLexer(Lexer):
    """
    Lexer for Typographic Number Theory, as described in the book
    Gödel, Escher, Bach, by Douglas R. Hofstadter.
    """

    name = 'Typographic Number Theory'
    url = 'https://github.com/Kenny2github/language-tnt'
    aliases = ['tnt']
    filenames = ['*.tnt']
    version_added = '2.7'

    cur = []
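
    # Character classes accepted by the tokenizing helpers below.  Most of
    # them allow both the Unicode symbols used in GEB and plain-ASCII
    # stand-ins (compare '∧'/'&', '∀'/'A', '~'/'!', '′'/"'").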
    LOGIC = set('⊃→]&∧^|∨Vv')
    OPERATORS = set('+.⋅*')
    VARIABLES = set('abcde')
    PRIMES = set("'′")
    NEGATORS = set('~!')
    QUANTIFIERS = set('AE∀∃')
    NUMBERS = set('0123456789')
    WHITESPACE = set('\t \v\n')
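
    # RULES matches the derivation rule that justifies a line; its capturing
    # groups pick out a line number embedded in the rule text (carry-over)
    # or the axiom number.  LINENOS matches a list of referenced line
    # numbers such as "1, 2 and 3".  COMMENT matches a bracketed comment;
    # anything following it on the same line is treated as an error.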
    RULES = re.compile('''(?xi)
        joining | separation | double-tilde | fantasy\\ rule
        | carry[- ]over(?:\\ of)?(?:\\ line)?\\ ([0-9]+) | detachment
        | contrapositive | De\\ Morgan | switcheroo
        | specification | generalization | interchange
        | existence | symmetry | transitivity
        | add\\ S | drop\\ S | induction
        | axiom\\ ([1-5]) | premise | push | pop
    ''')
    LINENOS = re.compile(r'(?:[0-9]+)(?:(?:, ?|,? and )(?:[0-9]+))*')
    COMMENT = re.compile(r'\[[^\n\]]+\]')

    def __init__(self, *args, **kwargs):
        Lexer.__init__(self, *args, **kwargs)
        self.cur = []

    def whitespace(self, start, text, required=False):
        """Tokenize whitespace."""
        end = start
        try:
            while text[end] in self.WHITESPACE:
                end += 1
        except IndexError:
            end = len(text)
        if required and end == start:
            raise AssertionError
        if end != start:
            self.cur.append((start, Text, text[start:end]))
        return end

    def variable(self, start, text):
        """Tokenize a variable."""
        if text[start] not in self.VARIABLES:
            raise AssertionError
        end = start+1
        while text[end] in self.PRIMES:
            end += 1
        self.cur.append((start, Name.Variable, text[start:end]))
        return end
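
    # Terms are handled by recursive descent: zero or more leading S's,
    # followed by the numeral 0, a (possibly primed) variable, or a
    # parenthesized pair of terms joined by an arithmetic operator.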
    def term(self, start, text):
        """Tokenize a term."""
        if text[start] == 'S':  # S...S(...) or S...0
            end = start+1
            while text[end] == 'S':
                end += 1
            self.cur.append((start, Number.Integer, text[start:end]))
            return self.term(end, text)
        if text[start] == '0':  # the singleton 0
            self.cur.append((start, Number.Integer, text[start]))
            return start+1
        if text[start] in self.VARIABLES:  # a''...
            return self.variable(start, text)
        if text[start] == '(':  # (...+...)
            self.cur.append((start, Punctuation, text[start]))
            start = self.term(start+1, text)
            if text[start] not in self.OPERATORS:
                raise AssertionError
            self.cur.append((start, Operator, text[start]))
            start = self.term(start+1, text)
            if text[start] != ')':
                raise AssertionError
            self.cur.append((start, Punctuation, text[start]))
            return start+1
        raise AssertionError  # no matches
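
    # Formulas are likewise parsed by recursive descent: a run of negators,
    # a quantification (Aa: / Ea:), a compound formula in angle brackets
    # joined by a logical connective, or an atomic equation between terms.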
    def formula(self, start, text):
        """Tokenize a formula."""
        if text[start] in self.NEGATORS:  # ~<...>
            end = start+1
            while text[end] in self.NEGATORS:
                end += 1
            self.cur.append((start, Operator, text[start:end]))
            return self.formula(end, text)
        if text[start] in self.QUANTIFIERS:  # Aa:<...>
            self.cur.append((start, Keyword.Declaration, text[start]))
            start = self.variable(start+1, text)
            if text[start] != ':':
                raise AssertionError
            self.cur.append((start, Punctuation, text[start]))
            return self.formula(start+1, text)
        if text[start] == '<':  # <...&...>
            self.cur.append((start, Punctuation, text[start]))
            start = self.formula(start+1, text)
            if text[start] not in self.LOGIC:
                raise AssertionError
            self.cur.append((start, Operator, text[start]))
            start = self.formula(start+1, text)
            if text[start] != '>':
                raise AssertionError
            self.cur.append((start, Punctuation, text[start]))
            return start+1
        # ...=...
        start = self.term(start, text)
        if text[start] != '=':
            raise AssertionError
        self.cur.append((start, Operator, text[start]))
        start = self.term(start+1, text)
        return start

    def rule(self, start, text):
        """Tokenize a rule."""
        match = self.RULES.match(text, start)
        if match is None:
            raise AssertionError
        groups = sorted(match.regs[1:])  # exclude whole match
        for group in groups:
            if group[0] >= 0:  # this group matched
                self.cur.append((start, Keyword, text[start:group[0]]))
                self.cur.append((group[0], Number.Integer,
                                 text[group[0]:group[1]]))
                if group[1] != match.end():
                    self.cur.append((group[1], Keyword,
                                     text[group[1]:match.end()]))
                break
        else:
            self.cur.append((start, Keyword, text[start:match.end()]))
        return match.end()

    def lineno(self, start, text):
        """Tokenize a line referral."""
        end = start
        while text[end] not in self.NUMBERS:
            end += 1
        self.cur.append((start, Punctuation, text[start]))
        self.cur.append((start+1, Text, text[start+1:end]))
        start = end
        match = self.LINENOS.match(text, start)
        if match is None:
            raise AssertionError
        if text[match.end()] != ')':
            raise AssertionError
        self.cur.append((match.start(), Number.Integer, match.group(0)))
        self.cur.append((match.end(), Punctuation, text[match.end()]))
        return match.end() + 1

    def error_till_line_end(self, start, text):
        """Mark everything from ``start`` to the end of the line as Error."""
        end = start
        try:
            while text[end] != '\n':  # there's whitespace in rules
                end += 1
        except IndexError:
            end = len(text)
        if end != start:
            self.cur.append((start, Error, text[start:end]))
        end = self.whitespace(end, text)
        return end
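
    # A line of a derivation consists of an optional line number, then either
    # a bracketed comment, a fantasy push/pop bracket, or a formula followed
    # by the rule that justifies it and an optional parenthesized list of
    # referenced lines.  Anything that fails to parse is marked as Error up
    # to the end of the line.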
    def get_tokens_unprocessed(self, text):
        """Returns a list of TNT tokens."""
        self.cur = []
        start = end = self.whitespace(0, text)
        while start <= end < len(text):
            try:
                # try line number
                while text[end] in self.NUMBERS:
                    end += 1
                if end != start:  # actual number present
                    self.cur.append((start, Number.Integer, text[start:end]))
                    # whitespace is required after a line number
                    orig = len(self.cur)
                    try:
                        start = end = self.whitespace(end, text, True)
                    except AssertionError:
                        del self.cur[orig:]
                        start = end = self.error_till_line_end(end, text)
                        continue
                # at this point it could be a comment
                match = self.COMMENT.match(text, start)
                if match is not None:
                    self.cur.append((start, Comment, text[start:match.end()]))
                    start = end = match.end()
                    # anything after the closing bracket is invalid
                    start = end = self.error_till_line_end(start, text)
                    # do not attempt to process the rest
                    continue
                del match
                if text[start] in '[]':  # fantasy push or pop
                    self.cur.append((start, Keyword, text[start]))
                    start += 1
                    end += 1
                else:
                    # one formula, possibly containing subformulae
                    orig = len(self.cur)
                    try:
                        start = end = self.formula(start, text)
                    except (AssertionError, RecursionError):  # not well-formed
                        del self.cur[orig:]
                        while text[end] not in self.WHITESPACE:
                            end += 1
                        self.cur.append((start, Error, text[start:end]))
                        start = end
                # skip whitespace after formula
                orig = len(self.cur)
                try:
                    start = end = self.whitespace(end, text, True)
                except AssertionError:
                    del self.cur[orig:]
                    start = end = self.error_till_line_end(start, text)
                    continue
                # rule proving this formula a theorem
                orig = len(self.cur)
                try:
                    start = end = self.rule(start, text)
                except AssertionError:
                    del self.cur[orig:]
                    start = end = self.error_till_line_end(start, text)
                    continue
                # skip whitespace after rule
                start = end = self.whitespace(end, text)
                # line marker
                if text[start] == '(':
                    orig = len(self.cur)
                    try:
                        start = end = self.lineno(start, text)
                    except AssertionError:
                        del self.cur[orig:]
                        start = end = self.error_till_line_end(start, text)
                        continue
                    start = end = self.whitespace(start, text)
            except IndexError:
                try:
                    del self.cur[orig:]
                except NameError:
                    pass  # if orig was never defined, fine
                self.error_till_line_end(start, text)
        return self.cur
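

# ----------------------------------------------------------------------
# Usage sketch: a minimal example of driving TNTLexer through the standard
# Pygments API.  The sample derivation below is illustrative only and is
# not taken from this module.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    SAMPLE = (
        '1 Aa:~Sa=0 axiom 1\n'
        '2 ~S0=0 specification (1)\n'
    )

    # Highlight the sample for a terminal...
    print(highlight(SAMPLE, TNTLexer(), TerminalFormatter()))

    # ...or inspect the raw (index, token type, value) tuples directly.
    for index, token, value in TNTLexer().get_tokens_unprocessed(SAMPLE):
        print(index, token, repr(value))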