  1. """
  2. FIXME:https://github.com/twisted/twisted/issues/3843
  3. This can be removed once t.persisted.aot is removed.
  4. New code should not make use of this.
  5. Tokenization help for Python programs.
  6. vendored from https://github.com/python/cpython/blob/6b825c1b8a14460641ca6f1647d83005c68199aa/Lib/tokenize.py
  7. Licence: https://docs.python.org/3/license.html
  8. tokenize(readline) is a generator that breaks a stream of bytes into
  9. Python tokens. It decodes the bytes according to PEP-0263 for
  10. determining source file encoding.
  11. It accepts a readline-like method which is called repeatedly to get the
  12. next line of input (or b"" for EOF). It generates 5-tuples with these
  13. members:
  14. the token type (see token.py)
  15. the token (a string)
  16. the starting (row, column) indices of the token (a 2-tuple of ints)
  17. the ending (row, column) indices of the token (a 2-tuple of ints)
  18. the original line (string)
  19. It is designed to match the working of the Python tokenizer exactly, except
  20. that it produces COMMENT tokens for comments and gives type OP for all
  21. operators. Additionally, all token lists start with an ENCODING token
  22. which tells you which encoding was used to decode the bytes stream.
  23. """
  24. __author__ = "Ka-Ping Yee <ping@lfw.org>"
  25. __credits__ = (
  26. "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, "
  27. "Skip Montanaro, Raymond Hettinger, Trent Nelson, "
  28. "Michael Foord"
  29. )
  30. import collections
  31. import functools
  32. import itertools as _itertools
  33. import re
  34. import sys
  35. from builtins import open as _builtin_open
  36. from codecs import BOM_UTF8, lookup
  37. from io import TextIOWrapper
  38. from ._token import (
  39. AMPER,
  40. AMPEREQUAL,
  41. ASYNC,
  42. AT,
  43. ATEQUAL,
  44. AWAIT,
  45. CIRCUMFLEX,
  46. CIRCUMFLEXEQUAL,
  47. COLON,
  48. COLONEQUAL,
  49. COMMA,
  50. COMMENT,
  51. DEDENT,
  52. DOT,
  53. DOUBLESLASH,
  54. DOUBLESLASHEQUAL,
  55. DOUBLESTAR,
  56. DOUBLESTAREQUAL,
  57. ELLIPSIS,
  58. ENCODING,
  59. ENDMARKER,
  60. EQEQUAL,
  61. EQUAL,
  62. ERRORTOKEN,
  63. EXACT_TOKEN_TYPES,
  64. GREATER,
  65. GREATEREQUAL,
  66. INDENT,
  67. ISEOF,
  68. ISNONTERMINAL,
  69. ISTERMINAL,
  70. LBRACE,
  71. LEFTSHIFT,
  72. LEFTSHIFTEQUAL,
  73. LESS,
  74. LESSEQUAL,
  75. LPAR,
  76. LSQB,
  77. MINEQUAL,
  78. MINUS,
  79. N_TOKENS,
  80. NAME,
  81. NEWLINE,
  82. NL,
  83. NOTEQUAL,
  84. NT_OFFSET,
  85. NUMBER,
  86. OP,
  87. PERCENT,
  88. PERCENTEQUAL,
  89. PLUS,
  90. PLUSEQUAL,
  91. RARROW,
  92. RBRACE,
  93. RIGHTSHIFT,
  94. RIGHTSHIFTEQUAL,
  95. RPAR,
  96. RSQB,
  97. SEMI,
  98. SLASH,
  99. SLASHEQUAL,
  100. SOFT_KEYWORD,
  101. STAR,
  102. STAREQUAL,
  103. STRING,
  104. TILDE,
  105. TYPE_COMMENT,
  106. TYPE_IGNORE,
  107. VBAR,
  108. VBAREQUAL,
  109. tok_name,
  110. )
  111. __all__ = [
  112. "tok_name",
  113. "ISTERMINAL",
  114. "ISNONTERMINAL",
  115. "ISEOF",
  116. "ENDMARKER",
  117. "NAME",
  118. "NUMBER",
  119. "STRING",
  120. "NEWLINE",
  121. "INDENT",
  122. "DEDENT",
  123. "LPAR",
  124. "RPAR",
  125. "LSQB",
  126. "RSQB",
  127. "COLON",
  128. "COMMA",
  129. "SEMI",
  130. "PLUS",
  131. "MINUS",
  132. "STAR",
  133. "SLASH",
  134. "VBAR",
  135. "AMPER",
  136. "LESS",
  137. "GREATER",
  138. "EQUAL",
  139. "DOT",
  140. "PERCENT",
  141. "LBRACE",
  142. "RBRACE",
  143. "EQEQUAL",
  144. "NOTEQUAL",
  145. "LESSEQUAL",
  146. "GREATEREQUAL",
  147. "TILDE",
  148. "CIRCUMFLEX",
  149. "LEFTSHIFT",
  150. "RIGHTSHIFT",
  151. "DOUBLESTAR",
  152. "PLUSEQUAL",
  153. "MINEQUAL",
  154. "STAREQUAL",
  155. "SLASHEQUAL",
  156. "PERCENTEQUAL",
  157. "AMPEREQUAL",
  158. "VBAREQUAL",
  159. "CIRCUMFLEXEQUAL",
  160. "LEFTSHIFTEQUAL",
  161. "RIGHTSHIFTEQUAL",
  162. "DOUBLESTAREQUAL",
  163. "DOUBLESLASH",
  164. "DOUBLESLASHEQUAL",
  165. "AT",
  166. "ATEQUAL",
  167. "RARROW",
  168. "ELLIPSIS",
  169. "COLONEQUAL",
  170. "OP",
  171. "AWAIT",
  172. "ASYNC",
  173. "TYPE_IGNORE",
  174. "TYPE_COMMENT",
  175. "SOFT_KEYWORD",
  176. "ERRORTOKEN",
  177. "COMMENT",
  178. "NL",
  179. "ENCODING",
  180. "N_TOKENS",
  181. "NT_OFFSET",
  182. "tokenize",
  183. "generate_tokens",
  184. "detect_encoding",
  185. "untokenize",
  186. "TokenInfo",
  187. ]
  188. cookie_re = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)", re.ASCII)
  189. blank_re = re.compile(rb"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII)
  190. class TokenInfo(collections.namedtuple("TokenInfo", "type string start end line")):
  191. def __repr__(self):
  192. annotated_type = "%d (%s)" % (self.type, tok_name[self.type])
  193. return (
  194. "TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)"
  195. % self._replace(type=annotated_type)
  196. )
  197. @property
  198. def exact_type(self):
  199. if self.type == OP and self.string in EXACT_TOKEN_TYPES:
  200. return EXACT_TOKEN_TYPES[self.string]
  201. else:
  202. return self.type
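

# A hedged usage sketch (illustrative, not part of the vendored code):
# ``exact_type`` refines a generic OP token into its specific operator type.
#
#     >>> info = TokenInfo(OP, "+", (1, 2), (1, 3), "x + 1\n")
#     >>> tok_name[info.type], tok_name[info.exact_type]
#     ('OP', 'PLUS')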


def group(*choices):
    return "(" + "|".join(choices) + ")"


def any(*choices):
    return group(*choices) + "*"


def maybe(*choices):
    return group(*choices) + "?"


# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r"[ \f\t]*"
Comment = r"#[^\r\n]*"
Ignore = Whitespace + any(r"\\\r?\n" + Whitespace) + maybe(Comment)
Name = r"\w+"

Hexnumber = r"0[xX](?:_?[0-9a-fA-F])+"
Binnumber = r"0[bB](?:_?[01])+"
Octnumber = r"0[oO](?:_?[0-7])+"
Decnumber = r"(?:0(?:_?0)*|[1-9](?:_?[0-9])*)"
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r"[eE][-+]?[0-9](?:_?[0-9])*"
Pointfloat = group(
    r"[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?", r"\.[0-9](?:_?[0-9])*"
) + maybe(Exponent)
Expfloat = r"[0-9](?:_?[0-9])*" + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r"[0-9](?:_?[0-9])*[jJ]", Floatnumber + r"[jJ]")
Number = group(Imagnumber, Floatnumber, Intnumber)
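

# Hedged illustration (not part of the vendored code): the composed Number
# pattern matches any numeric literal, trying the imaginary, float, and int
# alternatives in that order (using the _compile helper defined below).
#
#     >>> _compile(Number).match("0x_1f.real").group()
#     '0x_1f'
#     >>> _compile(Number).match("1_000j").group()
#     '1_000j'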


# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
    # The valid string prefixes. Only contain the lower case versions,
    # and don't contain any permutations (include 'fr', but not
    # 'rf'). The various permutations will be generated.
    _valid_string_prefixes = ["b", "r", "u", "f", "br", "fr"]
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {""}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            # character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add("".join(u))
    return result
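

# Hedged illustration (not part of the vendored code): the generated set
# contains every case/order permutation of each valid prefix, plus "".
#
#     >>> {"", "b", "B", "rb", "bR", "Rb", "FR"} <= _all_string_prefixes()
#     True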


@functools.lru_cache(None)
def _compile(expr):
    return re.compile(expr, re.UNICODE)


# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(
    StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
    StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"',
)

# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get recognized as two instances
# of =.
Special = group(*(re.escape(x) for x in sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r"\r?\n", Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(
    StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r"\\\r?\n"),
    StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r"\\\r?\n"),
)
PseudoExtras = group(r"\\\r?\n|\Z", Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3
del _prefix
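

# Hedged illustration (not part of the vendored code): endpats maps each
# opening prefix-plus-quote to the pattern for the remainder of the string.
#
#     >>> endpats["rb'"] is Single and endpats['f"""'] is Double3
#     True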


# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
del t, u

tabsize = 8


class TokenError(Exception):
    pass


class StopTokenizing(Exception):
    pass


class Untokenizer:
    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError(
                "start ({},{}) precedes previous end ({},{})".format(
                    row, col, self.prev_row, self.prev_col
                )
            )
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += " "

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = " " + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)


def untokenize(iterable):
    """Transform tokens back into Python source code.

    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out
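

# Hedged round-trip sketch (illustrative, not part of the vendored code):
# full 5-tuples from tokenize() reproduce the source bytes exactly.
#
#     >>> from io import BytesIO
#     >>> source = b"x = 1\n"
#     >>> untokenize(tokenize(BytesIO(source).readline)) == source
#     True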


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or enc.startswith(
        ("latin-1-", "iso-8859-1-", "iso-latin-1-")
    ):
        return "iso-8859-1"
    return orig_enc


def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = "utf-8"

    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b""

    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode("utf-8")
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = "{} for {!r}".format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename, encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != "utf-8":
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = "encoding problem: utf-8"
                else:
                    msg = "encoding problem for {!r}: utf-8".format(filename)
                raise SyntaxError(msg)
            encoding += "-sig"
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = "utf-8-sig"
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
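

# Hedged usage sketch (illustrative, not part of the vendored code):
#
#     >>> from io import BytesIO
#     >>> detect_encoding(BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n").readline)
#     ('iso-8859-1', [b'# -*- coding: latin-1 -*-\n'])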


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, "rb")
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = "r"
        return text
    except BaseException:
        buffer.close()
        raise
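

# Hedged usage sketch; "example.py" is a hypothetical path (illustrative,
# not part of the vendored code):
#
#     >>> with open("example.py") as f:  # doctest: +SKIP
#     ...     source = f.read()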


def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    physical line.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    encoding, consumed = detect_encoding(readline)
    empty = _itertools.repeat(b"")
    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
    return _tokenize(rl_gen.__next__, encoding)
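

# Hedged usage sketch (illustrative, not part of the vendored code):
#
#     >>> from io import BytesIO
#     >>> for tok in tokenize(BytesIO(b"x = 1\n").readline):
#     ...     print(tok_name[tok.type], repr(tok.string))
#     ENCODING 'utf-8'
#     NAME 'x'
#     OP '='
#     NUMBER '1'
#     NEWLINE '\n'
#     ENDMARKER ''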


def _tokenize(readline, encoding):
    strstart = None
    endprog = None
    lnum = parenlev = continued = 0
    numchars = "0123456789"
    contstr, needcont = "", 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), "")
    last_line = b""
    line = b""
    while True:  # loop over lines in stream
        try:
            # We capture the value of the line variable here because
            # readline uses the empty string '' to signal end of input,
            # hence `line` itself will always be overwritten at the end
            # of this loop.
            last_line = line
            line = readline()
        except StopIteration:
            line = b""

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:  # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(
                    STRING, contstr + line[:end], strstart, (lnum, end), contline + line
                )
                contstr, needcont = "", 0
                contline = None
            elif needcont and line[-2:] != "\\\n" and line[-3:] != "\\\r\n":
                yield TokenInfo(
                    ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline
                )
                contstr = ""
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line:
                break
            column = 0
            while pos < max:  # measure leading whitespace
                if line[pos] == " ":
                    column += 1
                elif line[pos] == "\t":
                    column = (column // tabsize + 1) * tabsize
                elif line[pos] == "\f":
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in "#\r\n":  # skip comments or blank lines
                if line[pos] == "#":
                    comment_token = line[pos:].rstrip("\r\n")
                    yield TokenInfo(
                        COMMENT,
                        comment_token,
                        (lnum, pos),
                        (lnum, pos + len(comment_token)),
                        line,
                    )
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:], (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:  # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line),
                    )
                indents = indents[:-1]

                yield TokenInfo(DEDENT, "", (lnum, pos), (lnum, pos), line)

        else:  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:  # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if initial in numchars or (  # ordinary number
                    initial == "." and token != "." and token != "..."
                ):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in "\r\n":
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)
                elif initial == "#":
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:  # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)  # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if
                # they're in the single_quoted set. If so, they start
                # a string.
                # We're using the first 3, because we're looking for
                # "rb'" (for example) at the start of the token. If
                # we switch to longer prefixes, this needs to be
                # adjusted.
                # Note that initial == token[:1].
                # Also note that single quote checking must come after
                # triple quote checking (above).
                elif (
                    initial in single_quoted
                    or token[:2] in single_quoted
                    or token[:3] in single_quoted
                ):
                    if token[-1] == "\n":  # continued string
                        strstart = (lnum, start)
                        # Again, using the first 3 chars of the
                        # token. This is looking for the matching end
                        # regex for the correct type of quote
                        # character. So it's really looking for
                        # endpats["'"] or endpats['"'], by trying to
                        # skip string prefix characters, if any.
                        endprog = _compile(
                            endpats.get(initial)
                            or endpats.get(token[1])
                            or endpats.get(token[2])
                        )
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():  # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == "\\":  # continued stmt
                    continued = 1
                else:
                    if initial in "([{":
                        parenlev += 1
                    elif initial in ")]}":
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(
                    ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line
                )
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if (
        last_line
        and last_line[-1] not in "\r\n"
        and not last_line.strip().startswith("#")
    ):
        yield TokenInfo(
            NEWLINE, "", (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), ""
        )
    for indent in indents[1:]:  # pop remaining indent levels
        yield TokenInfo(DEDENT, "", (lnum, 0), (lnum, 0), "")
    yield TokenInfo(ENDMARKER, "", (lnum, 0), (lnum, 0), "")


def generate_tokens(readline):
    """Tokenize a source reading Python code as unicode strings.

    This has the same API as tokenize(), except that it expects the *readline*
    callable to return str objects instead of bytes.
    """
    return _tokenize(readline, None)
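

# Hedged usage sketch (illustrative, not part of the vendored code): unlike
# tokenize(), no ENCODING token is emitted because the input is already str.
#
#     >>> from io import StringIO
#     >>> [tok_name[t.type] for t in generate_tokens(StringIO("x = 1\n").readline)]
#     ['NAME', 'OP', 'NUMBER', 'NEWLINE', 'ENDMARKER']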


def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        sys.stderr.write(message)
        sys.stderr.write("\n")

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog="python -m tokenize")
    parser.add_argument(
        dest="filename",
        nargs="?",
        metavar="filename.py",
        help="the file to tokenize; defaults to stdin",
    )
    parser.add_argument(
        "-e",
        "--exact",
        dest="exact",
        action="store_true",
        help="display token names using the exact type",
    )
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, "rb") as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise


if __name__ == "__main__":
    main()