Python-tokenize.c

#include "Python.h"
#include "errcode.h"
#include "../Parser/tokenizer.h"
#include "../Parser/pegen.h"            // _PyPegen_byte_offset_to_character_offset()
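/* This extension module backs the pure-Python tokenize module. A rough
 * sketch of the Python-side usage (see _generate_tokens_from_c_tokenizer
 * in Lib/tokenize.py for the actual call site):
 *
 *     import _tokenize
 *     it = _tokenize.TokenizerIter(readline, extra_tokens=True)
 *     for type, string, start, end, line in it:
 *         ...
 */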
static struct PyModuleDef _tokenizemodule;

typedef struct {
    PyTypeObject *TokenizerIter;
} tokenize_state;

static tokenize_state *
get_tokenize_state(PyObject *module) {
    return (tokenize_state *)PyModule_GetState(module);
}

#define _tokenize_get_state_by_type(type) \
    get_tokenize_state(PyType_GetModuleByDef(type, &_tokenizemodule))

#include "pycore_runtime.h"
#include "clinic/Python-tokenize.c.h"

/*[clinic input]
module _tokenizer
class _tokenizer.tokenizeriter "tokenizeriterobject *" "_tokenize_get_state_by_type(type)->TokenizerIter"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=96d98ee2fef7a8bc]*/
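
/* Per-iterator state: wraps the C tokenizer (struct tok_state) and caches
   the most recently decoded source line, so consecutive tokens on the same
   line do not decode it again. */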
typedef struct
{
    PyObject_HEAD struct tok_state *tok;
    int done;

    /* Needed to cache line for performance */
    PyObject *last_line;
    Py_ssize_t last_lineno;
    Py_ssize_t byte_col_offset_diff;
} tokenizeriterobject;

/*[clinic input]
@classmethod
_tokenizer.tokenizeriter.__new__ as tokenizeriter_new

    readline: object
    /
    *
    extra_tokens: bool
    encoding: str(c_default="NULL") = 'utf-8'
[clinic start generated code]*/
static PyObject *
tokenizeriter_new_impl(PyTypeObject *type, PyObject *readline,
                       int extra_tokens, const char *encoding)
/*[clinic end generated code: output=7501a1211683ce16 input=f7dddf8a613ae8bd]*/
{
    tokenizeriterobject *self = (tokenizeriterobject *)type->tp_alloc(type, 0);
    if (self == NULL) {
        return NULL;
    }
    PyObject *filename = PyUnicode_FromString("<string>");
    if (filename == NULL) {
        return NULL;
    }
    self->tok = _PyTokenizer_FromReadline(readline, encoding, 1, 1);
    if (self->tok == NULL) {
        Py_DECREF(filename);
        return NULL;
    }
    self->tok->filename = filename;
    if (extra_tokens) {
        self->tok->tok_extra_tokens = 1;
    }
    self->done = 0;

    self->last_line = NULL;
    self->byte_col_offset_diff = 0;
    self->last_lineno = 0;

    return (PyObject *)self;
}
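
/* Translate the tokenizer's error state (tok->done) into a Python exception.
   The caller (tokenizeriter_next) only relies on an exception being set
   afterwards, not on the return value. */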
static int
_tokenizer_error(struct tok_state *tok)
{
    if (PyErr_Occurred()) {
        return -1;
    }

    const char *msg = NULL;
    PyObject* errtype = PyExc_SyntaxError;
    switch (tok->done) {
        case E_TOKEN:
            msg = "invalid token";
            break;
        case E_EOF:
            PyErr_SetString(PyExc_SyntaxError, "unexpected EOF in multi-line statement");
            PyErr_SyntaxLocationObject(tok->filename, tok->lineno,
                                       tok->inp - tok->buf < 0 ? 0 : (int)(tok->inp - tok->buf));
            return -1;
        case E_DEDENT:
            msg = "unindent does not match any outer indentation level";
            errtype = PyExc_IndentationError;
            break;
        case E_INTR:
            if (!PyErr_Occurred()) {
                PyErr_SetNone(PyExc_KeyboardInterrupt);
            }
            return -1;
        case E_NOMEM:
            PyErr_NoMemory();
            return -1;
        case E_TABSPACE:
            errtype = PyExc_TabError;
            msg = "inconsistent use of tabs and spaces in indentation";
            break;
        case E_TOODEEP:
            errtype = PyExc_IndentationError;
            msg = "too many levels of indentation";
            break;
        case E_LINECONT: {
            msg = "unexpected character after line continuation character";
            break;
        }
        default:
            msg = "unknown tokenization error";
    }

    PyObject* errstr = NULL;
    PyObject* error_line = NULL;
    PyObject* tmp = NULL;
    PyObject* value = NULL;
    int result = 0;

    Py_ssize_t size = tok->inp - tok->buf;
    assert(tok->buf[size-1] == '\n');
    size -= 1; // Remove the newline character from the end of the line
    error_line = PyUnicode_DecodeUTF8(tok->buf, size, "replace");
    if (!error_line) {
        result = -1;
        goto exit;
    }

    Py_ssize_t offset = _PyPegen_byte_offset_to_character_offset(error_line, tok->inp - tok->buf);
    if (offset == -1) {
        result = -1;
        goto exit;
    }
    tmp = Py_BuildValue("(OnnOOO)", tok->filename, tok->lineno, offset, error_line, Py_None, Py_None);
    if (!tmp) {
        result = -1;
        goto exit;
    }

    errstr = PyUnicode_FromString(msg);
    if (!errstr) {
        result = -1;
        goto exit;
    }

    value = PyTuple_Pack(2, errstr, tmp);
    if (!value) {
        result = -1;
        goto exit;
    }
    PyErr_SetObject(errtype, value);

exit:
    Py_XDECREF(errstr);
    Py_XDECREF(error_line);
    Py_XDECREF(tmp);
    Py_XDECREF(value);
    return result;
}
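
/* tp_iternext implementation: pull one token from the C tokenizer and build
   the 5-tuple (type, string, (start_lineno, start_col), (end_lineno, end_col),
   line) that Lib/tokenize.py turns into a TokenInfo. Column offsets are in
   characters, converted from the tokenizer's byte offsets via the cached
   byte_col_offset_diff. */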
static PyObject *
tokenizeriter_next(tokenizeriterobject *it)
{
    PyObject* result = NULL;

    struct token token;
    _PyToken_Init(&token);

    int type = _PyTokenizer_Get(it->tok, &token);
    if (type == ERRORTOKEN) {
        if (!PyErr_Occurred()) {
            _tokenizer_error(it->tok);
            assert(PyErr_Occurred());
        }
        goto exit;
    }
    if (it->done || type == ERRORTOKEN) {
        PyErr_SetString(PyExc_StopIteration, "EOF");
        it->done = 1;
        goto exit;
    }

    PyObject *str = NULL;
    if (token.start == NULL || token.end == NULL) {
        str = PyUnicode_FromString("");
    }
    else {
        str = PyUnicode_FromStringAndSize(token.start, token.end - token.start);
    }
    if (str == NULL) {
        goto exit;
    }

    int is_trailing_token = 0;
    if (type == ENDMARKER || (type == DEDENT && it->tok->done == E_EOF)) {
        is_trailing_token = 1;
    }

    const char *line_start = ISSTRINGLIT(type) ? it->tok->multi_line_start : it->tok->line_start;
    PyObject *line = NULL;
    if (it->tok->tok_extra_tokens && is_trailing_token) {
        line = PyUnicode_FromString("");
    }
    else {
        Py_ssize_t size = it->tok->inp - line_start;
        if (size >= 1 && it->tok->implicit_newline) {
            size -= 1;
        }

        if (it->tok->lineno != it->last_lineno) {
            // Line has changed since last token, so we fetch the new line and cache it
            // in the iter object.
            Py_XDECREF(it->last_line);
            line = PyUnicode_DecodeUTF8(line_start, size, "replace");
            it->last_line = line;
            it->byte_col_offset_diff = 0;
        }
        else {
            // Line hasn't changed so we reuse the cached one.
            line = it->last_line;
        }
    }
    if (line == NULL) {
        Py_DECREF(str);
        goto exit;
    }

    Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
    Py_ssize_t end_lineno = it->tok->lineno;
    it->last_lineno = lineno;

    Py_ssize_t col_offset = -1;
    Py_ssize_t end_col_offset = -1;
    Py_ssize_t byte_offset = -1;
    if (token.start != NULL && token.start >= line_start) {
        byte_offset = token.start - line_start;
        col_offset = byte_offset - it->byte_col_offset_diff;
    }
    if (token.end != NULL && token.end >= it->tok->line_start) {
        Py_ssize_t end_byte_offset = token.end - it->tok->line_start;
        if (lineno == end_lineno) {
            // If the whole token is at the same line, we can just use the token.start
            // buffer for figuring out the new column offset, since using line is not
            // performant for very long lines.
            Py_ssize_t token_col_offset = _PyPegen_byte_offset_to_character_offset_line(line, byte_offset, end_byte_offset);
            end_col_offset = col_offset + token_col_offset;
            it->byte_col_offset_diff += token.end - token.start - token_col_offset;
        }
        else {
            end_col_offset = _PyPegen_byte_offset_to_character_offset_raw(it->tok->line_start, end_byte_offset);
            it->byte_col_offset_diff += end_byte_offset - end_col_offset;
        }
    }

    if (it->tok->tok_extra_tokens) {
        if (is_trailing_token) {
            lineno = end_lineno = lineno + 1;
            col_offset = end_col_offset = 0;
        }
        // Necessary adjustments to match the original Python tokenize
        // implementation
        if (type > DEDENT && type < OP) {
            type = OP;
        }
        else if (type == ASYNC || type == AWAIT) {
            type = NAME;
        }
        else if (type == NEWLINE) {
            Py_DECREF(str);
            if (!it->tok->implicit_newline) {
                if (it->tok->start[0] == '\r') {
                    str = PyUnicode_FromString("\r\n");
                }
                else {
                    str = PyUnicode_FromString("\n");
                }
            }
            else {
                // An implicit NEWLINE has no text in the source; without this
                // branch `str` would be left dangling after the Py_DECREF above.
                str = PyUnicode_FromString("");
            }
            end_col_offset++;
        }
        else if (type == NL) {
            if (it->tok->implicit_newline) {
                Py_DECREF(str);
                str = PyUnicode_FromString("");
            }
        }

        if (str == NULL) {
            Py_DECREF(line);
            goto exit;
        }
    }

    result = Py_BuildValue("(iN(nn)(nn)O)", type, str, lineno, col_offset, end_lineno, end_col_offset, line);

exit:
    _PyToken_Free(&token);
    if (type == ENDMARKER) {
        it->done = 1;
    }
    return result;
}
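
/* Heap-type dealloc: free the cached line and the C tokenizer, then release
   the reference that heap-type instances hold on their type. */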
static void
tokenizeriter_dealloc(tokenizeriterobject *it)
{
    PyTypeObject *tp = Py_TYPE(it);
    Py_XDECREF(it->last_line);
    _PyTokenizer_Free(it->tok);
    tp->tp_free(it);
    Py_DECREF(tp);
}

static PyType_Slot tokenizeriter_slots[] = {
    {Py_tp_new, tokenizeriter_new},
    {Py_tp_dealloc, tokenizeriter_dealloc},
    {Py_tp_getattro, PyObject_GenericGetAttr},
    {Py_tp_iter, PyObject_SelfIter},
    {Py_tp_iternext, tokenizeriter_next},
    {0, NULL},
};

static PyType_Spec tokenizeriter_spec = {
    .name = "_tokenize.TokenizerIter",
    .basicsize = sizeof(tokenizeriterobject),
    .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_IMMUTABLETYPE),
    .slots = tokenizeriter_slots,
};
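
/* Module exec slot: create the TokenizerIter heap type from its spec and
   expose it as an attribute of the module. */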
static int
tokenizemodule_exec(PyObject *m)
{
    tokenize_state *state = get_tokenize_state(m);
    if (state == NULL) {
        return -1;
    }

    state->TokenizerIter = (PyTypeObject *)PyType_FromModuleAndSpec(m, &tokenizeriter_spec, NULL);
    if (state->TokenizerIter == NULL) {
        return -1;
    }
    if (PyModule_AddType(m, state->TokenizerIter) < 0) {
        return -1;
    }

    return 0;
}

static PyMethodDef tokenize_methods[] = {
    {NULL, NULL, 0, NULL} /* Sentinel */
};

static PyModuleDef_Slot tokenizemodule_slots[] = {
    {Py_mod_exec, tokenizemodule_exec},
    {Py_mod_multiple_interpreters, Py_MOD_PER_INTERPRETER_GIL_SUPPORTED},
    {0, NULL}
};

static int
tokenizemodule_traverse(PyObject *m, visitproc visit, void *arg)
{
    tokenize_state *state = get_tokenize_state(m);
    Py_VISIT(state->TokenizerIter);
    return 0;
}

static int
tokenizemodule_clear(PyObject *m)
{
    tokenize_state *state = get_tokenize_state(m);
    Py_CLEAR(state->TokenizerIter);
    return 0;
}

static void
tokenizemodule_free(void *m)
{
    tokenizemodule_clear((PyObject *)m);
}

static struct PyModuleDef _tokenizemodule = {
    PyModuleDef_HEAD_INIT,
    .m_name = "_tokenize",
    .m_size = sizeof(tokenize_state),
    .m_slots = tokenizemodule_slots,
    .m_methods = tokenize_methods,
    .m_traverse = tokenizemodule_traverse,
    .m_clear = tokenizemodule_clear,
    .m_free = tokenizemodule_free,
};

PyMODINIT_FUNC
PyInit__tokenize(void)
{
    return PyModuleDef_Init(&_tokenizemodule);
}