/* tokenizer.c — CPython tokenizer implementation (excerpt). */
  1. /* Tokenizer implementation */
  2. #define PY_SSIZE_T_CLEAN
  3. #include "Python.h"
  4. #include "pycore_call.h" // _PyObject_CallNoArgs()
  5. #include <ctype.h>
  6. #include <assert.h>
  7. #include "tokenizer.h"
  8. #include "errcode.h"
  9. /* Alternate tab spacing */
  10. #define ALTTABSIZE 1
/* True for characters that may begin an identifier: ASCII letters, '_', or
   any byte >= 128 (full Unicode validity is checked later by the parser). */
#define is_potential_identifier_start(c) (\
    (c >= 'a' && c <= 'z')\
    || (c >= 'A' && c <= 'Z')\
    || c == '_'\
    || (c >= 128))
/* Same as above, but also allows decimal digits (identifier continuation). */
#define is_potential_identifier_char(c) (\
    (c >= 'a' && c <= 'z')\
    || (c >= 'A' && c <= 'Z')\
    || (c >= '0' && c <= '9')\
    || c == '_'\
    || (c >= 128))
/* Don't ever change this -- it would break the portability of Python code */
#define TABSIZE 8
/* Finalize `token` with the given type, spanning p_start..p_end. */
#define MAKE_TOKEN(token_type) token_setup(tok, token, token_type, p_start, p_end)
/* Same, but with explicit column offsets, for "# type:" comment tokens. */
#define MAKE_TYPE_COMMENT_TOKEN(token_type, col_offset, end_col_offset) (\
    type_comment_token_setup(tok, token, token_type, col_offset, end_col_offset, p_start, p_end))
/* Move to the next source line: bump lineno and reset the column counter. */
#define ADVANCE_LINENO() \
    tok->lineno++; \
    tok->col_offset = 0;
/* Nonzero while tokenizing inside an f-string, resp. inside a {...}
   replacement field of an f-string. */
#define INSIDE_FSTRING(tok) (tok->tok_mode_stack_index > 0)
#define INSIDE_FSTRING_EXPR(tok) (tok->curly_bracket_expr_start_depth >= 0)
#ifdef Py_DEBUG
/* Debug builds: bounds-checked accessors for the f-string mode stack. */
/* Return the currently active tokenizer mode (top of the mode stack). */
static inline tokenizer_mode* TOK_GET_MODE(struct tok_state* tok) {
    assert(tok->tok_mode_stack_index >= 0);
    assert(tok->tok_mode_stack_index < MAXFSTRINGLEVEL);
    return &(tok->tok_mode_stack[tok->tok_mode_stack_index]);
}
/* Push a new mode onto the stack and return a pointer to it. */
static inline tokenizer_mode* TOK_NEXT_MODE(struct tok_state* tok) {
    assert(tok->tok_mode_stack_index >= 0);
    assert(tok->tok_mode_stack_index + 1 < MAXFSTRINGLEVEL);
    return &(tok->tok_mode_stack[++tok->tok_mode_stack_index]);
}
#else
/* Release builds: same semantics, without the bounds checks. */
#define TOK_GET_MODE(tok) (&(tok->tok_mode_stack[tok->tok_mode_stack_index]))
#define TOK_NEXT_MODE(tok) (&(tok->tok_mode_stack[++tok->tok_mode_stack_index]))
#endif
/* Forward */
static struct tok_state *tok_new(void);
static int tok_nextc(struct tok_state *tok);
static void tok_backup(struct tok_state *tok, int c);
static int syntaxerror(struct tok_state *tok, const char *format, ...);
/* Spaces in this constant are treated as "zero or more spaces or tabs" when
   tokenizing. */
static const char* type_comment_prefix = "# type: ";
  55. /* Create and initialize a new tok_state structure */
  56. static struct tok_state *
  57. tok_new(void)
  58. {
  59. struct tok_state *tok = (struct tok_state *)PyMem_Malloc(
  60. sizeof(struct tok_state));
  61. if (tok == NULL)
  62. return NULL;
  63. tok->buf = tok->cur = tok->inp = NULL;
  64. tok->fp_interactive = 0;
  65. tok->interactive_src_start = NULL;
  66. tok->interactive_src_end = NULL;
  67. tok->start = NULL;
  68. tok->end = NULL;
  69. tok->done = E_OK;
  70. tok->fp = NULL;
  71. tok->input = NULL;
  72. tok->tabsize = TABSIZE;
  73. tok->indent = 0;
  74. tok->indstack[0] = 0;
  75. tok->atbol = 1;
  76. tok->pendin = 0;
  77. tok->prompt = tok->nextprompt = NULL;
  78. tok->lineno = 0;
  79. tok->starting_col_offset = -1;
  80. tok->col_offset = -1;
  81. tok->level = 0;
  82. tok->altindstack[0] = 0;
  83. tok->decoding_state = STATE_INIT;
  84. tok->decoding_erred = 0;
  85. tok->enc = NULL;
  86. tok->encoding = NULL;
  87. tok->cont_line = 0;
  88. tok->filename = NULL;
  89. tok->decoding_readline = NULL;
  90. tok->decoding_buffer = NULL;
  91. tok->readline = NULL;
  92. tok->type_comments = 0;
  93. tok->async_hacks = 0;
  94. tok->async_def = 0;
  95. tok->async_def_indent = 0;
  96. tok->async_def_nl = 0;
  97. tok->interactive_underflow = IUNDERFLOW_NORMAL;
  98. tok->str = NULL;
  99. tok->report_warnings = 1;
  100. tok->tok_extra_tokens = 0;
  101. tok->comment_newline = 0;
  102. tok->implicit_newline = 0;
  103. tok->tok_mode_stack[0] = (tokenizer_mode){.kind =TOK_REGULAR_MODE, .f_string_quote='\0', .f_string_quote_size = 0, .f_string_debug=0};
  104. tok->tok_mode_stack_index = 0;
  105. #ifdef Py_DEBUG
  106. tok->debug = _Py_GetConfig()->parser_debug;
  107. #endif
  108. return tok;
  109. }
  110. static char *
  111. new_string(const char *s, Py_ssize_t len, struct tok_state *tok)
  112. {
  113. char* result = (char *)PyMem_Malloc(len + 1);
  114. if (!result) {
  115. tok->done = E_NOMEM;
  116. return NULL;
  117. }
  118. memcpy(result, s, len);
  119. result[len] = '\0';
  120. return result;
  121. }
  122. static char *
  123. error_ret(struct tok_state *tok) /* XXX */
  124. {
  125. tok->decoding_erred = 1;
  126. if ((tok->fp != NULL || tok->readline != NULL) && tok->buf != NULL) {/* see _PyTokenizer_Free */
  127. PyMem_Free(tok->buf);
  128. }
  129. tok->buf = tok->cur = tok->inp = NULL;
  130. tok->start = NULL;
  131. tok->end = NULL;
  132. tok->done = E_DECODE;
  133. return NULL; /* as if it were EOF */
  134. }
  135. static const char *
  136. get_normal_name(const char *s) /* for utf-8 and latin-1 */
  137. {
  138. char buf[13];
  139. int i;
  140. for (i = 0; i < 12; i++) {
  141. int c = s[i];
  142. if (c == '\0')
  143. break;
  144. else if (c == '_')
  145. buf[i] = '-';
  146. else
  147. buf[i] = tolower(c);
  148. }
  149. buf[i] = '\0';
  150. if (strcmp(buf, "utf-8") == 0 ||
  151. strncmp(buf, "utf-8-", 6) == 0)
  152. return "utf-8";
  153. else if (strcmp(buf, "latin-1") == 0 ||
  154. strcmp(buf, "iso-8859-1") == 0 ||
  155. strcmp(buf, "iso-latin-1") == 0 ||
  156. strncmp(buf, "latin-1-", 8) == 0 ||
  157. strncmp(buf, "iso-8859-1-", 11) == 0 ||
  158. strncmp(buf, "iso-latin-1-", 12) == 0)
  159. return "iso-8859-1";
  160. else
  161. return s;
  162. }
/* Return the coding spec in S, or NULL if none is found. */
/* Scan one source line S (SIZE bytes) for a PEP 263 coding declaration
   ("coding: NAME" or "coding= NAME" inside a comment-only line).
   On return, *spec is a PyMem-allocated copy of the normalized encoding
   name, or NULL when the line carries no spec.  Returns 1 on success
   (including "no spec found"), 0 on memory failure (tok->done is set by
   new_string in that case). */
static int
get_coding_spec(const char *s, char **spec, Py_ssize_t size, struct tok_state *tok)
{
    Py_ssize_t i;
    *spec = NULL;
    /* Coding spec must be in a comment, and that comment must be
     * the only statement on the source code line. */
    for (i = 0; i < size - 6; i++) {
        if (s[i] == '#')
            break;
        /* Anything but whitespace before the '#' means this is a real
           statement line, so it cannot hold a coding spec. */
        if (s[i] != ' ' && s[i] != '\t' && s[i] != '\014')
            return 1;
    }
    for (; i < size - 6; i++) { /* XXX inefficient search */
        const char* t = s + i;
        if (memcmp(t, "coding", 6) == 0) {
            const char* begin = NULL;
            t += 6;
            /* "coding" must be followed by ':' or '=' ... */
            if (t[0] != ':' && t[0] != '=')
                continue;
            /* ... then optional spaces or tabs ... */
            do {
                t++;
            } while (t[0] == ' ' || t[0] == '\t');
            /* ... then the encoding name itself. */
            begin = t;
            while (Py_ISALNUM(t[0]) ||
                   t[0] == '-' || t[0] == '_' || t[0] == '.')
                t++;
            if (begin < t) {
                char* r = new_string(begin, t - begin, tok);
                const char* q;
                if (!r)
                    return 0;
                /* Map known aliases of utf-8 / latin-1 onto their
                   canonical names. */
                q = get_normal_name(r);
                if (r != q) {
                    PyMem_Free(r);
                    r = new_string(q, strlen(q), tok);
                    if (!r)
                        return 0;
                }
                *spec = r;
                break;
            }
        }
    }
    return 1;
}
/* Check whether the line contains a coding spec. If it does,
   invoke the set_readline function for the new encoding.
   This function receives the tok_state and the new encoding.
   Return 1 on success, 0 on failure. */
static int
check_coding_spec(const char* line, Py_ssize_t size, struct tok_state *tok,
                  int set_readline(struct tok_state *, const char *))
{
    char *cs;
    if (tok->cont_line) {
        /* It's a continuation line, so it can't be a coding spec. */
        tok->decoding_state = STATE_NORMAL;
        return 1;
    }
    if (!get_coding_spec(line, &cs, size, tok)) {
        return 0;
    }
    if (!cs) {
        /* No spec on this line.  If the line is anything other than a blank
           line or a comment, stop looking on subsequent lines. */
        Py_ssize_t i;
        for (i = 0; i < size; i++) {
            if (line[i] == '#' || line[i] == '\n' || line[i] == '\r')
                break;
            if (line[i] != ' ' && line[i] != '\t' && line[i] != '\014') {
                /* Stop checking coding spec after a line containing
                 * anything except a comment. */
                tok->decoding_state = STATE_NORMAL;
                break;
            }
        }
        return 1;
    }
    tok->decoding_state = STATE_NORMAL;
    if (tok->encoding == NULL) {
        assert(tok->decoding_readline == NULL);
        /* utf-8 needs no re-decoding; any other encoding switches input
           over to a decoding readline via set_readline. */
        if (strcmp(cs, "utf-8") != 0 && !set_readline(tok, cs)) {
            error_ret(tok);
            PyErr_Format(PyExc_SyntaxError, "encoding problem: %s", cs);
            PyMem_Free(cs);
            return 0;
        }
        /* Ownership of cs transfers to tok. */
        tok->encoding = cs;
    } else {                /* then, compare cs with BOM */
        /* A BOM already fixed the encoding; the declared one must agree. */
        if (strcmp(tok->encoding, cs) != 0) {
            error_ret(tok);
            PyErr_Format(PyExc_SyntaxError,
                         "encoding problem: %s with BOM", cs);
            PyMem_Free(cs);
            return 0;
        }
        PyMem_Free(cs);
    }
    return 1;
}
  263. /* See whether the file starts with a BOM. If it does,
  264. invoke the set_readline function with the new encoding.
  265. Return 1 on success, 0 on failure. */
  266. static int
  267. check_bom(int get_char(struct tok_state *),
  268. void unget_char(int, struct tok_state *),
  269. int set_readline(struct tok_state *, const char *),
  270. struct tok_state *tok)
  271. {
  272. int ch1, ch2, ch3;
  273. ch1 = get_char(tok);
  274. tok->decoding_state = STATE_SEEK_CODING;
  275. if (ch1 == EOF) {
  276. return 1;
  277. } else if (ch1 == 0xEF) {
  278. ch2 = get_char(tok);
  279. if (ch2 != 0xBB) {
  280. unget_char(ch2, tok);
  281. unget_char(ch1, tok);
  282. return 1;
  283. }
  284. ch3 = get_char(tok);
  285. if (ch3 != 0xBF) {
  286. unget_char(ch3, tok);
  287. unget_char(ch2, tok);
  288. unget_char(ch1, tok);
  289. return 1;
  290. }
  291. } else {
  292. unget_char(ch1, tok);
  293. return 1;
  294. }
  295. if (tok->encoding != NULL)
  296. PyMem_Free(tok->encoding);
  297. tok->encoding = new_string("utf-8", 5, tok);
  298. if (!tok->encoding)
  299. return 0;
  300. /* No need to set_readline: input is already utf-8 */
  301. return 1;
  302. }
/* Append LINE to the buffer that accumulates the full source of an
   interactive session (tok->interactive_src_start .. interactive_src_end).
   If LINE does not end in '\n', one is appended and tok->implicit_newline
   is set.  Returns 0 on success, -1 on memory failure (tok->done = E_NOMEM;
   the previously accumulated buffer is released in that case). */
static int
tok_concatenate_interactive_new_line(struct tok_state *tok, const char *line) {
    assert(tok->fp_interactive);
    if (!line) {
        return 0;
    }
    Py_ssize_t current_size = tok->interactive_src_end - tok->interactive_src_start;
    Py_ssize_t line_size = strlen(line);
    /* For an empty line this reads line[0], i.e. the terminating '\0'. */
    char last_char = line[line_size > 0 ? line_size - 1 : line_size];
    if (last_char != '\n') {
        /* Reserve room for the newline we will fake below. */
        line_size += 1;
    }
    char* new_str = tok->interactive_src_start;
    /* +1 for the trailing '\0'.  On failure, the original pointer is still
       valid and is freed through tok->interactive_src_start. */
    new_str = PyMem_Realloc(new_str, current_size + line_size + 1);
    if (!new_str) {
        if (tok->interactive_src_start) {
            PyMem_Free(tok->interactive_src_start);
        }
        tok->interactive_src_start = NULL;
        tok->interactive_src_end = NULL;
        tok->done = E_NOMEM;
        return -1;
    }
    strcpy(new_str + current_size, line);
    tok->implicit_newline = 0;
    if (last_char != '\n') {
        /* Last line does not end in \n, fake one */
        new_str[current_size + line_size - 1] = '\n';
        new_str[current_size + line_size] = '\0';
        tok->implicit_newline = 1;
    }
    tok->interactive_src_start = new_str;
    tok->interactive_src_end = new_str + current_size + line_size;
    return 0;
}
  338. /* Traverse and remember all f-string buffers, in order to be able to restore
  339. them after reallocating tok->buf */
  340. static void
  341. remember_fstring_buffers(struct tok_state *tok)
  342. {
  343. int index;
  344. tokenizer_mode *mode;
  345. for (index = tok->tok_mode_stack_index; index >= 0; --index) {
  346. mode = &(tok->tok_mode_stack[index]);
  347. mode->f_string_start_offset = mode->f_string_start - tok->buf;
  348. mode->f_string_multi_line_start_offset = mode->f_string_multi_line_start - tok->buf;
  349. }
  350. }
  351. /* Traverse and restore all f-string buffers after reallocating tok->buf */
  352. static void
  353. restore_fstring_buffers(struct tok_state *tok)
  354. {
  355. int index;
  356. tokenizer_mode *mode;
  357. for (index = tok->tok_mode_stack_index; index >= 0; --index) {
  358. mode = &(tok->tok_mode_stack[index]);
  359. mode->f_string_start = tok->buf + mode->f_string_start_offset;
  360. mode->f_string_multi_line_start = tok->buf + mode->f_string_multi_line_start_offset;
  361. }
  362. }
  363. static int
  364. set_fstring_expr(struct tok_state* tok, struct token *token, char c) {
  365. assert(token != NULL);
  366. assert(c == '}' || c == ':' || c == '!');
  367. tokenizer_mode *tok_mode = TOK_GET_MODE(tok);
  368. if (!tok_mode->f_string_debug || token->metadata) {
  369. return 0;
  370. }
  371. PyObject *res = NULL;
  372. // Check if there is a # character in the expression
  373. int hash_detected = 0;
  374. for (Py_ssize_t i = 0; i < tok_mode->last_expr_size - tok_mode->last_expr_end; i++) {
  375. if (tok_mode->last_expr_buffer[i] == '#') {
  376. hash_detected = 1;
  377. break;
  378. }
  379. }
  380. if (hash_detected) {
  381. Py_ssize_t input_length = tok_mode->last_expr_size - tok_mode->last_expr_end;
  382. char *result = (char *)PyObject_Malloc((input_length + 1) * sizeof(char));
  383. if (!result) {
  384. return -1;
  385. }
  386. Py_ssize_t i = 0;
  387. Py_ssize_t j = 0;
  388. for (i = 0, j = 0; i < input_length; i++) {
  389. if (tok_mode->last_expr_buffer[i] == '#') {
  390. // Skip characters until newline or end of string
  391. while (tok_mode->last_expr_buffer[i] != '\0' && i < input_length) {
  392. if (tok_mode->last_expr_buffer[i] == '\n') {
  393. result[j++] = tok_mode->last_expr_buffer[i];
  394. break;
  395. }
  396. i++;
  397. }
  398. } else {
  399. result[j++] = tok_mode->last_expr_buffer[i];
  400. }
  401. }
  402. result[j] = '\0'; // Null-terminate the result string
  403. res = PyUnicode_DecodeUTF8(result, j, NULL);
  404. PyObject_Free(result);
  405. } else {
  406. res = PyUnicode_DecodeUTF8(
  407. tok_mode->last_expr_buffer,
  408. tok_mode->last_expr_size - tok_mode->last_expr_end,
  409. NULL
  410. );
  411. }
  412. if (!res) {
  413. return -1;
  414. }
  415. token->metadata = res;
  416. return 0;
  417. }
  418. static int
  419. update_fstring_expr(struct tok_state *tok, char cur)
  420. {
  421. assert(tok->cur != NULL);
  422. Py_ssize_t size = strlen(tok->cur);
  423. tokenizer_mode *tok_mode = TOK_GET_MODE(tok);
  424. switch (cur) {
  425. case 0:
  426. if (!tok_mode->last_expr_buffer || tok_mode->last_expr_end >= 0) {
  427. return 1;
  428. }
  429. char *new_buffer = PyMem_Realloc(
  430. tok_mode->last_expr_buffer,
  431. tok_mode->last_expr_size + size
  432. );
  433. if (new_buffer == NULL) {
  434. PyMem_Free(tok_mode->last_expr_buffer);
  435. goto error;
  436. }
  437. tok_mode->last_expr_buffer = new_buffer;
  438. strncpy(tok_mode->last_expr_buffer + tok_mode->last_expr_size, tok->cur, size);
  439. tok_mode->last_expr_size += size;
  440. break;
  441. case '{':
  442. if (tok_mode->last_expr_buffer != NULL) {
  443. PyMem_Free(tok_mode->last_expr_buffer);
  444. }
  445. tok_mode->last_expr_buffer = PyMem_Malloc(size);
  446. if (tok_mode->last_expr_buffer == NULL) {
  447. goto error;
  448. }
  449. tok_mode->last_expr_size = size;
  450. tok_mode->last_expr_end = -1;
  451. strncpy(tok_mode->last_expr_buffer, tok->cur, size);
  452. break;
  453. case '}':
  454. case '!':
  455. case ':':
  456. if (tok_mode->last_expr_end == -1) {
  457. tok_mode->last_expr_end = strlen(tok->start);
  458. }
  459. break;
  460. default:
  461. Py_UNREACHABLE();
  462. }
  463. return 1;
  464. error:
  465. tok->done = E_NOMEM;
  466. return 0;
  467. }
  468. static void
  469. free_fstring_expressions(struct tok_state *tok)
  470. {
  471. int index;
  472. tokenizer_mode *mode;
  473. for (index = tok->tok_mode_stack_index; index >= 0; --index) {
  474. mode = &(tok->tok_mode_stack[index]);
  475. if (mode->last_expr_buffer != NULL) {
  476. PyMem_Free(mode->last_expr_buffer);
  477. mode->last_expr_buffer = NULL;
  478. mode->last_expr_size = 0;
  479. mode->last_expr_end = -1;
  480. }
  481. }
  482. }
  483. /* Read a line of text from TOK into S, using the stream in TOK.
  484. Return NULL on failure, else S.
  485. On entry, tok->decoding_buffer will be one of:
  486. 1) NULL: need to call tok->decoding_readline to get a new line
  487. 2) PyUnicodeObject *: decoding_feof has called tok->decoding_readline and
  488. stored the result in tok->decoding_buffer
  489. 3) PyByteArrayObject *: previous call to tok_readline_recode did not have enough room
  490. (in the s buffer) to copy entire contents of the line read
  491. by tok->decoding_readline. tok->decoding_buffer has the overflow.
  492. In this case, tok_readline_recode is called in a loop (with an expanded buffer)
  493. until the buffer ends with a '\n' (or until the end of the file is
  494. reached): see tok_nextc and its calls to tok_reserve_buf.
  495. */
/* Ensure there is room for at least SIZE additional bytes after tok->inp,
   growing tok->buf geometrically if necessary.  Every pointer into the old
   buffer (cur, inp, start, line_start, multi_line_start, and the f-string
   mode buffers) is rebased onto the new allocation.  Returns 1 on success,
   0 on memory failure (tok->done = E_NOMEM). */
static int
tok_reserve_buf(struct tok_state *tok, Py_ssize_t size)
{
    Py_ssize_t cur = tok->cur - tok->buf;
    Py_ssize_t oldsize = tok->inp - tok->buf;
    /* Grow by at least half the current size to amortize reallocations. */
    Py_ssize_t newsize = oldsize + Py_MAX(size, oldsize >> 1);
    if (newsize > tok->end - tok->buf) {
        char *newbuf = tok->buf;
        /* Convert pointers into offsets so they survive the realloc. */
        Py_ssize_t start = tok->start == NULL ? -1 : tok->start - tok->buf;
        Py_ssize_t line_start = tok->start == NULL ? -1 : tok->line_start - tok->buf;
        Py_ssize_t multi_line_start = tok->multi_line_start - tok->buf;
        remember_fstring_buffers(tok);
        newbuf = (char *)PyMem_Realloc(newbuf, newsize);
        if (newbuf == NULL) {
            tok->done = E_NOMEM;
            return 0;
        }
        tok->buf = newbuf;
        tok->cur = tok->buf + cur;
        tok->inp = tok->buf + oldsize;
        tok->end = tok->buf + newsize;
        tok->start = start < 0 ? NULL : tok->buf + start;
        tok->line_start = line_start < 0 ? NULL : tok->buf + line_start;
        tok->multi_line_start = multi_line_start < 0 ? NULL : tok->buf + multi_line_start;
        restore_fstring_buffers(tok);
    }
    return 1;
}
  524. static inline int
  525. contains_null_bytes(const char* str, size_t size) {
  526. return memchr(str, 0, size) != NULL;
  527. }
/* Read one line through tok->decoding_readline (a text-mode readline that
   decodes the declared source encoding), re-encode it as UTF-8 and append
   it to tok->buf.  Consumes tok->decoding_buffer first if a previous call
   left a line behind.  Returns 1 on success, 0 on failure (tok state set
   via error_ret / tok_reserve_buf). */
static int
tok_readline_recode(struct tok_state *tok) {
    PyObject *line;
    const char *buf;
    Py_ssize_t buflen;
    line = tok->decoding_buffer;
    if (line == NULL) {
        line = PyObject_CallNoArgs(tok->decoding_readline);
        if (line == NULL) {
            error_ret(tok);
            goto error;
        }
    }
    else {
        /* Take ownership of the leftover line from the previous call. */
        tok->decoding_buffer = NULL;
    }
    buf = PyUnicode_AsUTF8AndSize(line, &buflen);
    if (buf == NULL) {
        error_ret(tok);
        goto error;
    }
    // Make room for the null terminator *and* potentially
    // an extra newline character that we may need to artificially
    // add.
    size_t buffer_size = buflen + 2;
    if (!tok_reserve_buf(tok, buffer_size)) {
        goto error;
    }
    memcpy(tok->inp, buf, buflen);
    tok->inp += buflen;
    *tok->inp = '\0';
    /* Interactive sessions also keep a copy of the whole input so far. */
    if (tok->fp_interactive &&
        tok_concatenate_interactive_new_line(tok, buf) == -1) {
        goto error;
    }
    Py_DECREF(line);
    return 1;
error:
    Py_XDECREF(line);
    return 0;
}
/* Set the readline function for TOK to a StreamReader's
   readline function. The StreamReader is named ENC.
   This function is called from check_bom and check_coding_spec.
   ENC is usually identical to the future value of tok->encoding,
   except for the (currently unsupported) case of UTF-16.
   Return 1 on success, 0 on failure. */
static int
fp_setreadl(struct tok_state *tok, const char* enc)
{
    PyObject *readline, *open, *stream;
    int fd;
    long pos;
    fd = fileno(tok->fp);
    /* Due to buffering the file offset for fd can be different from the file
     * position of tok->fp. If tok->fp was opened in text mode on Windows,
     * its file position counts CRLF as one char and can't be directly mapped
     * to the file offset for fd. Instead we step back one byte and read to
     * the end of line.*/
    pos = ftell(tok->fp);
    if (pos == -1 ||
        lseek(fd, (off_t)(pos > 0 ? pos - 1 : pos), SEEK_SET) == (off_t)-1) {
        PyErr_SetFromErrnoWithFilename(PyExc_OSError, NULL);
        return 0;
    }
    /* Equivalent of: io.open(fd, "r", -1, enc, None, None, closefd=False) */
    open = _PyImport_GetModuleAttrString("io", "open");
    if (open == NULL) {
        return 0;
    }
    stream = PyObject_CallFunction(open, "isisOOO",
                                   fd, "r", -1, enc, Py_None, Py_None, Py_False);
    Py_DECREF(open);
    if (stream == NULL) {
        return 0;
    }
    readline = PyObject_GetAttr(stream, &_Py_ID(readline));
    Py_DECREF(stream);
    if (readline == NULL) {
        return 0;
    }
    Py_XSETREF(tok->decoding_readline, readline);
    if (pos > 0) {
        /* Consume (and discard) the partial line we stepped back into. */
        PyObject *bufobj = _PyObject_CallNoArgs(readline);
        if (bufobj == NULL) {
            return 0;
        }
        Py_DECREF(bufobj);
    }
    return 1;
}
  618. /* Fetch the next byte from TOK. */
  619. static int fp_getc(struct tok_state *tok) {
  620. return getc(tok->fp);
  621. }
  622. /* Unfetch the last byte back into TOK. */
  623. static void fp_ungetc(int c, struct tok_state *tok) {
  624. ungetc(c, tok->fp);
  625. }
  626. /* Check whether the characters at s start a valid
  627. UTF-8 sequence. Return the number of characters forming
  628. the sequence if yes, 0 if not. The special cases match
  629. those in stringlib/codecs.h:utf8_decode.
  630. */
  631. static int
  632. valid_utf8(const unsigned char* s)
  633. {
  634. int expected = 0;
  635. int length;
  636. if (*s < 0x80) {
  637. /* single-byte code */
  638. return 1;
  639. }
  640. else if (*s < 0xE0) {
  641. /* \xC2\x80-\xDF\xBF -- 0080-07FF */
  642. if (*s < 0xC2) {
  643. /* invalid sequence
  644. \x80-\xBF -- continuation byte
  645. \xC0-\xC1 -- fake 0000-007F */
  646. return 0;
  647. }
  648. expected = 1;
  649. }
  650. else if (*s < 0xF0) {
  651. /* \xE0\xA0\x80-\xEF\xBF\xBF -- 0800-FFFF */
  652. if (*s == 0xE0 && *(s + 1) < 0xA0) {
  653. /* invalid sequence
  654. \xE0\x80\x80-\xE0\x9F\xBF -- fake 0000-0800 */
  655. return 0;
  656. }
  657. else if (*s == 0xED && *(s + 1) >= 0xA0) {
  658. /* Decoding UTF-8 sequences in range \xED\xA0\x80-\xED\xBF\xBF
  659. will result in surrogates in range D800-DFFF. Surrogates are
  660. not valid UTF-8 so they are rejected.
  661. See https://www.unicode.org/versions/Unicode5.2.0/ch03.pdf
  662. (table 3-7) and http://www.rfc-editor.org/rfc/rfc3629.txt */
  663. return 0;
  664. }
  665. expected = 2;
  666. }
  667. else if (*s < 0xF5) {
  668. /* \xF0\x90\x80\x80-\xF4\x8F\xBF\xBF -- 10000-10FFFF */
  669. if (*(s + 1) < 0x90 ? *s == 0xF0 : *s == 0xF4) {
  670. /* invalid sequence -- one of:
  671. \xF0\x80\x80\x80-\xF0\x8F\xBF\xBF -- fake 0000-FFFF
  672. \xF4\x90\x80\x80- -- 110000- overflow */
  673. return 0;
  674. }
  675. expected = 3;
  676. }
  677. else {
  678. /* invalid start byte */
  679. return 0;
  680. }
  681. length = expected + 1;
  682. for (; expected; expected--)
  683. if (s[expected] < 0x80 || s[expected] >= 0xC0)
  684. return 0;
  685. return length;
  686. }
  687. static int
  688. ensure_utf8(char *line, struct tok_state *tok)
  689. {
  690. int badchar = 0;
  691. unsigned char *c;
  692. int length;
  693. for (c = (unsigned char *)line; *c; c += length) {
  694. if (!(length = valid_utf8(c))) {
  695. badchar = *c;
  696. break;
  697. }
  698. }
  699. if (badchar) {
  700. PyErr_Format(PyExc_SyntaxError,
  701. "Non-UTF-8 code starting with '\\x%.2x' "
  702. "in file %U on line %i, "
  703. "but no encoding declared; "
  704. "see https://peps.python.org/pep-0263/ for details",
  705. badchar, tok->filename, tok->lineno);
  706. return 0;
  707. }
  708. return 1;
  709. }
  710. /* Fetch a byte from TOK, using the string buffer. */
  711. static int
  712. buf_getc(struct tok_state *tok) {
  713. return Py_CHARMASK(*tok->str++);
  714. }
  715. /* Unfetch a byte from TOK, using the string buffer. */
  716. static void
  717. buf_ungetc(int c, struct tok_state *tok) {
  718. tok->str--;
  719. assert(Py_CHARMASK(*tok->str) == c); /* tok->cur may point to read-only segment */
  720. }
/* Set the readline function for TOK to ENC. For the string-based
   tokenizer, this means to just record the encoding — no codec
   machinery is needed for in-memory input.  Always succeeds. */
static int
buf_setreadl(struct tok_state *tok, const char* enc) {
    tok->enc = enc;
    return 1;
}
  728. /* Return a UTF-8 encoding Python string object from the
  729. C byte string STR, which is encoded with ENC. */
  730. static PyObject *
  731. translate_into_utf8(const char* str, const char* enc) {
  732. PyObject *utf8;
  733. PyObject* buf = PyUnicode_Decode(str, strlen(str), enc, NULL);
  734. if (buf == NULL)
  735. return NULL;
  736. utf8 = PyUnicode_AsUTF8String(buf);
  737. Py_DECREF(buf);
  738. return utf8;
  739. }
  740. static char *
  741. translate_newlines(const char *s, int exec_input, int preserve_crlf,
  742. struct tok_state *tok) {
  743. int skip_next_lf = 0;
  744. #if defined(__has_feature)
  745. # if __has_feature(memory_sanitizer)
  746. __msan_unpoison_string(s);
  747. # endif
  748. #endif
  749. size_t needed_length = strlen(s) + 2, final_length;
  750. char *buf, *current;
  751. char c = '\0';
  752. buf = PyMem_Malloc(needed_length);
  753. if (buf == NULL) {
  754. tok->done = E_NOMEM;
  755. return NULL;
  756. }
  757. for (current = buf; *s; s++, current++) {
  758. c = *s;
  759. if (skip_next_lf) {
  760. skip_next_lf = 0;
  761. if (c == '\n') {
  762. c = *++s;
  763. if (!c)
  764. break;
  765. }
  766. }
  767. if (!preserve_crlf && c == '\r') {
  768. skip_next_lf = 1;
  769. c = '\n';
  770. }
  771. *current = c;
  772. }
  773. /* If this is exec input, add a newline to the end of the string if
  774. there isn't one already. */
  775. if (exec_input && c != '\n' && c != '\0') {
  776. *current = '\n';
  777. current++;
  778. }
  779. *current = '\0';
  780. final_length = current - buf + 1;
  781. if (final_length < needed_length && final_length) {
  782. /* should never fail */
  783. char* result = PyMem_Realloc(buf, final_length);
  784. if (result == NULL) {
  785. PyMem_Free(buf);
  786. }
  787. buf = result;
  788. }
  789. return buf;
  790. }
/* Decode a byte string STR for use as the buffer of TOK.
   Look for encoding declarations inside STR, and record them
   inside TOK.  Returns the (possibly re-encoded) UTF-8 text, or NULL
   on error. */
static char *
decode_str(const char *input, int single, struct tok_state *tok, int preserve_crlf)
{
    PyObject* utf8 = NULL;
    char *str;
    const char *s;
    const char *newl[2] = {NULL, NULL};   /* ends of the first two lines */
    int lineno = 0;
    tok->input = str = translate_newlines(input, single, preserve_crlf, tok);
    if (str == NULL)
        return NULL;
    tok->enc = NULL;
    tok->str = str;
    /* check_bom may consume a BOM and record an encoding via buf_setreadl. */
    if (!check_bom(buf_getc, buf_ungetc, buf_setreadl, tok))
        return error_ret(tok);
    str = tok->str; /* string after BOM if any */
    assert(str);
    if (tok->enc != NULL) {
        /* The BOM established an encoding: re-encode everything as UTF-8. */
        utf8 = translate_into_utf8(str, tok->enc);
        if (utf8 == NULL)
            return error_ret(tok);
        str = PyBytes_AsString(utf8);
    }
    /* Locate the ends of the first two lines; a coding spec may only
       appear there (PEP 263). */
    for (s = str;; s++) {
        if (*s == '\0') break;
        else if (*s == '\n') {
            assert(lineno < 2);
            newl[lineno] = s;
            lineno++;
            if (lineno == 2) break;
        }
    }
    tok->enc = NULL;
    /* need to check line 1 and 2 separately since check_coding_spec
       assumes a single line as input */
    if (newl[0]) {
        if (!check_coding_spec(str, newl[0] - str, tok, buf_setreadl)) {
            return NULL;
        }
        /* The second line is only consulted when the first line yielded
           no encoding and the decoding state still allows one. */
        if (tok->enc == NULL && tok->decoding_state != STATE_NORMAL && newl[1]) {
            if (!check_coding_spec(newl[0]+1, newl[1] - newl[0],
                                   tok, buf_setreadl))
                return NULL;
        }
    }
    if (tok->enc != NULL) {
        /* A coding spec was found: re-encode from that encoding. */
        assert(utf8 == NULL);
        utf8 = translate_into_utf8(str, tok->enc);
        if (utf8 == NULL)
            return error_ret(tok);
        str = PyBytes_AS_STRING(utf8);
    }
    assert(tok->decoding_buffer == NULL);
    tok->decoding_buffer = utf8; /* CAUTION: keeps STR's backing bytes alive */
    return str;
}
  850. /* Set up tokenizer for string */
  851. struct tok_state *
  852. _PyTokenizer_FromString(const char *str, int exec_input, int preserve_crlf)
  853. {
  854. struct tok_state *tok = tok_new();
  855. char *decoded;
  856. if (tok == NULL)
  857. return NULL;
  858. decoded = decode_str(str, exec_input, tok, preserve_crlf);
  859. if (decoded == NULL) {
  860. _PyTokenizer_Free(tok);
  861. return NULL;
  862. }
  863. tok->buf = tok->cur = tok->inp = decoded;
  864. tok->end = decoded;
  865. return tok;
  866. }
  867. struct tok_state *
  868. _PyTokenizer_FromReadline(PyObject* readline, const char* enc,
  869. int exec_input, int preserve_crlf)
  870. {
  871. struct tok_state *tok = tok_new();
  872. if (tok == NULL)
  873. return NULL;
  874. if ((tok->buf = (char *)PyMem_Malloc(BUFSIZ)) == NULL) {
  875. _PyTokenizer_Free(tok);
  876. return NULL;
  877. }
  878. tok->cur = tok->inp = tok->buf;
  879. tok->end = tok->buf + BUFSIZ;
  880. tok->fp = NULL;
  881. if (enc != NULL) {
  882. tok->encoding = new_string(enc, strlen(enc), tok);
  883. if (!tok->encoding) {
  884. _PyTokenizer_Free(tok);
  885. return NULL;
  886. }
  887. }
  888. tok->decoding_state = STATE_NORMAL;
  889. Py_INCREF(readline);
  890. tok->readline = readline;
  891. return tok;
  892. }
  893. /* Set up tokenizer for UTF-8 string */
  894. struct tok_state *
  895. _PyTokenizer_FromUTF8(const char *str, int exec_input, int preserve_crlf)
  896. {
  897. struct tok_state *tok = tok_new();
  898. char *translated;
  899. if (tok == NULL)
  900. return NULL;
  901. tok->input = translated = translate_newlines(str, exec_input, preserve_crlf, tok);
  902. if (translated == NULL) {
  903. _PyTokenizer_Free(tok);
  904. return NULL;
  905. }
  906. tok->decoding_state = STATE_NORMAL;
  907. tok->enc = NULL;
  908. tok->str = translated;
  909. tok->encoding = new_string("utf-8", 5, tok);
  910. if (!tok->encoding) {
  911. _PyTokenizer_Free(tok);
  912. return NULL;
  913. }
  914. tok->buf = tok->cur = tok->inp = translated;
  915. tok->end = translated;
  916. return tok;
  917. }
  918. /* Set up tokenizer for file */
  919. struct tok_state *
  920. _PyTokenizer_FromFile(FILE *fp, const char* enc,
  921. const char *ps1, const char *ps2)
  922. {
  923. struct tok_state *tok = tok_new();
  924. if (tok == NULL)
  925. return NULL;
  926. if ((tok->buf = (char *)PyMem_Malloc(BUFSIZ)) == NULL) {
  927. _PyTokenizer_Free(tok);
  928. return NULL;
  929. }
  930. tok->cur = tok->inp = tok->buf;
  931. tok->end = tok->buf + BUFSIZ;
  932. tok->fp = fp;
  933. tok->prompt = ps1;
  934. tok->nextprompt = ps2;
  935. if (enc != NULL) {
  936. /* Must copy encoding declaration since it
  937. gets copied into the parse tree. */
  938. tok->encoding = new_string(enc, strlen(enc), tok);
  939. if (!tok->encoding) {
  940. _PyTokenizer_Free(tok);
  941. return NULL;
  942. }
  943. tok->decoding_state = STATE_NORMAL;
  944. }
  945. return tok;
  946. }
/* Free a tok_state structure */
void
_PyTokenizer_Free(struct tok_state *tok)
{
    if (tok->encoding != NULL) {
        PyMem_Free(tok->encoding);
    }
    Py_XDECREF(tok->decoding_readline);
    Py_XDECREF(tok->decoding_buffer);
    Py_XDECREF(tok->readline);
    Py_XDECREF(tok->filename);
    /* tok->readline was decref'ed above but its pointer value is only
       compared, never dereferenced, here.  For string tokenizers
       tok->buf aliases tok->input (freed below), so it must not be
       freed a second time — hence the readline/fp guard. */
    if ((tok->readline != NULL || tok->fp != NULL ) && tok->buf != NULL) {
        PyMem_Free(tok->buf);
    }
    if (tok->input) {
        PyMem_Free(tok->input);
    }
    if (tok->interactive_src_start != NULL) {
        PyMem_Free(tok->interactive_src_start);
    }
    free_fstring_expressions(tok);
    PyMem_Free(tok);
}
/* Release the resources owned by TOKEN (currently only its metadata). */
void
_PyToken_Free(struct token *token) {
    /* NULL-safe: metadata may never have been set. */
    Py_XDECREF(token->metadata);
}
/* Initialize TOKEN to a safe empty state so _PyToken_Free can always
   be called on it. */
void
_PyToken_Init(struct token *token) {
    token->metadata = NULL;
}
static int
tok_readline_raw(struct tok_state *tok)
{
    /* Append raw (undecoded) lines from tok->fp to the token buffer,
       growing it as needed, until a complete line ending in '\n' has
       been read or the input is exhausted.  Returns 1 on success or
       end of input, 0 on error. */
    do {
        if (!tok_reserve_buf(tok, BUFSIZ)) {
            return 0;
        }
        /* Free space between the write pointer and the end of buffer. */
        int n_chars = (int)(tok->end - tok->inp);
        size_t line_size = 0;
        char *line = _Py_UniversalNewlineFgetsWithSize(tok->inp, n_chars, tok->fp, NULL, &line_size);
        if (line == NULL) {
            /* No more data; not an error — the caller inspects the buffer. */
            return 1;
        }
        if (tok->fp_interactive &&
            tok_concatenate_interactive_new_line(tok, line) == -1) {
            return 0;
        }
        tok->inp += line_size;
        if (tok->inp == tok->buf) {
            /* Nothing was ever read. */
            return 0;
        }
    } while (tok->inp[-1] != '\n');   /* keep going until a full line */
    return 1;
}
static int
tok_readline_string(struct tok_state* tok) {
    /* Fetch one line from the user-supplied readline callable and append
       its UTF-8 form to the token buffer.  Returns 1 on success or on
       clean exhaustion (StopIteration), 0 on error. */
    PyObject* line = NULL;
    PyObject* raw_line = PyObject_CallNoArgs(tok->readline);
    if (raw_line == NULL) {
        if (PyErr_ExceptionMatches(PyExc_StopIteration)) {
            /* End of input, not an error. */
            PyErr_Clear();
            return 1;
        }
        error_ret(tok);
        goto error;
    }
    if(tok->encoding != NULL) {
        /* With an explicit encoding, readline must yield bytes that we
           decode ourselves. */
        if (!PyBytes_Check(raw_line)) {
            PyErr_Format(PyExc_TypeError, "readline() returned a non-bytes object");
            error_ret(tok);
            goto error;
        }
        line = PyUnicode_Decode(PyBytes_AS_STRING(raw_line), PyBytes_GET_SIZE(raw_line),
                                tok->encoding, "replace");
        Py_CLEAR(raw_line);
        if (line == NULL) {
            error_ret(tok);
            goto error;
        }
    } else {
        /* Without an encoding, readline must yield str directly. */
        if(!PyUnicode_Check(raw_line)) {
            PyErr_Format(PyExc_TypeError, "readline() returned a non-string object");
            error_ret(tok);
            goto error;
        }
        line = raw_line;
        raw_line = NULL;   /* ownership transferred to line */
    }
    Py_ssize_t buflen;
    const char* buf = PyUnicode_AsUTF8AndSize(line, &buflen);
    if (buf == NULL) {
        error_ret(tok);
        goto error;
    }
    // Make room for the null terminator *and* potentially
    // an extra newline character that we may need to artificially
    // add.
    size_t buffer_size = buflen + 2;
    if (!tok_reserve_buf(tok, buffer_size)) {
        goto error;
    }
    memcpy(tok->inp, buf, buflen);
    tok->inp += buflen;
    *tok->inp = '\0';
    tok->line_start = tok->cur;
    Py_DECREF(line);
    return 1;
error:
    Py_XDECREF(raw_line);
    Py_XDECREF(line);
    return 0;
}
static int
tok_underflow_string(struct tok_state *tok) {
    /* Advance the string-backed tokenizer to the next line: move
       tok->inp past the next '\n' (or to the end of the data).
       Returns 0 at end of input with tok->done = E_EOF, 1 otherwise. */
    char *end = strchr(tok->inp, '\n');
    if (end != NULL) {
        end++;   /* include the newline in the line */
    }
    else {
        end = strchr(tok->inp, '\0');
        if (end == tok->inp) {
            tok->done = E_EOF;
            return 0;
        }
    }
    if (tok->start == NULL) {
        /* No token in progress: advance the buffer base to the current
           position (this also bounds how far tok_backup may rewind). */
        tok->buf = tok->cur;
    }
    tok->line_start = tok->cur;
    ADVANCE_LINENO();
    tok->inp = end;
    return 1;
}
static int
tok_underflow_interactive(struct tok_state *tok) {
    /* Refill the buffer for the interactive (prompted) tokenizer with
       one line from PyOS_Readline.  Returns 1 on success, 0 on error,
       EOF or interrupt (tok->done records which). */
    if (tok->interactive_underflow == IUNDERFLOW_STOP) {
        tok->done = E_INTERACT_STOP;
        return 1;
    }
    char *newtok = PyOS_Readline(tok->fp ? tok->fp : stdin, stdout, tok->prompt);
    if (newtok != NULL) {
        /* Normalize \r\n / \r line endings to \n. */
        char *translated = translate_newlines(newtok, 0, 0, tok);
        PyMem_Free(newtok);
        if (translated == NULL) {
            return 0;
        }
        newtok = translated;
    }
    if (tok->encoding && newtok && *newtok) {
        /* Recode to UTF-8 */
        Py_ssize_t buflen;
        const char* buf;
        PyObject *u = translate_into_utf8(newtok, tok->encoding);
        PyMem_Free(newtok);
        if (u == NULL) {
            tok->done = E_DECODE;
            return 0;
        }
        buflen = PyBytes_GET_SIZE(u);
        buf = PyBytes_AS_STRING(u);
        newtok = PyMem_Malloc(buflen+1);
        if (newtok == NULL) {
            Py_DECREF(u);
            tok->done = E_NOMEM;
            return 0;
        }
        strcpy(newtok, buf);
        Py_DECREF(u);
    }
    if (tok->fp_interactive &&
        tok_concatenate_interactive_new_line(tok, newtok) == -1) {
        PyMem_Free(newtok);
        return 0;
    }
    if (tok->nextprompt != NULL) {
        /* After the first line, switch to the continuation prompt. */
        tok->prompt = tok->nextprompt;
    }
    if (newtok == NULL) {
        /* Readline returned nothing: treat as interrupt. */
        tok->done = E_INTR;
    }
    else if (*newtok == '\0') {
        /* Empty line: end of input. */
        PyMem_Free(newtok);
        tok->done = E_EOF;
    }
    else if (tok->start != NULL) {
        /* A token is in progress: append the new line to the existing
           buffer, recomputing pointers that may move if the buffer is
           reallocated. */
        Py_ssize_t cur_multi_line_start = tok->multi_line_start - tok->buf;
        remember_fstring_buffers(tok);
        size_t size = strlen(newtok);
        ADVANCE_LINENO();
        if (!tok_reserve_buf(tok, size + 1)) {
            PyMem_Free(tok->buf);
            tok->buf = NULL;
            PyMem_Free(newtok);
            return 0;
        }
        memcpy(tok->cur, newtok, size + 1);
        PyMem_Free(newtok);
        tok->inp += size;
        tok->multi_line_start = tok->buf + cur_multi_line_start;
        restore_fstring_buffers(tok);
    }
    else {
        /* No token in progress: replace the buffer wholesale. */
        remember_fstring_buffers(tok);
        ADVANCE_LINENO();
        PyMem_Free(tok->buf);
        tok->buf = newtok;
        tok->cur = tok->buf;
        tok->line_start = tok->buf;
        tok->inp = strchr(tok->buf, '\0');
        tok->end = tok->inp + 1;
        restore_fstring_buffers(tok);
    }
    if (tok->done != E_OK) {
        if (tok->prompt != NULL) {
            PySys_WriteStderr("\n");
        }
        return 0;
    }
    if (tok->tok_mode_stack_index && !update_fstring_expr(tok, 0)) {
        return 0;
    }
    return 1;
}
static int
tok_underflow_file(struct tok_state *tok) {
    /* Refill the buffer for a file-backed tokenizer with at least one
       more line, handling BOM and coding-spec detection on the first
       reads.  Returns 1 on success, 0 on error or EOF. */
    if (tok->start == NULL && !INSIDE_FSTRING(tok)) {
        /* No token in progress: recycle the buffer from the top. */
        tok->cur = tok->inp = tok->buf;
    }
    if (tok->decoding_state == STATE_INIT) {
        /* We have not yet determined the encoding.
           If an encoding is found, use the file-pointer
           reader functions from now on. */
        if (!check_bom(fp_getc, fp_ungetc, fp_setreadl, tok)) {
            error_ret(tok);
            return 0;
        }
        assert(tok->decoding_state != STATE_INIT);
    }
    /* Read until '\n' or EOF */
    if (tok->decoding_readline != NULL) {
        /* We already have a codec associated with this input. */
        if (!tok_readline_recode(tok)) {
            return 0;
        }
    }
    else {
        /* We want a 'raw' read. */
        if (!tok_readline_raw(tok)) {
            return 0;
        }
    }
    if (tok->inp == tok->cur) {
        /* Nothing was read. */
        tok->done = E_EOF;
        return 0;
    }
    tok->implicit_newline = 0;
    if (tok->inp[-1] != '\n') {
        assert(tok->inp + 1 < tok->end);
        /* Last line does not end in \n, fake one */
        *tok->inp++ = '\n';
        *tok->inp = '\0';
        tok->implicit_newline = 1;
    }
    if (tok->tok_mode_stack_index && !update_fstring_expr(tok, 0)) {
        return 0;
    }
    ADVANCE_LINENO();
    if (tok->decoding_state != STATE_NORMAL) {
        /* A coding spec may only appear on the first two lines. */
        if (tok->lineno > 2) {
            tok->decoding_state = STATE_NORMAL;
        }
        else if (!check_coding_spec(tok->cur, strlen(tok->cur),
                                    tok, fp_setreadl))
        {
            return 0;
        }
    }
    /* The default encoding is UTF-8, so make sure we don't have any
       non-UTF-8 sequences in it. */
    if (!tok->encoding && !ensure_utf8(tok->cur, tok)) {
        error_ret(tok);
        return 0;
    }
    assert(tok->done == E_OK);
    return tok->done == E_OK;
}
static int
tok_underflow_readline(struct tok_state* tok) {
    /* Refill the buffer for a readline-backed tokenizer (see
       _PyTokenizer_FromReadline) with one more line.  Returns 1 on
       success, 0 on error or EOF. */
    assert(tok->decoding_state == STATE_NORMAL);
    assert(tok->fp == NULL && tok->input == NULL && tok->decoding_readline == NULL);
    if (tok->start == NULL && !INSIDE_FSTRING(tok)) {
        /* No token in progress: recycle the buffer from the top. */
        tok->cur = tok->inp = tok->buf;
    }
    if (!tok_readline_string(tok)) {
        return 0;
    }
    if (tok->inp == tok->cur) {
        /* Nothing was read. */
        tok->done = E_EOF;
        return 0;
    }
    tok->implicit_newline = 0;
    if (tok->inp[-1] != '\n') {
        assert(tok->inp + 1 < tok->end);
        /* Last line does not end in \n, fake one */
        *tok->inp++ = '\n';
        *tok->inp = '\0';
        tok->implicit_newline = 1;
    }
    if (tok->tok_mode_stack_index && !update_fstring_expr(tok, 0)) {
        return 0;
    }
    ADVANCE_LINENO();
    /* The default encoding is UTF-8, so make sure we don't have any
       non-UTF-8 sequences in it. */
    if (!tok->encoding && !ensure_utf8(tok->cur, tok)) {
        error_ret(tok);
        return 0;
    }
    assert(tok->done == E_OK);
    return tok->done == E_OK;
}
  1269. #if defined(Py_DEBUG)
  1270. static void
  1271. print_escape(FILE *f, const char *s, Py_ssize_t size)
  1272. {
  1273. if (s == NULL) {
  1274. fputs("NULL", f);
  1275. return;
  1276. }
  1277. putc('"', f);
  1278. while (size-- > 0) {
  1279. unsigned char c = *s++;
  1280. switch (c) {
  1281. case '\n': fputs("\\n", f); break;
  1282. case '\r': fputs("\\r", f); break;
  1283. case '\t': fputs("\\t", f); break;
  1284. case '\f': fputs("\\f", f); break;
  1285. case '\'': fputs("\\'", f); break;
  1286. case '"': fputs("\\\"", f); break;
  1287. default:
  1288. if (0x20 <= c && c <= 0x7f)
  1289. putc(c, f);
  1290. else
  1291. fprintf(f, "\\x%02x", c);
  1292. }
  1293. }
  1294. putc('"', f);
  1295. }
  1296. #endif
/* Get next char, updating state; error code goes into tok->done */
static int
tok_nextc(struct tok_state *tok)
{
    int rc;
    for (;;) {
        if (tok->cur != tok->inp) {
            /* Buffered data is available. */
            if ((unsigned int) tok->col_offset >= (unsigned int) INT_MAX) {
                tok->done = E_COLUMNOVERFLOW;
                return EOF;
            }
            tok->col_offset++;
            return Py_CHARMASK(*tok->cur++); /* Fast path */
        }
        if (tok->done != E_OK) {
            return EOF;
        }
        /* Buffer exhausted: refill according to the input source. */
        if (tok->readline) {
            rc = tok_underflow_readline(tok);
        }
        else if (tok->fp == NULL) {
            rc = tok_underflow_string(tok);
        }
        else if (tok->prompt != NULL) {
            rc = tok_underflow_interactive(tok);
        }
        else {
            rc = tok_underflow_file(tok);
        }
#if defined(Py_DEBUG)
        if (tok->debug) {
            fprintf(stderr, "line[%d] = ", tok->lineno);
            print_escape(stderr, tok->cur, tok->inp - tok->cur);
            fprintf(stderr, " tok->done = %d\n", tok->done);
        }
#endif
        if (!rc) {
            tok->cur = tok->inp;
            return EOF;
        }
        tok->line_start = tok->cur;
        /* Reject NUL bytes anywhere in the refilled line. */
        if (contains_null_bytes(tok->line_start, tok->inp - tok->line_start)) {
            syntaxerror(tok, "source code cannot contain null bytes");
            tok->cur = tok->inp;
            return EOF;
        }
    }
    Py_UNREACHABLE();
}
  1346. /* Back-up one character */
  1347. static void
  1348. tok_backup(struct tok_state *tok, int c)
  1349. {
  1350. if (c != EOF) {
  1351. if (--tok->cur < tok->buf) {
  1352. Py_FatalError("tokenizer beginning of buffer");
  1353. }
  1354. if ((int)(unsigned char)*tok->cur != Py_CHARMASK(c)) {
  1355. Py_FatalError("tok_backup: wrong character");
  1356. }
  1357. tok->col_offset--;
  1358. }
  1359. }
static int
_syntaxerror_range(struct tok_state *tok, const char *format,
                   int col_offset, int end_col_offset,
                   va_list vargs)
{
    /* Build and set a SyntaxError from FORMAT/VARGS spanning
       [col_offset, end_col_offset] on the current line (-1 means
       "end of the scanned text").  Always marks tok->done = E_ERROR
       and returns ERRORTOKEN. */
    // In release builds, we don't want to overwrite a previous error, but in debug builds we
    // want to fail if we are not doing it so we can fix it.
    assert(tok->done != E_ERROR);
    if (tok->done == E_ERROR) {
        return ERRORTOKEN;
    }
    PyObject *errmsg, *errtext, *args;
    errmsg = PyUnicode_FromFormatV(format, vargs);
    if (!errmsg) {
        goto error;
    }
    errtext = PyUnicode_DecodeUTF8(tok->line_start, tok->cur - tok->line_start,
                                   "replace");
    if (!errtext) {
        goto error;
    }
    if (col_offset == -1) {
        col_offset = (int)PyUnicode_GET_LENGTH(errtext);
    }
    if (end_col_offset == -1) {
        end_col_offset = col_offset;
    }
    /* If the scanned span stops short of the physical line end, re-decode
       up to the newline so the report shows the whole line. */
    Py_ssize_t line_len = strcspn(tok->line_start, "\n");
    if (line_len != tok->cur - tok->line_start) {
        Py_DECREF(errtext);
        errtext = PyUnicode_DecodeUTF8(tok->line_start, line_len,
                                       "replace");
    }
    if (!errtext) {
        goto error;
    }
    /* "N" steals the errtext reference. */
    args = Py_BuildValue("(O(OiiNii))", errmsg, tok->filename, tok->lineno,
                         col_offset, errtext, tok->lineno, end_col_offset);
    if (args) {
        PyErr_SetObject(PyExc_SyntaxError, args);
        Py_DECREF(args);
    }
error:
    /* The success path intentionally falls through here: errmsg is
       released either way. */
    Py_XDECREF(errmsg);
    tok->done = E_ERROR;
    return ERRORTOKEN;
}
  1407. static int
  1408. syntaxerror(struct tok_state *tok, const char *format, ...)
  1409. {
  1410. // This errors are cleaned on startup. Todo: Fix it.
  1411. va_list vargs;
  1412. va_start(vargs, format);
  1413. int ret = _syntaxerror_range(tok, format, -1, -1, vargs);
  1414. va_end(vargs);
  1415. return ret;
  1416. }
  1417. static int
  1418. syntaxerror_known_range(struct tok_state *tok,
  1419. int col_offset, int end_col_offset,
  1420. const char *format, ...)
  1421. {
  1422. va_list vargs;
  1423. va_start(vargs, format);
  1424. int ret = _syntaxerror_range(tok, format, col_offset, end_col_offset, vargs);
  1425. va_end(vargs);
  1426. return ret;
  1427. }
  1428. static int
  1429. indenterror(struct tok_state *tok)
  1430. {
  1431. tok->done = E_TABSPACE;
  1432. tok->cur = tok->inp;
  1433. return ERRORTOKEN;
  1434. }
static int
parser_warn(struct tok_state *tok, PyObject *category, const char *format, ...)
{
    /* Issue a warning of CATEGORY at the current location.  Returns 0 on
       success (or when warnings are suppressed via report_warnings),
       -1 after setting tok->done = E_ERROR — including when the warning
       was escalated to a SyntaxError. */
    if (!tok->report_warnings) {
        return 0;
    }
    PyObject *errmsg;
    va_list vargs;
    va_start(vargs, format);
    errmsg = PyUnicode_FromFormatV(format, vargs);
    va_end(vargs);
    if (!errmsg) {
        goto error;
    }
    if (PyErr_WarnExplicitObject(category, errmsg, tok->filename,
                                 tok->lineno, NULL, NULL) < 0) {
        if (PyErr_ExceptionMatches(category)) {
            /* Replace the DeprecationWarning exception with a SyntaxError
               to get a more accurate error report */
            PyErr_Clear();
            syntaxerror(tok, "%U", errmsg);
        }
        goto error;
    }
    Py_DECREF(errmsg);
    return 0;
error:
    Py_XDECREF(errmsg);
    tok->done = E_ERROR;
    return -1;
}
static int
warn_invalid_escape_sequence(struct tok_state *tok, int first_invalid_escape_char)
{
    /* Emit a SyntaxWarning for an unrecognized string escape sequence.
       If the warning is turned into an error (e.g. -W error), escalate
       to a SyntaxError instead.  Returns 0 on success, -1 or
       syntaxerror()'s ERRORTOKEN on failure. */
    if (!tok->report_warnings) {
        return 0;
    }
    PyObject *msg = PyUnicode_FromFormat(
        "invalid escape sequence '\\%c'",
        (char) first_invalid_escape_char
    );
    if (msg == NULL) {
        return -1;
    }
    if (PyErr_WarnExplicitObject(PyExc_SyntaxWarning, msg, tok->filename,
                                 tok->lineno, NULL, NULL) < 0) {
        Py_DECREF(msg);
        if (PyErr_ExceptionMatches(PyExc_SyntaxWarning)) {
            /* Replace the SyntaxWarning exception with a SyntaxError
               to get a more accurate error report */
            PyErr_Clear();
            return syntaxerror(tok, "invalid escape sequence '\\%c'", (char) first_invalid_escape_char);
        }
        return -1;
    }
    Py_DECREF(msg);
    return 0;
}
  1493. static int
  1494. lookahead(struct tok_state *tok, const char *test)
  1495. {
  1496. const char *s = test;
  1497. int res = 0;
  1498. while (1) {
  1499. int c = tok_nextc(tok);
  1500. if (*s == 0) {
  1501. res = !is_potential_identifier_char(c);
  1502. }
  1503. else if (c == *s) {
  1504. s++;
  1505. continue;
  1506. }
  1507. tok_backup(tok, c);
  1508. while (s != test) {
  1509. tok_backup(tok, *--s);
  1510. }
  1511. return res;
  1512. }
  1513. }
static int
verify_end_of_number(struct tok_state *tok, int c, const char *kind) {
    /* C is the first character after a numeric literal of kind KIND
       (e.g. "decimal").  Warn or raise if an identifier character
       follows directly.  Returns 1 if tokenizing may continue, 0 after
       an error was set. */
    if (tok->tok_extra_tokens) {
        // When we are parsing extra tokens, we don't want to emit warnings
        // about invalid literals, because we want to be a bit more liberal.
        return 1;
    }
    /* Emit a deprecation warning only if the numeric literal is immediately
     * followed by one of keywords which can occur after a numeric literal
     * in valid code: "and", "else", "for", "if", "in", "is" and "or".
     * It allows to gradually deprecate existing valid code without adding
     * warning before error in most cases of invalid numeric literal (which
     * would be confusing and break existing tests).
     * Raise a syntax error with slightly better message than plain
     * "invalid syntax" if the numeric literal is immediately followed by
     * other keyword or identifier.
     */
    int r = 0;
    if (c == 'a') {
        r = lookahead(tok, "nd");    /* "and" */
    }
    else if (c == 'e') {
        r = lookahead(tok, "lse");   /* "else" */
    }
    else if (c == 'f') {
        r = lookahead(tok, "or");    /* "for" */
    }
    else if (c == 'i') {
        /* "if", "in", "is" */
        int c2 = tok_nextc(tok);
        if (c2 == 'f' || c2 == 'n' || c2 == 's') {
            r = 1;
        }
        tok_backup(tok, c2);
    }
    else if (c == 'o') {
        r = lookahead(tok, "r");     /* "or" */
    }
    else if (c == 'n') {
        r = lookahead(tok, "ot");    /* "not" */
    }
    if (r) {
        /* A keyword follows directly: warn, then re-consume C so the
           keyword is tokenized normally. */
        tok_backup(tok, c);
        if (parser_warn(tok, PyExc_SyntaxWarning,
                        "invalid %s literal", kind))
        {
            return 0;
        }
        tok_nextc(tok);
    }
    else /* In future releases, only error will remain. */
    if (c < 128 && is_potential_identifier_char(c)) {
        tok_backup(tok, c);
        syntaxerror(tok, "invalid %s literal", kind);
        return 0;
    }
    return 1;
}
/* Verify that the identifier follows PEP 3131.
   All identifier strings are guaranteed to be "ready" unicode objects.
 */
static int
verify_identifier(struct tok_state *tok)
{
    /* Validate the bytes between tok->start and tok->cur as a Python
       identifier.  Returns 1 if valid; otherwise records an error
       (and possibly a SyntaxError pointing at the bad character) and
       returns 0. */
    if (tok->tok_extra_tokens) {
        return 1;
    }
    PyObject *s;
    if (tok->decoding_erred)
        return 0;
    s = PyUnicode_DecodeUTF8(tok->start, tok->cur - tok->start, NULL);
    if (s == NULL) {
        if (PyErr_ExceptionMatches(PyExc_UnicodeDecodeError)) {
            tok->done = E_DECODE;
        }
        else {
            tok->done = E_ERROR;
        }
        return 0;
    }
    /* Index of the first invalid character, or >= length if all valid. */
    Py_ssize_t invalid = _PyUnicode_ScanIdentifier(s);
    if (invalid < 0) {
        Py_DECREF(s);
        tok->done = E_ERROR;
        return 0;
    }
    assert(PyUnicode_GET_LENGTH(s) > 0);
    if (invalid < PyUnicode_GET_LENGTH(s)) {
        Py_UCS4 ch = PyUnicode_READ_CHAR(s, invalid);
        if (invalid + 1 < PyUnicode_GET_LENGTH(s)) {
            /* Determine the offset in UTF-8 encoded input */
            Py_SETREF(s, PyUnicode_Substring(s, 0, invalid + 1));
            if (s != NULL) {
                Py_SETREF(s, PyUnicode_AsUTF8String(s));
            }
            if (s == NULL) {
                tok->done = E_ERROR;
                return 0;
            }
            /* Point tok->cur just past the offending character. */
            tok->cur = (char *)tok->start + PyBytes_GET_SIZE(s);
        }
        Py_DECREF(s);
        if (Py_UNICODE_ISPRINTABLE(ch)) {
            syntaxerror(tok, "invalid character '%c' (U+%04X)", ch, ch);
        }
        else {
            syntaxerror(tok, "invalid non-printable character U+%04X", ch);
        }
        return 0;
    }
    Py_DECREF(s);
    return 1;
}
  1626. static int
  1627. tok_decimal_tail(struct tok_state *tok)
  1628. {
  1629. int c;
  1630. while (1) {
  1631. do {
  1632. c = tok_nextc(tok);
  1633. } while (isdigit(c));
  1634. if (c != '_') {
  1635. break;
  1636. }
  1637. c = tok_nextc(tok);
  1638. if (!isdigit(c)) {
  1639. tok_backup(tok, c);
  1640. syntaxerror(tok, "invalid decimal literal");
  1641. return 0;
  1642. }
  1643. }
  1644. return c;
  1645. }
  1646. static inline int
  1647. tok_continuation_line(struct tok_state *tok) {
  1648. int c = tok_nextc(tok);
  1649. if (c == '\r') {
  1650. c = tok_nextc(tok);
  1651. }
  1652. if (c != '\n') {
  1653. tok->done = E_LINECONT;
  1654. return -1;
  1655. }
  1656. c = tok_nextc(tok);
  1657. if (c == EOF) {
  1658. tok->done = E_EOF;
  1659. tok->cur = tok->inp;
  1660. return -1;
  1661. } else {
  1662. tok_backup(tok, c);
  1663. }
  1664. return c;
  1665. }
  1666. static int
  1667. type_comment_token_setup(struct tok_state *tok, struct token *token, int type, int col_offset,
  1668. int end_col_offset, const char *start, const char *end)
  1669. {
  1670. token->level = tok->level;
  1671. token->lineno = token->end_lineno = tok->lineno;
  1672. token->col_offset = col_offset;
  1673. token->end_col_offset = end_col_offset;
  1674. token->start = start;
  1675. token->end = end;
  1676. return type;
  1677. }
  1678. static int
  1679. token_setup(struct tok_state *tok, struct token *token, int type, const char *start, const char *end)
  1680. {
  1681. assert((start == NULL && end == NULL) || (start != NULL && end != NULL));
  1682. token->level = tok->level;
  1683. if (ISSTRINGLIT(type)) {
  1684. token->lineno = tok->first_lineno;
  1685. }
  1686. else {
  1687. token->lineno = tok->lineno;
  1688. }
  1689. token->end_lineno = tok->lineno;
  1690. token->col_offset = token->end_col_offset = -1;
  1691. token->start = start;
  1692. token->end = end;
  1693. if (start != NULL && end != NULL) {
  1694. token->col_offset = tok->starting_col_offset;
  1695. token->end_col_offset = tok->col_offset;
  1696. }
  1697. return type;
  1698. }
/* Fetch the next token while the tokenizer is in regular (non-f-string)
 * mode.  This is the core state machine: it handles indentation
 * (INDENT/DEDENT), comments and type comments, NEWLINE/NL, identifiers
 * and keywords (including the async/await lookahead hack), numeric
 * literals, string literals, f-string starts (pushing a mode onto the
 * f-string mode stack), line continuations, and operators/brackets.
 *
 * On success returns the token type and fills *token via the
 * MAKE_TOKEN/MAKE_TYPE_COMMENT_TOKEN macros (defined earlier in this
 * file), which consume p_start/p_end as the token's text bounds.  On
 * error, tok->done is set to an E_* code and ERRORTOKEN is returned. */
static int
tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
{
    int c;
    int blankline, nonascii;

    const char *p_start = NULL;
    const char *p_end = NULL;
  nextline:
    tok->start = NULL;
    tok->starting_col_offset = -1;
    blankline = 0;

    /* Get indentation level */
    if (tok->atbol) {
        int col = 0;
        int altcol = 0;
        tok->atbol = 0;
        int cont_line_col = 0;
        for (;;) {
            c = tok_nextc(tok);
            if (c == ' ') {
                col++, altcol++;
            }
            else if (c == '\t') {
                /* col uses the configured tab size; altcol uses ALTTABSIZE so
                   that inconsistent tab/space mixes can be detected below. */
                col = (col / tok->tabsize + 1) * tok->tabsize;
                altcol = (altcol / ALTTABSIZE + 1) * ALTTABSIZE;
            }
            else if (c == '\014')  {/* Control-L (formfeed) */
                col = altcol = 0; /* For Emacs users */
            }
            else if (c == '\\') {
                // Indentation cannot be split over multiple physical lines
                // using backslashes. This means that if we found a backslash
                // preceded by whitespace, **the first one we find** determines
                // the level of indentation of whatever comes next.
                cont_line_col = cont_line_col ? cont_line_col : col;
                if ((c = tok_continuation_line(tok)) == -1) {
                    return MAKE_TOKEN(ERRORTOKEN);
                }
            }
            else {
                break;
            }
        }
        tok_backup(tok, c);
        if (c == '#' || c == '\n' || c == '\r') {
            /* Lines with only whitespace and/or comments
               shouldn't affect the indentation and are
               not passed to the parser as NEWLINE tokens,
               except *totally* empty lines in interactive
               mode, which signal the end of a command group. */
            if (col == 0 && c == '\n' && tok->prompt != NULL) {
                blankline = 0; /* Let it through */
            }
            else if (tok->prompt != NULL && tok->lineno == 1) {
                /* In interactive mode, if the first line contains
                   only spaces and/or a comment, let it through. */
                blankline = 0;
                col = altcol = 0;
            }
            else {
                blankline = 1; /* Ignore completely */
            }
            /* We can't jump back right here since we still
               may need to skip to the end of a comment */
        }
        if (!blankline && tok->level == 0) {
            col = cont_line_col ? cont_line_col : col;
            altcol = cont_line_col ? cont_line_col : altcol;
            if (col == tok->indstack[tok->indent]) {
                /* No change */
                if (altcol != tok->altindstack[tok->indent]) {
                    /* Same col but different altcol: ambiguous tabs/spaces. */
                    return MAKE_TOKEN(indenterror(tok));
                }
            }
            else if (col > tok->indstack[tok->indent]) {
                /* Indent -- always one */
                if (tok->indent+1 >= MAXINDENT) {
                    tok->done = E_TOODEEP;
                    tok->cur = tok->inp;
                    return MAKE_TOKEN(ERRORTOKEN);
                }
                if (altcol <= tok->altindstack[tok->indent]) {
                    return MAKE_TOKEN(indenterror(tok));
                }
                tok->pendin++;
                tok->indstack[++tok->indent] = col;
                tok->altindstack[tok->indent] = altcol;
            }
            else /* col < tok->indstack[tok->indent] */ {
                /* Dedent -- any number, must be consistent */
                while (tok->indent > 0 &&
                       col < tok->indstack[tok->indent]) {
                    tok->pendin--;
                    tok->indent--;
                }
                if (col != tok->indstack[tok->indent]) {
                    /* Dedent doesn't land on any outer indentation level. */
                    tok->done = E_DEDENT;
                    tok->cur = tok->inp;
                    return MAKE_TOKEN(ERRORTOKEN);
                }
                if (altcol != tok->altindstack[tok->indent]) {
                    return MAKE_TOKEN(indenterror(tok));
                }
            }
        }
    }

    tok->start = tok->cur;
    tok->starting_col_offset = tok->col_offset;

    /* Return pending indents/dedents */
    if (tok->pendin != 0) {
        if (tok->pendin < 0) {
            if (tok->tok_extra_tokens) {
                p_start = tok->cur;
                p_end = tok->cur;
            }
            tok->pendin++;
            return MAKE_TOKEN(DEDENT);
        }
        else {
            if (tok->tok_extra_tokens) {
                p_start = tok->buf;
                p_end = tok->cur;
            }
            tok->pendin--;
            return MAKE_TOKEN(INDENT);
        }
    }

    /* Peek ahead at the next character */
    c = tok_nextc(tok);
    tok_backup(tok, c);
    /* Check if we are closing an async function */
    if (tok->async_def
        && !blankline
        /* Due to some implementation artifacts of type comments,
         * a TYPE_COMMENT at the start of a function won't set an
         * indentation level and it will produce a NEWLINE after it.
         * To avoid spuriously ending an async function due to this,
         * wait until we have some non-newline char in front of us. */
        && c != '\n'
        && tok->level == 0
        /* There was a NEWLINE after ASYNC DEF,
           so we're past the signature. */
        && tok->async_def_nl
        /* Current indentation level is less than where
           the async function was defined */
        && tok->async_def_indent >= tok->indent)
    {
        tok->async_def = 0;
        tok->async_def_indent = 0;
        tok->async_def_nl = 0;
    }

    /* Re-entry point after a line continuation: skip whitespace and
       retokenize from the continued line. */
  again:
    tok->start = NULL;
    /* Skip spaces */
    do {
        c = tok_nextc(tok);
    } while (c == ' ' || c == '\t' || c == '\014');

    /* Set start of current token */
    tok->start = tok->cur == NULL ? NULL : tok->cur - 1;
    tok->starting_col_offset = tok->col_offset - 1;

    /* Skip comment, unless it's a type comment */
    if (c == '#') {
        const char* p = NULL;
        const char *prefix, *type_start;
        int current_starting_col_offset;

        while (c != EOF && c != '\n' && c != '\r') {
            c = tok_nextc(tok);
        }

        if (tok->tok_extra_tokens) {
            p = tok->start;
        }

        if (tok->type_comments) {
            p = tok->start;
            current_starting_col_offset = tok->starting_col_offset;
            prefix = type_comment_prefix;
            /* Match "# type:" allowing arbitrary space/tab runs where the
               prefix template has a space. */
            while (*prefix && p < tok->cur) {
                if (*prefix == ' ') {
                    while (*p == ' ' || *p == '\t') {
                        p++;
                        current_starting_col_offset++;
                    }
                } else if (*prefix == *p) {
                    p++;
                    current_starting_col_offset++;
                } else {
                    break;
                }

                prefix++;
            }

            /* This is a type comment if we matched all of type_comment_prefix. */
            if (!*prefix) {
                int is_type_ignore = 1;
                // +6 in order to skip the word 'ignore'
                const char *ignore_end = p + 6;
                const int ignore_end_col_offset = current_starting_col_offset + 6;
                tok_backup(tok, c);  /* don't eat the newline or EOF */

                type_start = p;

                /* A TYPE_IGNORE is "type: ignore" followed by the end of the token
                 * or anything ASCII and non-alphanumeric. */
                is_type_ignore = (
                    tok->cur >= ignore_end && memcmp(p, "ignore", 6) == 0
                    && !(tok->cur > ignore_end
                         && ((unsigned char)ignore_end[0] >= 128 || Py_ISALNUM(ignore_end[0]))));

                if (is_type_ignore) {
                    p_start = ignore_end;
                    p_end = tok->cur;

                    /* If this type ignore is the only thing on the line, consume the newline also. */
                    if (blankline) {
                        tok_nextc(tok);
                        tok->atbol = 1;
                    }
                    return MAKE_TYPE_COMMENT_TOKEN(TYPE_IGNORE, ignore_end_col_offset, tok->col_offset);
                } else {
                    p_start = type_start;
                    p_end = tok->cur;
                    return MAKE_TYPE_COMMENT_TOKEN(TYPE_COMMENT, current_starting_col_offset, tok->col_offset);
                }
            }
        }
        if (tok->tok_extra_tokens) {
            tok_backup(tok, c);  /* don't eat the newline or EOF */
            p_start = p;
            p_end = tok->cur;
            /* Remember whether this comment sat on an otherwise-blank line so
               the upcoming newline can be emitted as NL rather than NEWLINE. */
            tok->comment_newline = blankline;
            return MAKE_TOKEN(COMMENT);
        }
    }

    if (tok->done == E_INTERACT_STOP) {
        return MAKE_TOKEN(ENDMARKER);
    }

    /* Check for EOF and errors now */
    if (c == EOF) {
        if (tok->level) {
            /* EOF inside open brackets is an error. */
            return MAKE_TOKEN(ERRORTOKEN);
        }
        return MAKE_TOKEN(tok->done == E_EOF ? ENDMARKER : ERRORTOKEN);
    }

    /* Identifier (most frequent token!) */
    nonascii = 0;
    if (is_potential_identifier_start(c)) {
        /* Process the various legal combinations of b"", r"", u"", and f"". */
        int saw_b = 0, saw_r = 0, saw_u = 0, saw_f = 0;
        while (1) {
            if (!(saw_b || saw_u || saw_f) && (c == 'b' || c == 'B'))
                saw_b = 1;
            /* Since this is a backwards compatibility support literal we don't
               want to support it in arbitrary order like byte literals. */
            else if (!(saw_b || saw_u || saw_r || saw_f)
                     && (c == 'u'|| c == 'U')) {
                saw_u = 1;
            }
            /* ur"" and ru"" are not supported */
            else if (!(saw_r || saw_u) && (c == 'r' || c == 'R')) {
                saw_r = 1;
            }
            else if (!(saw_f || saw_b || saw_u) && (c == 'f' || c == 'F')) {
                saw_f = 1;
            }
            else {
                break;
            }
            c = tok_nextc(tok);
            if (c == '"' || c == '\'') {
                /* A quote right after a valid prefix: this is a string, not a
                   name.  f-strings get their own tokenization path. */
                if (saw_f) {
                    goto f_string_quote;
                }
                goto letter_quote;
            }
        }
        while (is_potential_identifier_char(c)) {
            if (c >= 128) {
                nonascii = 1;
            }
            c = tok_nextc(tok);
        }
        tok_backup(tok, c);
        if (nonascii && !verify_identifier(tok)) {
            return MAKE_TOKEN(ERRORTOKEN);
        }

        p_start = tok->start;
        p_end = tok->cur;

        /* async/await parsing block. */
        if (tok->cur - tok->start == 5 && tok->start[0] == 'a') {
            /* May be an 'async' or 'await' token.  For Python 3.7 or
               later we recognize them unconditionally.  For Python
               3.5 or 3.6 we recognize 'async' in front of 'def', and
               either one inside of 'async def'.  (Technically we
               shouldn't recognize these at all for 3.4 or earlier,
               but there's no *valid* Python 3.4 code that would be
               rejected, and async functions will be rejected in a
               later phase.) */
            if (!tok->async_hacks || tok->async_def) {
                /* Always recognize the keywords. */
                if (memcmp(tok->start, "async", 5) == 0) {
                    return MAKE_TOKEN(ASYNC);
                }
                if (memcmp(tok->start, "await", 5) == 0) {
                    return MAKE_TOKEN(AWAIT);
                }
            }
            else if (memcmp(tok->start, "async", 5) == 0) {
                /* The current token is 'async'.
                   Look ahead one token to see if that is 'def'.
                   The lookahead runs on a full shallow copy of the tokenizer
                   state so the real state is left untouched. */

                struct tok_state ahead_tok;
                struct token ahead_token;
                _PyToken_Init(&ahead_token);
                int ahead_tok_kind;

                memcpy(&ahead_tok, tok, sizeof(ahead_tok));
                ahead_tok_kind = tok_get_normal_mode(&ahead_tok,
                                                     current_tok,
                                                     &ahead_token);

                if (ahead_tok_kind == NAME
                    && ahead_tok.cur - ahead_tok.start == 3
                    && memcmp(ahead_tok.start, "def", 3) == 0)
                {
                    /* The next token is going to be 'def', so instead of
                       returning a plain NAME token, return ASYNC. */
                    tok->async_def_indent = tok->indent;
                    tok->async_def = 1;
                    _PyToken_Free(&ahead_token);
                    return MAKE_TOKEN(ASYNC);
                }
                _PyToken_Free(&ahead_token);
            }
        }

        return MAKE_TOKEN(NAME);
    }

    if (c == '\r') {
        /* Fold "\r\n" into a single newline. */
        c = tok_nextc(tok);
    }

    /* Newline */
    if (c == '\n') {
        tok->atbol = 1;
        if (blankline || tok->level > 0) {
            if (tok->tok_extra_tokens) {
                if (tok->comment_newline) {
                    tok->comment_newline = 0;
                }
                p_start = tok->start;
                p_end = tok->cur;
                return MAKE_TOKEN(NL);
            }
            goto nextline;
        }
        if (tok->comment_newline && tok->tok_extra_tokens) {
            tok->comment_newline = 0;
            p_start = tok->start;
            p_end = tok->cur;
            return MAKE_TOKEN(NL);
        }
        p_start = tok->start;
        p_end = tok->cur - 1; /* Leave '\n' out of the string */
        tok->cont_line = 0;
        if (tok->async_def) {
            /* We're somewhere inside an 'async def' function, and
               we've encountered a NEWLINE after its signature. */
            tok->async_def_nl = 1;
        }
        return MAKE_TOKEN(NEWLINE);
    }

    /* Period or number starting with period? */
    if (c == '.') {
        c = tok_nextc(tok);
        if (isdigit(c)) {
            goto fraction;
        } else if (c == '.') {
            c = tok_nextc(tok);
            if (c == '.') {
                p_start = tok->start;
                p_end = tok->cur;
                return MAKE_TOKEN(ELLIPSIS);
            }
            else {
                tok_backup(tok, c);
            }
            tok_backup(tok, '.');
        }
        else {
            tok_backup(tok, c);
        }
        p_start = tok->start;
        p_end = tok->cur;
        return MAKE_TOKEN(DOT);
    }

    /* Number */
    if (isdigit(c)) {
        if (c == '0') {
            /* Hex, octal or binary -- maybe. */
            c = tok_nextc(tok);
            if (c == 'x' || c == 'X') {
                /* Hex */
                c = tok_nextc(tok);
                do {
                    if (c == '_') {
                        c = tok_nextc(tok);
                    }
                    if (!isxdigit(c)) {
                        tok_backup(tok, c);
                        return MAKE_TOKEN(syntaxerror(tok, "invalid hexadecimal literal"));
                    }
                    do {
                        c = tok_nextc(tok);
                    } while (isxdigit(c));
                } while (c == '_');
                if (!verify_end_of_number(tok, c, "hexadecimal")) {
                    return MAKE_TOKEN(ERRORTOKEN);
                }
            }
            else if (c == 'o' || c == 'O') {
                /* Octal */
                c = tok_nextc(tok);
                do {
                    if (c == '_') {
                        c = tok_nextc(tok);
                    }
                    if (c < '0' || c >= '8') {
                        if (isdigit(c)) {
                            return MAKE_TOKEN(syntaxerror(tok,
                                    "invalid digit '%c' in octal literal", c));
                        }
                        else {
                            tok_backup(tok, c);
                            return MAKE_TOKEN(syntaxerror(tok, "invalid octal literal"));
                        }
                    }
                    do {
                        c = tok_nextc(tok);
                    } while ('0' <= c && c < '8');
                } while (c == '_');
                if (isdigit(c)) {
                    return MAKE_TOKEN(syntaxerror(tok,
                            "invalid digit '%c' in octal literal", c));
                }
                if (!verify_end_of_number(tok, c, "octal")) {
                    return MAKE_TOKEN(ERRORTOKEN);
                }
            }
            else if (c == 'b' || c == 'B') {
                /* Binary */
                c = tok_nextc(tok);
                do {
                    if (c == '_') {
                        c = tok_nextc(tok);
                    }
                    if (c != '0' && c != '1') {
                        if (isdigit(c)) {
                            return MAKE_TOKEN(syntaxerror(tok, "invalid digit '%c' in binary literal", c));
                        }
                        else {
                            tok_backup(tok, c);
                            return MAKE_TOKEN(syntaxerror(tok, "invalid binary literal"));
                        }
                    }
                    do {
                        c = tok_nextc(tok);
                    } while (c == '0' || c == '1');
                } while (c == '_');
                if (isdigit(c)) {
                    return MAKE_TOKEN(syntaxerror(tok, "invalid digit '%c' in binary literal", c));
                }
                if (!verify_end_of_number(tok, c, "binary")) {
                    return MAKE_TOKEN(ERRORTOKEN);
                }
            }
            else {
                int nonzero = 0;
                /* maybe old-style octal; c is first char of it */
                /* in any case, allow '0' as a literal */
                while (1) {
                    if (c == '_') {
                        c = tok_nextc(tok);
                        if (!isdigit(c)) {
                            tok_backup(tok, c);
                            return MAKE_TOKEN(syntaxerror(tok, "invalid decimal literal"));
                        }
                    }
                    if (c != '0') {
                        break;
                    }
                    c = tok_nextc(tok);
                }
                char* zeros_end = tok->cur;
                if (isdigit(c)) {
                    nonzero = 1;
                    c = tok_decimal_tail(tok);
                    if (c == 0) {
                        return MAKE_TOKEN(ERRORTOKEN);
                    }
                }
                if (c == '.') {
                    c = tok_nextc(tok);
                    goto fraction;
                }
                else if (c == 'e' || c == 'E') {
                    goto exponent;
                }
                else if (c == 'j' || c == 'J') {
                    goto imaginary;
                }
                else if (nonzero && !tok->tok_extra_tokens) {
                    /* Old-style octal: now disallowed. */
                    tok_backup(tok, c);
                    return MAKE_TOKEN(syntaxerror_known_range(
                        tok, (int)(tok->start + 1 - tok->line_start),
                        (int)(zeros_end - tok->line_start),
                        "leading zeros in decimal integer "
                        "literals are not permitted; "
                        "use an 0o prefix for octal integers"));
                }
                if (!verify_end_of_number(tok, c, "decimal")) {
                    return MAKE_TOKEN(ERRORTOKEN);
                }
            }
        }
        else {
            /* Decimal */
            c = tok_decimal_tail(tok);
            if (c == 0) {
                return MAKE_TOKEN(ERRORTOKEN);
            }
            {
                /* Accept floating point numbers. */
                if (c == '.') {
                    c = tok_nextc(tok);
        fraction:
                    /* Fraction */
                    if (isdigit(c)) {
                        c = tok_decimal_tail(tok);
                        if (c == 0) {
                            return MAKE_TOKEN(ERRORTOKEN);
                        }
                    }
                }
                if (c == 'e' || c == 'E') {
                    int e;
                  exponent:
                    e = c;
                    /* Exponent part */
                    c = tok_nextc(tok);
                    if (c == '+' || c == '-') {
                        c = tok_nextc(tok);
                        if (!isdigit(c)) {
                            tok_backup(tok, c);
                            return MAKE_TOKEN(syntaxerror(tok, "invalid decimal literal"));
                        }
                    } else if (!isdigit(c)) {
                        /* Bare 'e' with no digits: the 'e' was part of the
                           next token (e.g. "10else"), so back it up too. */
                        tok_backup(tok, c);
                        if (!verify_end_of_number(tok, e, "decimal")) {
                            return MAKE_TOKEN(ERRORTOKEN);
                        }
                        tok_backup(tok, e);
                        p_start = tok->start;
                        p_end = tok->cur;
                        return MAKE_TOKEN(NUMBER);
                    }
                    c = tok_decimal_tail(tok);
                    if (c == 0) {
                        return MAKE_TOKEN(ERRORTOKEN);
                    }
                }
                if (c == 'j' || c == 'J') {
                    /* Imaginary part */
        imaginary:
                    c = tok_nextc(tok);
                    if (!verify_end_of_number(tok, c, "imaginary")) {
                        return MAKE_TOKEN(ERRORTOKEN);
                    }
                }
                else if (!verify_end_of_number(tok, c, "decimal")) {
                    return MAKE_TOKEN(ERRORTOKEN);
                }
            }
        }
        tok_backup(tok, c);
        p_start = tok->start;
        p_end = tok->cur;
        return MAKE_TOKEN(NUMBER);
    }

  f_string_quote:
    if (((tolower(*tok->start) == 'f' || tolower(*tok->start) == 'r') && (c == '\'' || c == '"'))) {
        int quote = c;
        int quote_size = 1;             /* 1 or 3 */

        /* Nodes of type STRING, especially multi line strings
           must be handled differently in order to get both
           the starting line number and the column offset right.
           (cf. issue 16806) */
        tok->first_lineno = tok->lineno;
        tok->multi_line_start = tok->line_start;

        /* Find the quote size and start of string */
        int after_quote = tok_nextc(tok);
        if (after_quote == quote) {
            int after_after_quote = tok_nextc(tok);
            if (after_after_quote == quote) {
                quote_size = 3;
            }
            else {
                // TODO: Check this
                tok_backup(tok, after_after_quote);
                tok_backup(tok, after_quote);
            }
        }
        if (after_quote != quote) {
            tok_backup(tok, after_quote);
        }

        p_start = tok->start;
        p_end = tok->cur;
        if (tok->tok_mode_stack_index + 1 >= MAXFSTRINGLEVEL) {
            return MAKE_TOKEN(syntaxerror(tok, "too many nested f-strings"));
        }
        /* Push a new f-string mode; subsequent tokens come from
           tok_get_fstring_mode() until the matching FSTRING_END. */
        tokenizer_mode *the_current_tok = TOK_NEXT_MODE(tok);
        the_current_tok->kind = TOK_FSTRING_MODE;
        the_current_tok->f_string_quote = quote;
        the_current_tok->f_string_quote_size = quote_size;
        the_current_tok->f_string_start = tok->start;
        the_current_tok->f_string_multi_line_start = tok->line_start;
        the_current_tok->f_string_line_start = tok->lineno;
        the_current_tok->f_string_start_offset = -1;
        the_current_tok->f_string_multi_line_start_offset = -1;
        the_current_tok->last_expr_buffer = NULL;
        the_current_tok->last_expr_size = 0;
        the_current_tok->last_expr_end = -1;
        the_current_tok->f_string_debug = 0;

        switch (*tok->start) {
            case 'F':
            case 'f':
                /* "fr"/"Fr"/... : raw iff the second prefix char is r/R. */
                the_current_tok->f_string_raw = tolower(*(tok->start + 1)) == 'r';
                break;
            case 'R':
            case 'r':
                the_current_tok->f_string_raw = 1;
                break;
            default:
                Py_UNREACHABLE();
        }

        the_current_tok->curly_bracket_depth = 0;
        the_current_tok->curly_bracket_expr_start_depth = -1;
        return MAKE_TOKEN(FSTRING_START);
    }

  letter_quote:
    /* String */
    if (c == '\'' || c == '"') {
        int quote = c;
        int quote_size = 1;             /* 1 or 3 */
        int end_quote_size = 0;

        /* Nodes of type STRING, especially multi line strings
           must be handled differently in order to get both
           the starting line number and the column offset right.
           (cf. issue 16806) */
        tok->first_lineno = tok->lineno;
        tok->multi_line_start = tok->line_start;

        /* Find the quote size and start of string */
        c = tok_nextc(tok);
        if (c == quote) {
            c = tok_nextc(tok);
            if (c == quote) {
                quote_size = 3;
            }
            else {
                end_quote_size = 1;     /* empty string found */
            }
        }
        if (c != quote) {
            tok_backup(tok, c);
        }

        /* Get rest of string */
        while (end_quote_size != quote_size) {
            c = tok_nextc(tok);
            if (tok->done == E_ERROR) {
                return MAKE_TOKEN(ERRORTOKEN);
            }
            if (tok->done == E_DECODE) {
                break;
            }
            if (c == EOF || (quote_size == 1 && c == '\n')) {
                assert(tok->multi_line_start != NULL);
                // shift the tok_state's location into
                // the start of string, and report the error
                // from the initial quote character
                tok->cur = (char *)tok->start;
                tok->cur++;
                tok->line_start = tok->multi_line_start;
                int start = tok->lineno;
                tok->lineno = tok->first_lineno;

                if (INSIDE_FSTRING(tok)) {
                    /* When we are in an f-string, before raising the
                     * unterminated string literal error, check whether
                     * does the initial quote matches with f-strings quotes
                     * and if it is, then this must be a missing '}' token
                     * so raise the proper error */
                    tokenizer_mode *the_current_tok = TOK_GET_MODE(tok);
                    if (the_current_tok->f_string_quote == quote &&
                        the_current_tok->f_string_quote_size == quote_size) {
                        /* NOTE(review): `start` has no matching format
                           specifier in this message; it is evaluated but
                           ignored by the varargs formatter.  Harmless, but
                           the extra argument could be dropped. */
                        return MAKE_TOKEN(syntaxerror(tok, "f-string: expecting '}'", start));
                    }
                }

                if (quote_size == 3) {
                    syntaxerror(tok, "unterminated triple-quoted string literal"
                                     " (detected at line %d)", start);
                    if (c != '\n') {
                        tok->done = E_EOFS;
                    }
                    return MAKE_TOKEN(ERRORTOKEN);
                }
                else {
                    syntaxerror(tok, "unterminated string literal (detected at"
                                     " line %d)", start);
                    if (c != '\n') {
                        tok->done = E_EOLS;
                    }
                    return MAKE_TOKEN(ERRORTOKEN);
                }
            }
            if (c == quote) {
                end_quote_size += 1;
            }
            else {
                end_quote_size = 0;
                if (c == '\\') {
                    c = tok_nextc(tok);  /* skip escaped char */
                    if (c == '\r') {
                        c = tok_nextc(tok);
                    }
                }
            }
        }

        p_start = tok->start;
        p_end = tok->cur;
        return MAKE_TOKEN(STRING);
    }

    /* Line continuation */
    if (c == '\\') {
        if ((c = tok_continuation_line(tok)) == -1) {
            return MAKE_TOKEN(ERRORTOKEN);
        }
        tok->cont_line = 1;
        goto again; /* Read next line */
    }

    /* Punctuation character */
    int is_punctuation = (c == ':' || c == '}' || c == '!' || c == '{');
    if (is_punctuation && INSIDE_FSTRING(tok) && INSIDE_FSTRING_EXPR(current_tok)) {
        /* This code block gets executed before the curly_bracket_depth is incremented
         * by the `{` case, so for ensuring that we are on the 0th level, we need
         * to adjust it manually */
        int cursor = current_tok->curly_bracket_depth - (c != '{');
        if (cursor == 0 && !update_fstring_expr(tok, c)) {
            return MAKE_TOKEN(ENDMARKER);
        }
        if (cursor == 0 && c != '{' && set_fstring_expr(tok, token, c)) {
            return MAKE_TOKEN(ERRORTOKEN);
        }

        if (c == ':' && cursor == current_tok->curly_bracket_expr_start_depth) {
            /* ':' at expression depth starts a format spec: switch back to
               f-string middle mode. */
            current_tok->kind = TOK_FSTRING_MODE;
            p_start = tok->start;
            p_end = tok->cur;
            return MAKE_TOKEN(_PyToken_OneChar(c));
        }
    }

    /* Check for two-character token */
    {
        int c2 = tok_nextc(tok);
        int current_token = _PyToken_TwoChars(c, c2);
        if (current_token != OP) {
            /* Two-char operator matched; try to extend to three chars. */
            int c3 = tok_nextc(tok);
            int current_token3 = _PyToken_ThreeChars(c, c2, c3);
            if (current_token3 != OP) {
                current_token = current_token3;
            }
            else {
                tok_backup(tok, c3);
            }
            p_start = tok->start;
            p_end = tok->cur;
            return MAKE_TOKEN(current_token);
        }
        tok_backup(tok, c2);
    }

    /* Keep track of parentheses nesting level */
    switch (c) {
    case '(':
    case '[':
    case '{':
        if (tok->level >= MAXLEVEL) {
            return MAKE_TOKEN(syntaxerror(tok, "too many nested parentheses"));
        }
        tok->parenstack[tok->level] = c;
        tok->parenlinenostack[tok->level] = tok->lineno;
        tok->parencolstack[tok->level] = (int)(tok->start - tok->line_start);
        tok->level++;
        if (INSIDE_FSTRING(tok)) {
            current_tok->curly_bracket_depth++;
        }
        break;
    case ')':
    case ']':
    case '}':
        if (INSIDE_FSTRING(tok) && !current_tok->curly_bracket_depth && c == '}') {
            return MAKE_TOKEN(syntaxerror(tok, "f-string: single '}' is not allowed"));
        }
        if (!tok->tok_extra_tokens && !tok->level) {
            return MAKE_TOKEN(syntaxerror(tok, "unmatched '%c'", c));
        }
        if (tok->level > 0) {
            tok->level--;
            int opening = tok->parenstack[tok->level];
            if (!tok->tok_extra_tokens && !((opening == '(' && c == ')') ||
                                            (opening == '[' && c == ']') ||
                                            (opening == '{' && c == '}'))) {
                /* If the opening bracket belongs to an f-string's expression
                   part (e.g. f"{)}") and the closing bracket is an arbitrary
                   nested expression, then instead of matching a different
                   syntactical construct with it; we'll throw an unmatched
                   parentheses error. */
                if (INSIDE_FSTRING(tok) && opening == '{') {
                    assert(current_tok->curly_bracket_depth >= 0);
                    int previous_bracket = current_tok->curly_bracket_depth - 1;
                    if (previous_bracket == current_tok->curly_bracket_expr_start_depth) {
                        return MAKE_TOKEN(syntaxerror(tok, "f-string: unmatched '%c'", c));
                    }
                }
                if (tok->parenlinenostack[tok->level] != tok->lineno) {
                    return MAKE_TOKEN(syntaxerror(tok,
                            "closing parenthesis '%c' does not match "
                            "opening parenthesis '%c' on line %d",
                            c, opening, tok->parenlinenostack[tok->level]));
                }
                else {
                    return MAKE_TOKEN(syntaxerror(tok,
                            "closing parenthesis '%c' does not match "
                            "opening parenthesis '%c'",
                            c, opening));
                }
            }
        }

        if (INSIDE_FSTRING(tok)) {
            current_tok->curly_bracket_depth--;
            if (c == '}' && current_tok->curly_bracket_depth == current_tok->curly_bracket_expr_start_depth) {
                /* Closed the expression part: fall back to f-string middle
                   mode for the literal text that follows. */
                current_tok->curly_bracket_expr_start_depth--;
                current_tok->kind = TOK_FSTRING_MODE;
                current_tok->f_string_debug = 0;
            }
        }
        break;
    default:
        break;
    }

    if (!Py_UNICODE_ISPRINTABLE(c)) {
        return MAKE_TOKEN(syntaxerror(tok, "invalid non-printable character U+%04X", c));
    }

    if (c == '=' && INSIDE_FSTRING_EXPR(current_tok)) {
        /* Seeing '=' inside an f-string expression may be the PEP 501-style
           debug specifier (f"{x=}"); record it for the f-string mode. */
        current_tok->f_string_debug = 1;
    }

    /* Punctuation character */
    p_start = tok->start;
    p_end = tok->cur;
    return MAKE_TOKEN(_PyToken_OneChar(c));
}
/* Tokenize the literal portion of an f-string: the text between the opening
   quote, `{...}` replacement fields, and the closing quote.

   Produces one of:
     - FSTRING_END   when the full closing-quote sequence is matched,
     - FSTRING_MIDDLE for a run of literal text (possibly cut just before a
       `{`, `}`, or — inside a format spec — a newline),
     - ERRORTOKEN    on EOF, decoding errors, or unterminated literals,
   or defers to tok_get_normal_mode() when a replacement-field `{` comes first.

   `current_tok` is this f-string's entry on tok's mode stack; `token`
   receives the result via MAKE_TOKEN. p_start/p_end delimit the token text
   (presumably consumed by the MAKE_TOKEN macro — see its definition). */
static int
tok_get_fstring_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct token *token)
{
    const char *p_start = NULL;
    const char *p_end = NULL;
    int end_quote_size = 0;   // consecutive closing-quote chars matched so far
    int unicode_escape = 0;   // set while inside \N{...}, where '}' is literal

    tok->start = tok->cur;
    tok->first_lineno = tok->lineno;
    tok->starting_col_offset = tok->col_offset;

    // If we start with a bracket, we defer to the normal mode as there is nothing for us to tokenize
    // before it.
    int start_char = tok_nextc(tok);
    if (start_char == '{') {
        int peek1 = tok_nextc(tok);
        tok_backup(tok, peek1);
        tok_backup(tok, start_char);
        if (peek1 != '{') {
            // A real replacement field: switch back to the regular tokenizer
            // so it can scan the embedded expression.
            current_tok->curly_bracket_expr_start_depth++;
            if (current_tok->curly_bracket_expr_start_depth >= MAX_EXPR_NESTING) {
                return MAKE_TOKEN(syntaxerror(tok, "f-string: expressions nested too deeply"));
            }
            TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
            return tok_get_normal_mode(tok, current_tok, token);
        }
    }
    else {
        tok_backup(tok, start_char);
    }

    // Check if we are at the end of the string
    for (int i = 0; i < current_tok->f_string_quote_size; i++) {
        int quote = tok_nextc(tok);
        if (quote != current_tok->f_string_quote) {
            tok_backup(tok, quote);
            goto f_string_middle;
        }
    }

    // Closing quotes fully matched: free the buffered last expression and
    // pop this f-string mode off the mode stack.
    if (current_tok->last_expr_buffer != NULL) {
        PyMem_Free(current_tok->last_expr_buffer);
        current_tok->last_expr_buffer = NULL;
        current_tok->last_expr_size = 0;
        current_tok->last_expr_end = -1;
    }

    p_start = tok->start;
    p_end = tok->cur;
    tok->tok_mode_stack_index--;
    return MAKE_TOKEN(FSTRING_END);

f_string_middle:

    // TODO: This is a bit of a hack, but it works for now. We need to find a better way to handle
    // this.
    tok->multi_line_start = tok->line_start;
    while (end_quote_size != current_tok->f_string_quote_size) {
        int c = tok_nextc(tok);
        if (tok->done == E_ERROR) {
            return MAKE_TOKEN(ERRORTOKEN);
        }
        // We are in a format spec iff the expression part has already been
        // scanned (last_expr_end set) and the bracket stack is still open.
        int in_format_spec = (
            current_tok->last_expr_end != -1
            &&
            INSIDE_FSTRING_EXPR(current_tok)
        );
        if (c == EOF || (current_tok->f_string_quote_size == 1 && c == '\n')) {
            if (tok->decoding_erred) {
                return MAKE_TOKEN(ERRORTOKEN);
            }

            // If we are in a format spec and we found a newline,
            // it means that the format spec ends here and we should
            // return to the regular mode.
            if (in_format_spec && c == '\n') {
                tok_backup(tok, c);
                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
                p_start = tok->start;
                p_end = tok->cur;
                return MAKE_TOKEN(FSTRING_MIDDLE);
            }

            assert(tok->multi_line_start != NULL);
            // shift the tok_state's location into
            // the start of string, and report the error
            // from the initial quote character
            tok->cur = (char *)current_tok->f_string_start;
            tok->cur++;
            tok->line_start = current_tok->f_string_multi_line_start;
            int start = tok->lineno;

            tokenizer_mode *the_current_tok = TOK_GET_MODE(tok);
            tok->lineno = the_current_tok->f_string_line_start;

            if (current_tok->f_string_quote_size == 3) {
                syntaxerror(tok,
                            "unterminated triple-quoted f-string literal"
                            " (detected at line %d)", start);
                if (c != '\n') {
                    tok->done = E_EOFS;
                }
                return MAKE_TOKEN(ERRORTOKEN);
            }
            else {
                return MAKE_TOKEN(syntaxerror(tok,
                    "unterminated f-string literal (detected at"
                    " line %d)", start));
            }
        }

        if (c == current_tok->f_string_quote) {
            end_quote_size += 1;
            continue;
        } else {
            end_quote_size = 0;
        }

        if (c == '{') {
            int peek = tok_nextc(tok);
            if (peek != '{' || in_format_spec) {
                // A lone `{` (or any `{` inside a format spec, where `{{`
                // escaping does not apply): hand control to the regular
                // tokenizer to scan the replacement field.
                tok_backup(tok, peek);
                tok_backup(tok, c);
                current_tok->curly_bracket_expr_start_depth++;
                if (current_tok->curly_bracket_expr_start_depth >= MAX_EXPR_NESTING) {
                    return MAKE_TOKEN(syntaxerror(tok, "f-string: expressions nested too deeply"));
                }
                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
                p_start = tok->start;
                p_end = tok->cur;
            } else {
                // `{{` escape: emit the text keeping only one of the braces.
                p_start = tok->start;
                p_end = tok->cur - 1;
            }
            return MAKE_TOKEN(FSTRING_MIDDLE);
        } else if (c == '}') {
            if (unicode_escape) {
                // This '}' closes a \N{...} named escape: it is literal text.
                p_start = tok->start;
                p_end = tok->cur;
                return MAKE_TOKEN(FSTRING_MIDDLE);
            }
            int peek = tok_nextc(tok);

            // The tokenizer can only be in the format spec if we have already completed the expression
            // scanning (indicated by the end of the expression being set) and we are not at the top level
            // of the bracket stack (-1 is the top level). Since format specifiers can't legally use double
            // brackets, we can bypass it here.
            if (peek == '}' && !in_format_spec) {
                // `}}` escape: emit the text keeping only one of the braces.
                p_start = tok->start;
                p_end = tok->cur - 1;
            } else {
                // Lone `}`: give it back and let the regular tokenizer
                // handle (and report) it.
                tok_backup(tok, peek);
                tok_backup(tok, c);
                TOK_GET_MODE(tok)->kind = TOK_REGULAR_MODE;
                p_start = tok->start;
                p_end = tok->cur;
            }
            return MAKE_TOKEN(FSTRING_MIDDLE);
        } else if (c == '\\') {
            int peek = tok_nextc(tok);
            if (peek == '\r') {
                peek = tok_nextc(tok);
            }
            // Special case when the backslash is right before a curly
            // brace. We have to restore and return the control back
            // to the loop for the next iteration.
            if (peek == '{' || peek == '}') {
                if (!current_tok->f_string_raw) {
                    if (warn_invalid_escape_sequence(tok, peek)) {
                        return MAKE_TOKEN(ERRORTOKEN);
                    }
                }
                tok_backup(tok, peek);
                continue;
            }

            if (!current_tok->f_string_raw) {
                if (peek == 'N') {
                    /* Handle named unicode escapes (\N{BULLET}) */
                    peek = tok_nextc(tok);
                    if (peek == '{') {
                        unicode_escape = 1;
                    } else {
                        tok_backup(tok, peek);
                    }
                }
            } /* else {
                skip the escaped character
            }*/
        }
    }

    // Backup the f-string quotes to emit a final FSTRING_MIDDLE and
    // add the quotes to the FSTRING_END in the next tokenizer iteration.
    for (int i = 0; i < current_tok->f_string_quote_size; i++) {
        tok_backup(tok, current_tok->f_string_quote);
    }
    p_start = tok->start;
    p_end = tok->cur;
    return MAKE_TOKEN(FSTRING_MIDDLE);
}
  2741. static int
  2742. tok_get(struct tok_state *tok, struct token *token)
  2743. {
  2744. tokenizer_mode *current_tok = TOK_GET_MODE(tok);
  2745. if (current_tok->kind == TOK_REGULAR_MODE) {
  2746. return tok_get_normal_mode(tok, current_tok, token);
  2747. } else {
  2748. return tok_get_fstring_mode(tok, current_tok, token);
  2749. }
  2750. }
  2751. int
  2752. _PyTokenizer_Get(struct tok_state *tok, struct token *token)
  2753. {
  2754. int result = tok_get(tok, token);
  2755. if (tok->decoding_erred) {
  2756. result = ERRORTOKEN;
  2757. tok->done = E_DECODE;
  2758. }
  2759. return result;
  2760. }
#if defined(__wasi__) || (defined(__EMSCRIPTEN__) && (__EMSCRIPTEN_major__ >= 3))
// fdopen() with borrowed fd. WASI does not provide dup() and Emscripten's
// dup() emulation with open() is slow.
typedef union {
    void *cookie;   // opaque handle passed through fopencookie()
    int fd;         // the borrowed descriptor it actually carries
} borrowed;

// fopencookie() read callback: unpack the fd from the cookie pointer and
// forward to read(2).
static ssize_t
borrow_read(void *cookie, char *buf, size_t size)
{
    borrowed b = {.cookie = cookie};
    return read(b.fd, (void *)buf, size);
}

// Wrap `fd` in a read-only FILE* WITHOUT taking ownership: fclose() on the
// returned stream does not close the caller's descriptor.
static FILE *
fdopen_borrow(int fd) {
    // supports only reading. seek fails. close and write are no-ops.
    cookie_io_functions_t io_cb = {borrow_read, NULL, NULL, NULL};
    borrowed b = {.fd = fd};
    return fopencookie(b.cookie, "r", io_cb);
}
#else
// Portable fallback: duplicate the descriptor so the eventual fclose()
// closes only our copy, never the caller's fd. Returns NULL on dup failure.
static FILE *
fdopen_borrow(int fd) {
    fd = _Py_dup(fd);
    if (fd < 0) {
        return NULL;
    }
    return fdopen(fd, "r");
}
#endif
  2791. /* Get the encoding of a Python file. Check for the coding cookie and check if
  2792. the file starts with a BOM.
  2793. _PyTokenizer_FindEncodingFilename() returns NULL when it can't find the
  2794. encoding in the first or second line of the file (in which case the encoding
  2795. should be assumed to be UTF-8).
  2796. The char* returned is malloc'ed via PyMem_Malloc() and thus must be freed
  2797. by the caller. */
  2798. char *
  2799. _PyTokenizer_FindEncodingFilename(int fd, PyObject *filename)
  2800. {
  2801. struct tok_state *tok;
  2802. FILE *fp;
  2803. char *encoding = NULL;
  2804. fp = fdopen_borrow(fd);
  2805. if (fp == NULL) {
  2806. return NULL;
  2807. }
  2808. tok = _PyTokenizer_FromFile(fp, NULL, NULL, NULL);
  2809. if (tok == NULL) {
  2810. fclose(fp);
  2811. return NULL;
  2812. }
  2813. if (filename != NULL) {
  2814. tok->filename = Py_NewRef(filename);
  2815. }
  2816. else {
  2817. tok->filename = PyUnicode_FromString("<string>");
  2818. if (tok->filename == NULL) {
  2819. fclose(fp);
  2820. _PyTokenizer_Free(tok);
  2821. return encoding;
  2822. }
  2823. }
  2824. struct token token;
  2825. // We don't want to report warnings here because it could cause infinite recursion
  2826. // if fetching the encoding shows a warning.
  2827. tok->report_warnings = 0;
  2828. while (tok->lineno < 2 && tok->done == E_OK) {
  2829. _PyToken_Init(&token);
  2830. _PyTokenizer_Get(tok, &token);
  2831. _PyToken_Free(&token);
  2832. }
  2833. fclose(fp);
  2834. if (tok->encoding) {
  2835. encoding = (char *)PyMem_Malloc(strlen(tok->encoding) + 1);
  2836. if (encoding) {
  2837. strcpy(encoding, tok->encoding);
  2838. }
  2839. }
  2840. _PyTokenizer_Free(tok);
  2841. return encoding;
  2842. }
  2843. #ifdef Py_DEBUG
  2844. void
  2845. tok_dump(int type, char *start, char *end)
  2846. {
  2847. fprintf(stderr, "%s", _PyParser_TokenNames[type]);
  2848. if (type == NAME || type == NUMBER || type == STRING || type == OP)
  2849. fprintf(stderr, "(%.*s)", (int)(end - start), start);
  2850. }
  2851. #endif // Py_DEBUG