  1. """Header value parser implementing various email-related RFC parsing rules.
  2. The parsing methods defined in this module implement various email related
  3. parsing rules. Principal among them is RFC 5322, which is the followon
  4. to RFC 2822 and primarily a clarification of the former. It also implements
  5. RFC 2047 encoded word decoding.
  6. RFC 5322 goes to considerable trouble to maintain backward compatibility with
  7. RFC 822 in the parse phase, while cleaning up the structure on the generation
  8. phase. This parser supports correct RFC 5322 generation by tagging white space
  9. as folding white space only when folding is allowed in the non-obsolete rule
  10. sets. Actually, the parser is even more generous when accepting input than RFC
  11. 5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
  12. Where possible deviations from the standard are annotated on the 'defects'
  13. attribute of tokens that deviate.
  14. The general structure of the parser follows RFC 5322, and uses its terminology
  15. where there is a direct correspondence. Where the implementation requires a
  16. somewhat different structure than that used by the formal grammar, new terms
  17. that mimic the closest existing terms are used. Thus, it really helps to have
  18. a copy of RFC 5322 handy when studying this code.
  19. Input to the parser is a string that has already been unfolded according to
  20. RFC 5322 rules. According to the RFC this unfolding is the very first step, and
  21. this parser leaves the unfolding step to a higher level message parser, which
  22. will have already detected the line breaks that need unfolding while
  23. determining the beginning and end of each header.
  24. The output of the parser is a TokenList object, which is a list subclass. A
  25. TokenList is a recursive data structure. The terminal nodes of the structure
  26. are Terminal objects, which are subclasses of str. These do not correspond
  27. directly to terminal objects in the formal grammar, but are instead more
  28. practical higher level combinations of true terminals.
  29. All TokenList and Terminal objects have a 'value' attribute, which produces the
  30. semantically meaningful value of that part of the parse subtree. The value of
  31. all whitespace tokens (no matter how many sub-tokens they may contain) is a
  32. single space, as per the RFC rules. This includes 'CFWS', which is herein
  33. included in the general class of whitespace tokens. There is one exception to
  34. the rule that whitespace tokens are collapsed into single spaces in values: in
  35. the value of a 'bare-quoted-string' (a quoted-string with no leading or
  36. trailing whitespace), any whitespace that appeared between the quotation marks
  37. is preserved in the returned value. Note that in all Terminal strings quoted
  38. pairs are turned into their unquoted values.
  39. All TokenList and Terminal objects also have a string value, which attempts to
  40. be a "canonical" representation of the RFC-compliant form of the substring that
  41. produced the parsed subtree, including minimal use of quoted pair quoting.
  42. Whitespace runs are not collapsed.
  43. Comment tokens also have a 'content' attribute providing the string found
  44. between the parens (including any nested comments) with whitespace preserved.
  45. All TokenList and Terminal objects have a 'defects' attribute which is a
  46. possibly empty list all of the defects found while creating the token. Defects
  47. may appear on any token in the tree, and a composite list of all defects in the
  48. subtree is available through the 'all_defects' attribute of any node. (For
  49. Terminal notes x.defects == x.all_defects.)
  50. Each object in a parse tree is called a 'token', and each has a 'token_type'
  51. attribute that gives the name from the RFC 5322 grammar that it represents.
  52. Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
  53. may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
  54. It is returned in place of lists of (ctext/quoted-pair) and
  55. (qtext/quoted-pair).
  56. XXX: provide complete list of token types.
  57. """
import re
import sys
import urllib   # For urllib.parse.unquote
from string import hexdigits
from operator import itemgetter
from email import _encoded_words as _ew
from email import errors
from email import utils

#
# Useful constants and functions
#

WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
NLSET = {'\n', '\r'}
SPECIALSNL = SPECIALS | NLSET


def quote_string(value):
    return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
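
# For illustration (not in the original source): quote_string wraps a value in
# DQUOTEs and backslash-escapes any embedded backslashes and double quotes.
#
#     >>> quote_string('say "hi"')
#     '"say \\"hi\\""'
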
# Match a RFC 2047 word, looks like =?utf-8?q?someword?=
rfc2047_matcher = re.compile(r'''
   =\?            # literal =?
   [^?]*          # charset
   \?             # literal ?
   [qQbB]         # literal 'q' or 'b', case insensitive
   \?             # literal ?
   .*?            # encoded word
   \?=            # literal ?=
''', re.VERBOSE | re.MULTILINE)
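
# Sketch (not part of the original source): the matcher is used to spot an
# encoded word embedded in the middle of an otherwise ordinary token.
#
#     >>> bool(rfc2047_matcher.search('stuff=?utf-8?q?foo?=stuff'))
#     True
#     >>> bool(rfc2047_matcher.search('plain text'))
#     False
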
#
# TokenList and its subclasses
#

class TokenList(list):

    token_type = None
    syntactic_break = True
    ew_combine_allowed = True

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.defects = []

    def __str__(self):
        return ''.join(str(x) for x in self)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                               super().__repr__())

    @property
    def value(self):
        return ''.join(x.value for x in self if x.value)

    @property
    def all_defects(self):
        return sum((x.all_defects for x in self), self.defects)

    def startswith_fws(self):
        return self[0].startswith_fws()

    @property
    def as_ew_allowed(self):
        """True if all top level tokens of this part may be RFC2047 encoded."""
        return all(part.as_ew_allowed for part in self)

    @property
    def comments(self):
        comments = []
        for token in self:
            comments.extend(token.comments)
        return comments

    def fold(self, *, policy):
        return _refold_parse_tree(self, policy=policy)

    def pprint(self, indent=''):
        print(self.ppstr(indent=indent))

    def ppstr(self, indent=''):
        return '\n'.join(self._pp(indent=indent))

    def _pp(self, indent=''):
        yield '{}{}/{}('.format(
            indent,
            self.__class__.__name__,
            self.token_type)
        for token in self:
            if not hasattr(token, '_pp'):
                yield (indent + '    !! invalid element in token '
                                'list: {!r}'.format(token))
            else:
                yield from token._pp(indent+'    ')
        if self.defects:
            extra = ' Defects: {}'.format(self.defects)
        else:
            extra = ''
        yield '{}){}'.format(indent, extra)

class WhiteSpaceTokenList(TokenList):

    @property
    def value(self):
        return ' '

    @property
    def comments(self):
        return [x.content for x in self if x.token_type=='comment']


class UnstructuredTokenList(TokenList):
    token_type = 'unstructured'


class Phrase(TokenList):
    token_type = 'phrase'


class Word(TokenList):
    token_type = 'word'


class CFWSList(WhiteSpaceTokenList):
    token_type = 'cfws'


class Atom(TokenList):
    token_type = 'atom'


class Token(TokenList):
    token_type = 'token'
    encode_as_ew = False


class EncodedWord(TokenList):
    token_type = 'encoded-word'
    cte = None
    charset = None
    lang = None


class QuotedString(TokenList):

    token_type = 'quoted-string'

    @property
    def content(self):
        for x in self:
            if x.token_type == 'bare-quoted-string':
                return x.value

    @property
    def quoted_value(self):
        res = []
        for x in self:
            if x.token_type == 'bare-quoted-string':
                res.append(str(x))
            else:
                res.append(x.value)
        return ''.join(res)

    @property
    def stripped_value(self):
        for token in self:
            if token.token_type == 'bare-quoted-string':
                return token.value


class BareQuotedString(QuotedString):

    token_type = 'bare-quoted-string'

    def __str__(self):
        return quote_string(''.join(str(x) for x in self))

    @property
    def value(self):
        return ''.join(str(x) for x in self)


class Comment(WhiteSpaceTokenList):

    token_type = 'comment'

    def __str__(self):
        return ''.join(sum([
                            ["("],
                            [self.quote(x) for x in self],
                            [")"],
                            ], []))

    def quote(self, value):
        if value.token_type == 'comment':
            return str(value)
        return str(value).replace('\\', '\\\\').replace(
                                  '(', r'\(').replace(
                                  ')', r'\)')

    @property
    def content(self):
        return ''.join(str(x) for x in self)

    @property
    def comments(self):
        return [self.content]

class AddressList(TokenList):

    token_type = 'address-list'

    @property
    def addresses(self):
        return [x for x in self if x.token_type=='address']

    @property
    def mailboxes(self):
        return sum((x.mailboxes
                    for x in self if x.token_type=='address'), [])

    @property
    def all_mailboxes(self):
        return sum((x.all_mailboxes
                    for x in self if x.token_type=='address'), [])


class Address(TokenList):

    token_type = 'address'

    @property
    def display_name(self):
        if self[0].token_type == 'group':
            return self[0].display_name

    @property
    def mailboxes(self):
        if self[0].token_type == 'mailbox':
            return [self[0]]
        elif self[0].token_type == 'invalid-mailbox':
            return []
        return self[0].mailboxes

    @property
    def all_mailboxes(self):
        if self[0].token_type == 'mailbox':
            return [self[0]]
        elif self[0].token_type == 'invalid-mailbox':
            return [self[0]]
        return self[0].all_mailboxes


class MailboxList(TokenList):

    token_type = 'mailbox-list'

    @property
    def mailboxes(self):
        return [x for x in self if x.token_type=='mailbox']

    @property
    def all_mailboxes(self):
        return [x for x in self
                if x.token_type in ('mailbox', 'invalid-mailbox')]


class GroupList(TokenList):

    token_type = 'group-list'

    @property
    def mailboxes(self):
        if not self or self[0].token_type != 'mailbox-list':
            return []
        return self[0].mailboxes

    @property
    def all_mailboxes(self):
        if not self or self[0].token_type != 'mailbox-list':
            return []
        return self[0].all_mailboxes


class Group(TokenList):

    token_type = "group"

    @property
    def mailboxes(self):
        if self[2].token_type != 'group-list':
            return []
        return self[2].mailboxes

    @property
    def all_mailboxes(self):
        if self[2].token_type != 'group-list':
            return []
        return self[2].all_mailboxes

    @property
    def display_name(self):
        return self[0].display_name


class NameAddr(TokenList):

    token_type = 'name-addr'

    @property
    def display_name(self):
        if len(self) == 1:
            return None
        return self[0].display_name

    @property
    def local_part(self):
        return self[-1].local_part

    @property
    def domain(self):
        return self[-1].domain

    @property
    def route(self):
        return self[-1].route

    @property
    def addr_spec(self):
        return self[-1].addr_spec


class AngleAddr(TokenList):

    token_type = 'angle-addr'

    @property
    def local_part(self):
        for x in self:
            if x.token_type == 'addr-spec':
                return x.local_part

    @property
    def domain(self):
        for x in self:
            if x.token_type == 'addr-spec':
                return x.domain

    @property
    def route(self):
        for x in self:
            if x.token_type == 'obs-route':
                return x.domains

    @property
    def addr_spec(self):
        for x in self:
            if x.token_type == 'addr-spec':
                if x.local_part:
                    return x.addr_spec
                else:
                    return quote_string(x.local_part) + x.addr_spec
        else:
            return '<>'

class ObsRoute(TokenList):

    token_type = 'obs-route'

    @property
    def domains(self):
        return [x.domain for x in self if x.token_type == 'domain']


class Mailbox(TokenList):

    token_type = 'mailbox'

    @property
    def display_name(self):
        if self[0].token_type == 'name-addr':
            return self[0].display_name

    @property
    def local_part(self):
        return self[0].local_part

    @property
    def domain(self):
        return self[0].domain

    @property
    def route(self):
        if self[0].token_type == 'name-addr':
            return self[0].route

    @property
    def addr_spec(self):
        return self[0].addr_spec


class InvalidMailbox(TokenList):

    token_type = 'invalid-mailbox'

    @property
    def display_name(self):
        return None

    local_part = domain = route = addr_spec = display_name


class Domain(TokenList):

    token_type = 'domain'
    as_ew_allowed = False

    @property
    def domain(self):
        return ''.join(super().value.split())


class DotAtom(TokenList):
    token_type = 'dot-atom'


class DotAtomText(TokenList):
    token_type = 'dot-atom-text'
    as_ew_allowed = True


class NoFoldLiteral(TokenList):
    token_type = 'no-fold-literal'
    as_ew_allowed = False


class AddrSpec(TokenList):

    token_type = 'addr-spec'
    as_ew_allowed = False

    @property
    def local_part(self):
        return self[0].local_part

    @property
    def domain(self):
        if len(self) < 3:
            return None
        return self[-1].domain

    @property
    def value(self):
        if len(self) < 3:
            return self[0].value
        return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()

    @property
    def addr_spec(self):
        nameset = set(self.local_part)
        if len(nameset) > len(nameset-DOT_ATOM_ENDS):
            lp = quote_string(self.local_part)
        else:
            lp = self.local_part
        if self.domain is not None:
            return lp + '@' + self.domain
        return lp


class ObsLocalPart(TokenList):

    token_type = 'obs-local-part'
    as_ew_allowed = False


class DisplayName(Phrase):

    token_type = 'display-name'
    ew_combine_allowed = False

    @property
    def display_name(self):
        res = TokenList(self)
        if len(res) == 0:
            return res.value
        if res[0].token_type == 'cfws':
            res.pop(0)
        else:
            if (isinstance(res[0], TokenList) and
                    res[0][0].token_type == 'cfws'):
                res[0] = TokenList(res[0][1:])
        if res[-1].token_type == 'cfws':
            res.pop()
        else:
            if (isinstance(res[-1], TokenList) and
                    res[-1][-1].token_type == 'cfws'):
                res[-1] = TokenList(res[-1][:-1])
        return res.value

    @property
    def value(self):
        quote = False
        if self.defects:
            quote = True
        else:
            for x in self:
                if x.token_type == 'quoted-string':
                    quote = True
        if len(self) != 0 and quote:
            pre = post = ''
            if (self[0].token_type == 'cfws' or
                isinstance(self[0], TokenList) and
                    self[0][0].token_type == 'cfws'):
                pre = ' '
            if (self[-1].token_type == 'cfws' or
                isinstance(self[-1], TokenList) and
                    self[-1][-1].token_type == 'cfws'):
                post = ' '
            return pre+quote_string(self.display_name)+post
        else:
            return super().value

class LocalPart(TokenList):

    token_type = 'local-part'
    as_ew_allowed = False

    @property
    def value(self):
        if self[0].token_type == "quoted-string":
            return self[0].quoted_value
        else:
            return self[0].value

    @property
    def local_part(self):
        # Strip whitespace from front, back, and around dots.
        res = [DOT]
        last = DOT
        last_is_tl = False
        for tok in self[0] + [DOT]:
            if tok.token_type == 'cfws':
                continue
            if (last_is_tl and tok.token_type == 'dot' and
                    last[-1].token_type == 'cfws'):
                res[-1] = TokenList(last[:-1])
            is_tl = isinstance(tok, TokenList)
            if (is_tl and last.token_type == 'dot' and
                    tok[0].token_type == 'cfws'):
                res.append(TokenList(tok[1:]))
            else:
                res.append(tok)
            last = res[-1]
            last_is_tl = is_tl
        res = TokenList(res[1:-1])
        return res.value


class DomainLiteral(TokenList):

    token_type = 'domain-literal'
    as_ew_allowed = False

    @property
    def domain(self):
        return ''.join(super().value.split())

    @property
    def ip(self):
        for x in self:
            if x.token_type == 'ptext':
                return x.value

class MIMEVersion(TokenList):

    token_type = 'mime-version'
    major = None
    minor = None


class Parameter(TokenList):

    token_type = 'parameter'
    sectioned = False
    extended = False
    charset = 'us-ascii'

    @property
    def section_number(self):
        # Because the first token, the attribute (name) eats CFWS, the second
        # token is always the section if there is one.
        return self[1].number if self.sectioned else 0

    @property
    def param_value(self):
        # This is part of the "handle quoted extended parameters" hack.
        for token in self:
            if token.token_type == 'value':
                return token.stripped_value
            if token.token_type == 'quoted-string':
                for token in token:
                    if token.token_type == 'bare-quoted-string':
                        for token in token:
                            if token.token_type == 'value':
                                return token.stripped_value
        return ''


class InvalidParameter(Parameter):

    token_type = 'invalid-parameter'


class Attribute(TokenList):

    token_type = 'attribute'

    @property
    def stripped_value(self):
        for token in self:
            if token.token_type.endswith('attrtext'):
                return token.value


class Section(TokenList):

    token_type = 'section'
    number = None


class Value(TokenList):

    token_type = 'value'

    @property
    def stripped_value(self):
        token = self[0]
        if token.token_type == 'cfws':
            token = self[1]
        if token.token_type.endswith(
                ('quoted-string', 'attribute', 'extended-attribute')):
            return token.stripped_value
        return self.value

class MimeParameters(TokenList):

    token_type = 'mime-parameters'
    syntactic_break = False

    @property
    def params(self):
        # The RFC specifically states that the ordering of parameters is not
        # guaranteed and may be reordered by the transport layer.  So we have
        # to assume the RFC 2231 pieces can come in any order.  However, we
        # output them in the order that we first see a given name, which gives
        # us a stable __str__.
        params = {}  # Using order preserving dict from Python 3.7+
        for token in self:
            if not token.token_type.endswith('parameter'):
                continue
            if token[0].token_type != 'attribute':
                continue
            name = token[0].value.strip()
            if name not in params:
                params[name] = []
            params[name].append((token.section_number, token))
        for name, parts in params.items():
            parts = sorted(parts, key=itemgetter(0))
            first_param = parts[0][1]
            charset = first_param.charset
            # Our arbitrary error recovery is to ignore duplicate parameters,
            # to use appearance order if there are duplicate rfc 2231 parts,
            # and to ignore gaps.  This mimics the error recovery of get_param.
            if not first_param.extended and len(parts) > 1:
                if parts[1][0] == 0:
                    parts[1][1].defects.append(errors.InvalidHeaderDefect(
                        'duplicate parameter name; duplicate(s) ignored'))
                    parts = parts[:1]
                # Else assume the *0* was missing...note that this is different
                # from get_param, but we registered a defect for this earlier.
            value_parts = []
            i = 0
            for section_number, param in parts:
                if section_number != i:
                    # We could get fancier here and look for a complete
                    # duplicate extended parameter and ignore the second one
                    # seen.  But we're not doing that.  The old code didn't.
                    if not param.extended:
                        param.defects.append(errors.InvalidHeaderDefect(
                            'duplicate parameter name; duplicate ignored'))
                        continue
                    else:
                        param.defects.append(errors.InvalidHeaderDefect(
                            "inconsistent RFC2231 parameter numbering"))
                i += 1
                value = param.param_value
                if param.extended:
                    try:
                        value = urllib.parse.unquote_to_bytes(value)
                    except UnicodeEncodeError:
                        # source had surrogate escaped bytes.  What we do now
                        # is a bit of an open question.  I'm not sure this is
                        # the best choice, but it is what the old algorithm did
                        value = urllib.parse.unquote(value, encoding='latin-1')
                    else:
                        try:
                            value = value.decode(charset, 'surrogateescape')
                        except (LookupError, UnicodeEncodeError):
                            # XXX: there should really be a custom defect for
                            # unknown character set to make it easy to find,
                            # because otherwise unknown charset is a silent
                            # failure.
                            value = value.decode('us-ascii', 'surrogateescape')
                        if utils._has_surrogates(value):
                            param.defects.append(errors.UndecodableBytesDefect())
                value_parts.append(value)
            value = ''.join(value_parts)
            yield name, value

    def __str__(self):
        params = []
        for name, value in self.params:
            if value:
                params.append('{}={}'.format(name, quote_string(value)))
            else:
                params.append(name)
        params = '; '.join(params)
        return ' ' + params if params else ''

class ParameterizedHeaderValue(TokenList):

    # Set this false so that the value doesn't wind up on a new line even
    # if it and the parameters would fit there but not on the first line.
    syntactic_break = False

    @property
    def params(self):
        for token in reversed(self):
            if token.token_type == 'mime-parameters':
                return token.params
        return {}


class ContentType(ParameterizedHeaderValue):
    token_type = 'content-type'
    as_ew_allowed = False
    maintype = 'text'
    subtype = 'plain'


class ContentDisposition(ParameterizedHeaderValue):
    token_type = 'content-disposition'
    as_ew_allowed = False
    content_disposition = None


class ContentTransferEncoding(TokenList):
    token_type = 'content-transfer-encoding'
    as_ew_allowed = False
    cte = '7bit'


class HeaderLabel(TokenList):
    token_type = 'header-label'
    as_ew_allowed = False


class MsgID(TokenList):
    token_type = 'msg-id'
    as_ew_allowed = False

    def fold(self, policy):
        # message-id tokens may not be folded.
        return str(self) + policy.linesep


class MessageID(MsgID):
    token_type = 'message-id'


class InvalidMessageID(MessageID):
    token_type = 'invalid-message-id'


class Header(TokenList):
    token_type = 'header'


#
# Terminal classes and instances
#

class Terminal(str):

    as_ew_allowed = True
    ew_combine_allowed = True
    syntactic_break = True

    def __new__(cls, value, token_type):
        self = super().__new__(cls, value)
        self.token_type = token_type
        self.defects = []
        return self

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, super().__repr__())

    def pprint(self):
        print(self.__class__.__name__ + '/' + self.token_type)

    @property
    def all_defects(self):
        return list(self.defects)

    def _pp(self, indent=''):
        return ["{}{}/{}({}){}".format(
            indent,
            self.__class__.__name__,
            self.token_type,
            super().__repr__(),
            '' if not self.defects else ' {}'.format(self.defects),
            )]

    def pop_trailing_ws(self):
        # This terminates the recursion.
        return None

    @property
    def comments(self):
        return []

    def __getnewargs__(self):
        return (str(self), self.token_type)


class WhiteSpaceTerminal(Terminal):

    @property
    def value(self):
        return ' '

    def startswith_fws(self):
        return True


class ValueTerminal(Terminal):

    @property
    def value(self):
        return self

    def startswith_fws(self):
        return False


class EWWhiteSpaceTerminal(WhiteSpaceTerminal):

    @property
    def value(self):
        return ''

    def __str__(self):
        return ''


class _InvalidEwError(errors.HeaderParseError):
    """Invalid encoded word found while parsing headers."""


# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees.  Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
ListSeparator.as_ew_allowed = False
ListSeparator.syntactic_break = False
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')

#
# Parser
#

# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser.  Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input.  Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
#     phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
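
# A concrete sketch of that calling convention (not part of the original
# source), assuming an interactive session; get_atom is defined further down
# in this module:
#
#     >>> atom, rest = get_atom(' (a comment) hello world')
#     >>> atom.token_type, rest
#     ('atom', 'world')
#     >>> atom.value        # CFWS collapses to single spaces in the value
#     ' hello '
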

_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
    re.escape(''.join(ATOM_ENDS)))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
    re.escape(''.join(TOKEN_ENDS)))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
    re.escape(''.join(ATTRIBUTE_ENDS)))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
    re.escape(''.join(EXTENDED_ATTRIBUTE_ENDS)))).match


def _validate_xtext(xtext):
    """If input token contains ASCII non-printables, register a defect."""
    non_printables = _non_printable_finder(xtext)
    if non_printables:
        xtext.defects.append(errors.NonPrintableDefect(non_printables))
    if utils._has_surrogates(xtext):
        xtext.defects.append(errors.UndecodableBytesDefect(
            "Non-ASCII characters found in header token"))


def _get_ptext_to_endchars(value, endchars):
    """Scan printables/quoted-pairs until endchars and return unquoted ptext.

    This function turns a run of qcontent, ccontent-without-comments, or
    dtext-with-quoted-printables into a single string by unquoting any
    quoted printables.  It returns the string, the remaining value, and
    a flag that is True iff there were any quoted printables decoded.

    """
    fragment, *remainder = _wsp_splitter(value, 1)
    vchars = []
    escape = False
    had_qp = False
    for pos in range(len(fragment)):
        if fragment[pos] == '\\':
            if escape:
                escape = False
                had_qp = True
            else:
                escape = True
                continue
        if escape:
            escape = False
        elif fragment[pos] in endchars:
            break
        vchars.append(fragment[pos])
    else:
        pos = pos + 1
    return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
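
# Minimal sketch of this helper's contract (not part of the original source),
# assuming an interactive session: scanning stops at the first end character
# or at the first whitespace run, whichever comes first.
#
#     >>> _get_ptext_to_endchars('foo(bar) baz', '()')
#     ('foo', '(bar) baz', False)
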

def get_fws(value):
    """FWS = 1*WSP

    This isn't the RFC definition.  We're using fws to represent tokens where
    folding can be done, but when we are parsing the *un*folding has already
    been done so we don't need to watch out for CRLF.

    """
    newvalue = value.lstrip()
    fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
    return fws, newvalue


def get_encoded_word(value):
    """ encoded-word = "=?" charset "?" encoding "?" encoded-text "?="

    """
    ew = EncodedWord()
    if not value.startswith('=?'):
        raise errors.HeaderParseError(
            "expected encoded word but found {}".format(value))
    tok, *remainder = value[2:].split('?=', 1)
    if tok == value[2:]:
        raise errors.HeaderParseError(
            "expected encoded word but found {}".format(value))
    remstr = ''.join(remainder)
    if (len(remstr) > 1 and
        remstr[0] in hexdigits and
        remstr[1] in hexdigits and
        tok.count('?') < 2):
        # The ? after the CTE was followed by an encoded word escape (=XX).
        rest, *remainder = remstr.split('?=', 1)
        tok = tok + '?=' + rest
    if len(tok.split()) > 1:
        ew.defects.append(errors.InvalidHeaderDefect(
            "whitespace inside encoded word"))
    ew.cte = value
    value = ''.join(remainder)
    try:
        text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
    except (ValueError, KeyError):
        raise _InvalidEwError(
            "encoded word format invalid: '{}'".format(ew.cte))
    ew.charset = charset
    ew.lang = lang
    ew.defects.extend(defects)
    while text:
        if text[0] in WSP:
            token, text = get_fws(text)
            ew.append(token)
            continue
        chars, *remainder = _wsp_splitter(text, 1)
        vtext = ValueTerminal(chars, 'vtext')
        _validate_xtext(vtext)
        ew.append(vtext)
        text = ''.join(remainder)
    # Encoded words should be followed by a WS
    if value and value[0] not in WSP:
        ew.defects.append(errors.InvalidHeaderDefect(
            "missing trailing whitespace after encoded-word"))
    return ew, value

def get_unstructured(value):
    """unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
       obs-unstruct = *((*LF *CR *(obs-utext) *LF *CR)) / FWS)
       obs-utext = %d0 / obs-NO-WS-CTL / LF / CR

    obs-NO-WS-CTL is control characters except WSP/CR/LF.

    So, basically, we have printable runs, plus control characters or nulls in
    the obsolete syntax, separated by whitespace.  Since RFC 2047 uses the
    obsolete syntax in its specification, but requires whitespace on either
    side of the encoded words, I can see no reason to need to separate the
    non-printable-non-whitespace from the printable runs if they occur, so we
    parse this into xtext tokens separated by WSP tokens.

    Because an 'unstructured' value must by definition constitute the entire
    value, this 'get' routine does not return a remaining value, only the
    parsed TokenList.

    """
    # XXX: but what about bare CR and LF?  They might signal the start or
    # end of an encoded word.  YAGNI for now, since our current parsers
    # will never send us strings with bare CR or LF.
    unstructured = UnstructuredTokenList()
    while value:
        if value[0] in WSP:
            token, value = get_fws(value)
            unstructured.append(token)
            continue
        valid_ew = True
        if value.startswith('=?'):
            try:
                token, value = get_encoded_word(value)
            except _InvalidEwError:
                valid_ew = False
            except errors.HeaderParseError:
                # XXX: Need to figure out how to register defects when
                # appropriate here.
                pass
            else:
                have_ws = True
                if len(unstructured) > 0:
                    if unstructured[-1].token_type != 'fws':
                        unstructured.defects.append(errors.InvalidHeaderDefect(
                            "missing whitespace before encoded word"))
                        have_ws = False
                if have_ws and len(unstructured) > 1:
                    if unstructured[-2].token_type == 'encoded-word':
                        unstructured[-1] = EWWhiteSpaceTerminal(
                            unstructured[-1], 'fws')
                unstructured.append(token)
                continue
        tok, *remainder = _wsp_splitter(value, 1)
        # Split in the middle of an atom if there is a rfc2047 encoded word
        # which does not have WSP on both sides. The defect will be registered
        # the next time through the loop.
        # This needs to only be performed when the encoded word is valid;
        # otherwise, performing it on an invalid encoded word can cause
        # the parser to go in an infinite loop.
        if valid_ew and rfc2047_matcher.search(tok):
            tok, *remainder = value.partition('=?')
        vtext = ValueTerminal(tok, 'vtext')
        _validate_xtext(vtext)
        unstructured.append(vtext)
        value = ''.join(remainder)
    return unstructured
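
# A minimal usage sketch (not part of the original module), assuming an
# interactive session; get_unstructured consumes the whole value and decodes
# any RFC 2047 encoded words it finds:
#
#     >>> tl = get_unstructured('Hello =?utf-8?q?W=C3=B6rld?=')
#     >>> tl.token_type
#     'unstructured'
#     >>> tl.value
#     'Hello Wörld'
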

def get_qp_ctext(value):
    r"""ctext = <printable ascii except \ ( )>

    This is not the RFC ctext, since we are handling nested comments in comment
    and unquoting quoted-pairs here.  We allow anything except the '()'
    characters, but if we find any ASCII other than the RFC defined printable
    ASCII, a NonPrintableDefect is added to the token's defects list.  Since
    quoted pairs are converted to their unquoted values, what is returned is
    a 'ptext' token.  In this case it is a WhiteSpaceTerminal, so its value
    is ' '.

    """
    ptext, value, _ = _get_ptext_to_endchars(value, '()')
    ptext = WhiteSpaceTerminal(ptext, 'ptext')
    _validate_xtext(ptext)
    return ptext, value

def get_qcontent(value):
    """qcontent = qtext / quoted-pair

    We allow anything except the DQUOTE character, but if we find any ASCII
    other than the RFC defined printable ASCII, a NonPrintableDefect is
    added to the token's defects list.  Any quoted pairs are converted to their
    unquoted values, so what is returned is a 'ptext' token.  In this case it
    is a ValueTerminal.

    """
    ptext, value, _ = _get_ptext_to_endchars(value, '"')
    ptext = ValueTerminal(ptext, 'ptext')
    _validate_xtext(ptext)
    return ptext, value


def get_atext(value):
    """atext = <matches _atext_matcher>

    We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
    the token's defects list if we find non-atext characters.
    """
    m = _non_atom_end_matcher(value)
    if not m:
        raise errors.HeaderParseError(
            "expected atext but found '{}'".format(value))
    atext = m.group()
    value = value[len(atext):]
    atext = ValueTerminal(atext, 'atext')
    _validate_xtext(atext)
    return atext, value


def get_bare_quoted_string(value):
    """bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE

    A quoted-string without the leading or trailing white space.  Its
    value is the text between the quote marks, with whitespace
    preserved and quoted pairs decoded.
    """
    if not value or value[0] != '"':
        raise errors.HeaderParseError(
            "expected '\"' but found '{}'".format(value))
    bare_quoted_string = BareQuotedString()
    value = value[1:]
    if value and value[0] == '"':
        token, value = get_qcontent(value)
        bare_quoted_string.append(token)
    while value and value[0] != '"':
        if value[0] in WSP:
            token, value = get_fws(value)
        elif value[:2] == '=?':
            valid_ew = False
            try:
                token, value = get_encoded_word(value)
                bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
                    "encoded word inside quoted string"))
                valid_ew = True
            except errors.HeaderParseError:
                token, value = get_qcontent(value)
            # Collapse the whitespace between two encoded words that occur in a
            # bare-quoted-string.
            if valid_ew and len(bare_quoted_string) > 1:
                if (bare_quoted_string[-1].token_type == 'fws' and
                        bare_quoted_string[-2].token_type == 'encoded-word'):
                    bare_quoted_string[-1] = EWWhiteSpaceTerminal(
                        bare_quoted_string[-1], 'fws')
        else:
            token, value = get_qcontent(value)
        bare_quoted_string.append(token)
    if not value:
        bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
            "end of header inside quoted string"))
        return bare_quoted_string, value
    return bare_quoted_string, value[1:]

def get_comment(value):
    """comment = "(" *([FWS] ccontent) [FWS] ")"
       ccontent = ctext / quoted-pair / comment

    We handle nested comments here, and quoted-pair in our qp-ctext routine.
    """
    if value and value[0] != '(':
        raise errors.HeaderParseError(
            "expected '(' but found '{}'".format(value))
    comment = Comment()
    value = value[1:]
    while value and value[0] != ")":
        if value[0] in WSP:
            token, value = get_fws(value)
        elif value[0] == '(':
            token, value = get_comment(value)
        else:
            token, value = get_qp_ctext(value)
        comment.append(token)
    if not value:
        comment.defects.append(errors.InvalidHeaderDefect(
            "end of header inside comment"))
        return comment, value
    return comment, value[1:]


def get_cfws(value):
    """CFWS = (1*([FWS] comment) [FWS]) / FWS

    """
    cfws = CFWSList()
    while value and value[0] in CFWS_LEADER:
        if value[0] in WSP:
            token, value = get_fws(value)
        else:
            token, value = get_comment(value)
        cfws.append(token)
    return cfws, value


def get_quoted_string(value):
    """quoted-string = [CFWS] <bare-quoted-string> [CFWS]

    'bare-quoted-string' is an intermediate class defined by this
    parser and not by the RFC grammar.  It is the quoted string
    without any attached CFWS.
    """
    quoted_string = QuotedString()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        quoted_string.append(token)
    token, value = get_bare_quoted_string(value)
    quoted_string.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        quoted_string.append(token)
    return quoted_string, value


def get_atom(value):
    """atom = [CFWS] 1*atext [CFWS]

    An atom could be an rfc2047 encoded word.
    """
    atom = Atom()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        atom.append(token)
    if value and value[0] in ATOM_ENDS:
        raise errors.HeaderParseError(
            "expected atom but found '{}'".format(value))
    if value.startswith('=?'):
        try:
            token, value = get_encoded_word(value)
        except errors.HeaderParseError:
            # XXX: need to figure out how to register defects when
            # appropriate here.
            token, value = get_atext(value)
    else:
        token, value = get_atext(value)
    atom.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        atom.append(token)
    return atom, value

def get_dot_atom_text(value):
    """ dot-text = 1*atext *("." 1*atext)

    """
    dot_atom_text = DotAtomText()
    if not value or value[0] in ATOM_ENDS:
        raise errors.HeaderParseError("expected atom at a start of "
            "dot-atom-text but found '{}'".format(value))
    while value and value[0] not in ATOM_ENDS:
        token, value = get_atext(value)
        dot_atom_text.append(token)
        if value and value[0] == '.':
            dot_atom_text.append(DOT)
            value = value[1:]
    if dot_atom_text[-1] is DOT:
        raise errors.HeaderParseError("expected atom at end of dot-atom-text "
            "but found '{}'".format('.'+value))
    return dot_atom_text, value


def get_dot_atom(value):
    """ dot-atom = [CFWS] dot-atom-text [CFWS]

    Any place we can have a dot atom, we could instead have an rfc2047 encoded
    word.
    """
    dot_atom = DotAtom()
    if value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        dot_atom.append(token)
    if value.startswith('=?'):
        try:
            token, value = get_encoded_word(value)
        except errors.HeaderParseError:
            # XXX: need to figure out how to register defects when
            # appropriate here.
            token, value = get_dot_atom_text(value)
    else:
        token, value = get_dot_atom_text(value)
    dot_atom.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        dot_atom.append(token)
    return dot_atom, value

def get_word(value):
    """word = atom / quoted-string

    Either atom or quoted-string may start with CFWS.  We have to peel off this
    CFWS first to determine which type of word to parse.  Afterward we splice
    the leading CFWS, if any, into the parsed sub-token.

    If neither an atom nor a quoted-string is found before the next special, a
    HeaderParseError is raised.

    The token returned is either an Atom or a QuotedString, as appropriate.
    This means the 'word' level of the formal grammar is not represented in the
    parse tree; this is because having that extra layer when manipulating the
    parse tree is more confusing than it is helpful.

    """
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    else:
        leader = None
    if not value:
        raise errors.HeaderParseError(
            "Expected 'atom' or 'quoted-string' but found nothing.")
    if value[0]=='"':
        token, value = get_quoted_string(value)
    elif value[0] in SPECIALS:
        raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
                                      "but found '{}'".format(value))
    else:
        token, value = get_atom(value)
    if leader is not None:
        token[:0] = [leader]
    return token, value


def get_phrase(value):
    """ phrase = 1*word / obs-phrase
        obs-phrase = word *(word / "." / CFWS)

    This means a phrase can be a sequence of words, periods, and CFWS in any
    order as long as it starts with at least one word.  If anything other than
    words is detected, an ObsoleteHeaderDefect is added to the token's defect
    list.  We also accept a phrase that starts with CFWS followed by a dot;
    this is registered as an InvalidHeaderDefect, since it is not supported by
    even the obsolete grammar.

    """
    phrase = Phrase()
    try:
        token, value = get_word(value)
        phrase.append(token)
    except errors.HeaderParseError:
        phrase.defects.append(errors.InvalidHeaderDefect(
            "phrase does not start with word"))
    while value and value[0] not in PHRASE_ENDS:
        if value[0]=='.':
            phrase.append(DOT)
            phrase.defects.append(errors.ObsoleteHeaderDefect(
                "period in 'phrase'"))
            value = value[1:]
        else:
            try:
                token, value = get_word(value)
            except errors.HeaderParseError:
                if value[0] in CFWS_LEADER:
                    token, value = get_cfws(value)
                    phrase.defects.append(errors.ObsoleteHeaderDefect(
                        "comment found without atom"))
                else:
                    raise
            phrase.append(token)
    return phrase, value
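
# Sketch of typical use (not part of the original module), assuming an
# interactive session: the phrase is consumed up to the first special that
# ends a phrase, and obsolete syntax is recorded as a defect rather than
# rejected.
#
#     >>> phrase, rest = get_phrase('John Q. Public <jqp@example.com>')
#     >>> rest
#     '<jqp@example.com>'
#     >>> [d.__class__.__name__ for d in phrase.all_defects]
#     ['ObsoleteHeaderDefect']
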

def get_local_part(value):
    """ local-part = dot-atom / quoted-string / obs-local-part

    """
    local_part = LocalPart()
    leader = None
    if value and value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    if not value:
        raise errors.HeaderParseError(
            "expected local-part but found '{}'".format(value))
    try:
        token, value = get_dot_atom(value)
    except errors.HeaderParseError:
        try:
            token, value = get_word(value)
        except errors.HeaderParseError:
            if value[0] != '\\' and value[0] in PHRASE_ENDS:
                raise
            token = TokenList()
    if leader is not None:
        token[:0] = [leader]
    local_part.append(token)
    if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
        obs_local_part, value = get_obs_local_part(str(local_part) + value)
        if obs_local_part.token_type == 'invalid-obs-local-part':
            local_part.defects.append(errors.InvalidHeaderDefect(
                "local-part is not dot-atom, quoted-string, or obs-local-part"))
        else:
            local_part.defects.append(errors.ObsoleteHeaderDefect(
                "local-part is not a dot-atom (contains CFWS)"))
        local_part[0] = obs_local_part
    try:
        local_part.value.encode('ascii')
    except UnicodeEncodeError:
        local_part.defects.append(errors.NonASCIILocalPartDefect(
            "local-part contains non-ASCII characters"))
    return local_part, value


def get_obs_local_part(value):
    """ obs-local-part = word *("." word)
    """
    obs_local_part = ObsLocalPart()
    last_non_ws_was_dot = False
    while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
        if value[0] == '.':
            if last_non_ws_was_dot:
                obs_local_part.defects.append(errors.InvalidHeaderDefect(
                    "invalid repeated '.'"))
            obs_local_part.append(DOT)
            last_non_ws_was_dot = True
            value = value[1:]
            continue
        elif value[0]=='\\':
            obs_local_part.append(ValueTerminal(value[0],
                                                'misplaced-special'))
            value = value[1:]
            obs_local_part.defects.append(errors.InvalidHeaderDefect(
                "'\\' character outside of quoted-string/ccontent"))
            last_non_ws_was_dot = False
            continue
        if obs_local_part and obs_local_part[-1].token_type != 'dot':
            obs_local_part.defects.append(errors.InvalidHeaderDefect(
                "missing '.' between words"))
        try:
            token, value = get_word(value)
            last_non_ws_was_dot = False
        except errors.HeaderParseError:
            if value[0] not in CFWS_LEADER:
                raise
            token, value = get_cfws(value)
        obs_local_part.append(token)
    if not obs_local_part:
        raise errors.HeaderParseError(
            "expected obs-local-part but found '{}'".format(value))
    if (obs_local_part[0].token_type == 'dot' or
            obs_local_part[0].token_type=='cfws' and
            len(obs_local_part) > 1 and
            obs_local_part[1].token_type=='dot'):
        obs_local_part.defects.append(errors.InvalidHeaderDefect(
            "Invalid leading '.' in local part"))
    if (obs_local_part[-1].token_type == 'dot' or
            obs_local_part[-1].token_type=='cfws' and
            len(obs_local_part) > 1 and
            obs_local_part[-2].token_type=='dot'):
        obs_local_part.defects.append(errors.InvalidHeaderDefect(
            "Invalid trailing '.' in local part"))
    if obs_local_part.defects:
        obs_local_part.token_type = 'invalid-obs-local-part'
    return obs_local_part, value

def get_dtext(value):
    r""" dtext = <printable ascii except \ [ ]> / obs-dtext
    obs-dtext = obs-NO-WS-CTL / quoted-pair
    We allow anything except the excluded characters, but if we find any
    ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is
    added to the token's defects list. Quoted pairs are converted to their
    unquoted values, so what is returned is a ptext token, in this case a
    ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
    added to the returned token's defect list.
    """
    ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
    ptext = ValueTerminal(ptext, 'ptext')
    if had_qp:
        ptext.defects.append(errors.ObsoleteHeaderDefect(
            "quoted printable found in domain-literal"))
    _validate_xtext(ptext)
    return ptext, value

def _check_for_early_dl_end(value, domain_literal):
    if value:
        return False
    domain_literal.append(errors.InvalidHeaderDefect(
        "end of input inside domain-literal"))
    domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
    return True

def get_domain_literal(value):
    """ domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
    """
    domain_literal = DomainLiteral()
    if value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        domain_literal.append(token)
    if not value:
        raise errors.HeaderParseError("expected domain-literal")
    if value[0] != '[':
        raise errors.HeaderParseError("expected '[' at start of domain-literal "
                                      "but found '{}'".format(value))
    value = value[1:]
    if _check_for_early_dl_end(value, domain_literal):
        return domain_literal, value
    domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
    if value[0] in WSP:
        token, value = get_fws(value)
        domain_literal.append(token)
    token, value = get_dtext(value)
    domain_literal.append(token)
    if _check_for_early_dl_end(value, domain_literal):
        return domain_literal, value
    if value[0] in WSP:
        token, value = get_fws(value)
        domain_literal.append(token)
    if _check_for_early_dl_end(value, domain_literal):
        return domain_literal, value
    if value[0] != ']':
        raise errors.HeaderParseError("expected ']' at end of domain-literal "
                                      "but found '{}'".format(value))
    domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
    value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        domain_literal.append(token)
    return domain_literal, value

def get_domain(value):
    """ domain = dot-atom / domain-literal / obs-domain
    obs-domain = atom *("." atom)
    """
    domain = Domain()
    leader = None
    if value and value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    if not value:
        raise errors.HeaderParseError(
            "expected domain but found '{}'".format(value))
    if value[0] == '[':
        token, value = get_domain_literal(value)
        if leader is not None:
            token[:0] = [leader]
        domain.append(token)
        return domain, value
    try:
        token, value = get_dot_atom(value)
    except errors.HeaderParseError:
        token, value = get_atom(value)
    if value and value[0] == '@':
        raise errors.HeaderParseError('Invalid Domain')
    if leader is not None:
        token[:0] = [leader]
    domain.append(token)
    if value and value[0] == '.':
        domain.defects.append(errors.ObsoleteHeaderDefect(
            "domain is not a dot-atom (contains CFWS)"))
        if domain[0].token_type == 'dot-atom':
            domain[:] = domain[0]
        while value and value[0] == '.':
            domain.append(DOT)
            token, value = get_atom(value[1:])
            domain.append(token)
    return domain, value

def get_addr_spec(value):
    """ addr-spec = local-part "@" domain
    """
    addr_spec = AddrSpec()
    token, value = get_local_part(value)
    addr_spec.append(token)
    if not value or value[0] != '@':
        addr_spec.defects.append(errors.InvalidHeaderDefect(
            "addr-spec local part with no domain"))
        return addr_spec, value
    addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
    token, value = get_domain(value[1:])
    addr_spec.append(token)
    return addr_spec, value
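
# Illustrative sketch of the composed rule:
#
#     addr_spec, rest = get_addr_spec('dinsdale@example.com; x')
#     # addr_spec.local_part should be 'dinsdale', addr_spec.domain
#     # 'example.com', addr_spec.addr_spec 'dinsdale@example.com', rest '; x'.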

def get_obs_route(value):
    """ obs-route = obs-domain-list ":"
    obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
    Returns an obs-route token with the appropriate sub-tokens (that is,
    there is no obs-domain-list in the parse tree).
    """
    obs_route = ObsRoute()
    while value and (value[0]==',' or value[0] in CFWS_LEADER):
        if value[0] in CFWS_LEADER:
            token, value = get_cfws(value)
            obs_route.append(token)
        elif value[0] == ',':
            obs_route.append(ListSeparator)
            value = value[1:]
    if not value or value[0] != '@':
        raise errors.HeaderParseError(
            "expected obs-route domain but found '{}'".format(value))
    obs_route.append(RouteComponentMarker)
    token, value = get_domain(value[1:])
    obs_route.append(token)
    while value and value[0]==',':
        obs_route.append(ListSeparator)
        value = value[1:]
        if not value:
            break
        if value[0] in CFWS_LEADER:
            token, value = get_cfws(value)
            obs_route.append(token)
        if not value:
            break
        if value[0] == '@':
            obs_route.append(RouteComponentMarker)
            token, value = get_domain(value[1:])
            obs_route.append(token)
    if not value:
        raise errors.HeaderParseError("end of header while parsing obs-route")
    if value[0] != ':':
        raise errors.HeaderParseError("expected ':' marking end of "
                                      "obs-route but found '{}'".format(value))
    obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
    return obs_route, value[1:]

def get_angle_addr(value):
    """ angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
    obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
    """
    angle_addr = AngleAddr()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        angle_addr.append(token)
    if not value or value[0] != '<':
        raise errors.HeaderParseError(
            "expected angle-addr but found '{}'".format(value))
    angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
    value = value[1:]
    # Although it is not legal per RFC5322, SMTP uses '<>' in certain
    # circumstances.
    if value and value[0] == '>':
        angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
        angle_addr.defects.append(errors.InvalidHeaderDefect(
            "null addr-spec in angle-addr"))
        value = value[1:]
        return angle_addr, value
    try:
        token, value = get_addr_spec(value)
    except errors.HeaderParseError:
        try:
            token, value = get_obs_route(value)
            angle_addr.defects.append(errors.ObsoleteHeaderDefect(
                "obsolete route specification in angle-addr"))
        except errors.HeaderParseError:
            raise errors.HeaderParseError(
                "expected addr-spec or obs-route but found '{}'".format(value))
        angle_addr.append(token)
        token, value = get_addr_spec(value)
    angle_addr.append(token)
    if value and value[0] == '>':
        value = value[1:]
    else:
        angle_addr.defects.append(errors.InvalidHeaderDefect(
            "missing trailing '>' on angle-addr"))
    angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        angle_addr.append(token)
    return angle_addr, value
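
# Illustrative sketch:
#
#     aa, rest = get_angle_addr('<fred@example.com> trailing')
#     # str(aa) should be '<fred@example.com> ' (the CFWS after '>' is kept)
#     # and rest 'trailing'.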

def get_display_name(value):
    """ display-name = phrase
    Because this is simply a name-rule, we don't return a display-name
    token containing a phrase, but rather a display-name token with
    the content of the phrase.
    """
    display_name = DisplayName()
    token, value = get_phrase(value)
    display_name.extend(token[:])
    display_name.defects = token.defects[:]
    return display_name, value

def get_name_addr(value):
    """ name-addr = [display-name] angle-addr
    """
    name_addr = NameAddr()
    # Both the optional display name and the angle-addr can start with cfws.
    leader = None
    if not value:
        raise errors.HeaderParseError(
            "expected name-addr but found '{}'".format(value))
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
        if not value:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(leader))
    if value[0] != '<':
        if value[0] in PHRASE_ENDS:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(value))
        token, value = get_display_name(value)
        if not value:
            raise errors.HeaderParseError(
                "expected name-addr but found '{}'".format(token))
        if leader is not None:
            if isinstance(token[0], TokenList):
                token[0][:0] = [leader]
            else:
                token[:0] = [leader]
            leader = None
        name_addr.append(token)
    token, value = get_angle_addr(value)
    if leader is not None:
        token[:0] = [leader]
    name_addr.append(token)
    return name_addr, value

def get_mailbox(value):
    """ mailbox = name-addr / addr-spec
    """
    # The only way to figure out if we are dealing with a name-addr or an
    # addr-spec is to try parsing each one.
    mailbox = Mailbox()
    try:
        token, value = get_name_addr(value)
    except errors.HeaderParseError:
        try:
            token, value = get_addr_spec(value)
        except errors.HeaderParseError:
            raise errors.HeaderParseError(
                "expected mailbox but found '{}'".format(value))
    if any(isinstance(x, errors.InvalidHeaderDefect)
           for x in token.all_defects):
        mailbox.token_type = 'invalid-mailbox'
    mailbox.append(token)
    return mailbox, value
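
# Illustrative sketch:
#
#     mbox, rest = get_mailbox('Fred Bloggs <fred@example.com>, next')
#     # mbox.display_name should be 'Fred Bloggs', mbox.addr_spec
#     # 'fred@example.com', and rest ', next'.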

def get_invalid_mailbox(value, endchars):
    """ Read everything up to one of the chars in endchars.
    This is outside the formal grammar. The InvalidMailbox TokenList that is
    returned acts like a Mailbox, but the data attributes are None.
    """
    invalid_mailbox = InvalidMailbox()
    while value and value[0] not in endchars:
        if value[0] in PHRASE_ENDS:
            invalid_mailbox.append(ValueTerminal(value[0],
                                                 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            invalid_mailbox.append(token)
    return invalid_mailbox, value

def get_mailbox_list(value):
    """ mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
    obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
    For this routine we go outside the formal grammar in order to improve error
    handling. We recognize the end of the mailbox list only at the end of the
    value or at a ';' (the group terminator). This is so that we can turn
    invalid mailboxes into InvalidMailbox tokens and continue parsing any
    remaining valid mailboxes. We also allow all mailbox entries to be null,
    and this condition is handled appropriately at a higher level.
    """
    mailbox_list = MailboxList()
    while value and value[0] != ';':
        try:
            token, value = get_mailbox(value)
            mailbox_list.append(token)
        except errors.HeaderParseError:
            leader = None
            if value[0] in CFWS_LEADER:
                leader, value = get_cfws(value)
                if not value or value[0] in ',;':
                    mailbox_list.append(leader)
                    mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
                        "empty element in mailbox-list"))
                else:
                    token, value = get_invalid_mailbox(value, ',;')
                    if leader is not None:
                        token[:0] = [leader]
                    mailbox_list.append(token)
                    mailbox_list.defects.append(errors.InvalidHeaderDefect(
                        "invalid mailbox in mailbox-list"))
            elif value[0] == ',':
                mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
                    "empty element in mailbox-list"))
            else:
                token, value = get_invalid_mailbox(value, ',;')
                if leader is not None:
                    token[:0] = [leader]
                mailbox_list.append(token)
                mailbox_list.defects.append(errors.InvalidHeaderDefect(
                    "invalid mailbox in mailbox-list"))
        if value and value[0] not in ',;':
            # Crap after mailbox; treat it as an invalid mailbox.
            # The mailbox info will still be available.
            mailbox = mailbox_list[-1]
            mailbox.token_type = 'invalid-mailbox'
            token, value = get_invalid_mailbox(value, ',;')
            mailbox.extend(token)
            mailbox_list.defects.append(errors.InvalidHeaderDefect(
                "invalid mailbox in mailbox-list"))
        if value and value[0] == ',':
            mailbox_list.append(ListSeparator)
            value = value[1:]
    return mailbox_list, value

def get_group_list(value):
    """ group-list = mailbox-list / CFWS / obs-group-list
    obs-group-list = 1*([CFWS] ",") [CFWS]
    """
    group_list = GroupList()
    if not value:
        group_list.defects.append(errors.InvalidHeaderDefect(
            "end of header before group-list"))
        return group_list, value
    leader = None
    if value and value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
        if not value:
            # This should never happen in email parsing, since CFWS-only is a
            # legal alternative to group-list in a group, which is the only
            # place group-list appears.
            group_list.defects.append(errors.InvalidHeaderDefect(
                "end of header in group-list"))
            group_list.append(leader)
            return group_list, value
        if value[0] == ';':
            group_list.append(leader)
            return group_list, value
    token, value = get_mailbox_list(value)
    if len(token.all_mailboxes)==0:
        if leader is not None:
            group_list.append(leader)
        group_list.extend(token)
        group_list.defects.append(errors.ObsoleteHeaderDefect(
            "group-list with empty entries"))
        return group_list, value
    if leader is not None:
        token[:0] = [leader]
    group_list.append(token)
    return group_list, value

def get_group(value):
    """ group = display-name ":" [group-list] ";" [CFWS]
    """
    group = Group()
    token, value = get_display_name(value)
    if not value or value[0] != ':':
        raise errors.HeaderParseError("expected ':' at end of group "
                                      "display name but found '{}'".format(value))
    group.append(token)
    group.append(ValueTerminal(':', 'group-display-name-terminator'))
    value = value[1:]
    if value and value[0] == ';':
        group.append(ValueTerminal(';', 'group-terminator'))
        return group, value[1:]
    token, value = get_group_list(value)
    group.append(token)
    if not value:
        group.defects.append(errors.InvalidHeaderDefect(
            "end of header in group"))
    elif value[0] != ';':
        raise errors.HeaderParseError(
            "expected ';' at end of group but found {}".format(value))
    group.append(ValueTerminal(';', 'group-terminator'))
    value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        group.append(token)
    return group, value
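
# Illustrative sketch: an empty group is legal and terminates at the ';', e.g.
#
#     group, rest = get_group('Undisclosed recipients:;')
#     # str(group) should round-trip the input and rest should be ''.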

def get_address(value):
    """ address = mailbox / group
    Note that counter-intuitively, an address can be either a single address or
    a list of addresses (a group). This is why the returned Address object has
    a 'mailboxes' attribute which treats a single address as a list of length
    one. When you need to differentiate between the two cases, extract the
    single element, which is either a mailbox or a group token.
    """
    # The formal grammar isn't very helpful when parsing an address. mailbox
    # and group, especially when allowing for obsolete forms, start off very
    # similarly. It is only when you reach one of @, <, or : that you know
    # what you've got. So, we try each one in turn, starting with the more
    # likely of the two. We could perhaps make this more efficient by looking
    # for a phrase and then branching based on the next character, but that
    # would be a premature optimization.
    address = Address()
    try:
        token, value = get_group(value)
    except errors.HeaderParseError:
        try:
            token, value = get_mailbox(value)
        except errors.HeaderParseError:
            raise errors.HeaderParseError(
                "expected address but found '{}'".format(value))
    address.append(token)
    return address, value

def get_address_list(value):
    """ address_list = (address *("," address)) / obs-addr-list
    obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
    We depart from the formal grammar here by continuing to parse until the end
    of the input, assuming the input to be entirely composed of an
    address-list. This is always true in email parsing, and allows us
    to skip invalid addresses to parse additional valid ones.
    """
    address_list = AddressList()
    while value:
        try:
            token, value = get_address(value)
            address_list.append(token)
        except errors.HeaderParseError:
            leader = None
            if value[0] in CFWS_LEADER:
                leader, value = get_cfws(value)
                if not value or value[0] == ',':
                    address_list.append(leader)
                    address_list.defects.append(errors.ObsoleteHeaderDefect(
                        "address-list entry with no content"))
                else:
                    token, value = get_invalid_mailbox(value, ',')
                    if leader is not None:
                        token[:0] = [leader]
                    address_list.append(Address([token]))
                    address_list.defects.append(errors.InvalidHeaderDefect(
                        "invalid address in address-list"))
            elif value[0] == ',':
                address_list.defects.append(errors.ObsoleteHeaderDefect(
                    "empty element in address-list"))
            else:
                token, value = get_invalid_mailbox(value, ',')
                if leader is not None:
                    token[:0] = [leader]
                address_list.append(Address([token]))
                address_list.defects.append(errors.InvalidHeaderDefect(
                    "invalid address in address-list"))
        if value and value[0] != ',':
            # Crap after address; treat it as an invalid mailbox.
            # The mailbox info will still be available.
            mailbox = address_list[-1][0]
            mailbox.token_type = 'invalid-mailbox'
            token, value = get_invalid_mailbox(value, ',')
            mailbox.extend(token)
            address_list.defects.append(errors.InvalidHeaderDefect(
                "invalid address in address-list"))
        if value:  # Must be a , at this point.
            address_list.append(ListSeparator)
            value = value[1:]
    return address_list, value
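
# Illustrative sketch:
#
#     alist, rest = get_address_list('Fred <f@example.com>, Jim <j@example.com>')
#     # rest should be '' and alist.mailboxes should hold two Mailbox tokens;
#     # an unparseable entry would instead be wrapped as an invalid-mailbox
#     # and an InvalidHeaderDefect recorded on alist.defects.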

def get_no_fold_literal(value):
    """ no-fold-literal = "[" *dtext "]"
    """
    no_fold_literal = NoFoldLiteral()
    if not value:
        raise errors.HeaderParseError(
            "expected no-fold-literal but found '{}'".format(value))
    if value[0] != '[':
        raise errors.HeaderParseError(
            "expected '[' at the start of no-fold-literal "
            "but found '{}'".format(value))
    no_fold_literal.append(ValueTerminal('[', 'no-fold-literal-start'))
    value = value[1:]
    token, value = get_dtext(value)
    no_fold_literal.append(token)
    if not value or value[0] != ']':
        raise errors.HeaderParseError(
            "expected ']' at the end of no-fold-literal "
            "but found '{}'".format(value))
    no_fold_literal.append(ValueTerminal(']', 'no-fold-literal-end'))
    return no_fold_literal, value[1:]

def get_msg_id(value):
    """msg-id = [CFWS] "<" id-left '@' id-right ">" [CFWS]
    id-left = dot-atom-text / obs-id-left
    id-right = dot-atom-text / no-fold-literal / obs-id-right
    no-fold-literal = "[" *dtext "]"
    """
    msg_id = MsgID()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        msg_id.append(token)
    if not value or value[0] != '<':
        raise errors.HeaderParseError(
            "expected msg-id but found '{}'".format(value))
    msg_id.append(ValueTerminal('<', 'msg-id-start'))
    value = value[1:]
    # Parse id-left.
    try:
        token, value = get_dot_atom_text(value)
    except errors.HeaderParseError:
        try:
            # obs-id-left is same as local-part of addr-spec.
            token, value = get_obs_local_part(value)
            msg_id.defects.append(errors.ObsoleteHeaderDefect(
                "obsolete id-left in msg-id"))
        except errors.HeaderParseError:
            raise errors.HeaderParseError(
                "expected dot-atom-text or obs-id-left"
                " but found '{}'".format(value))
    msg_id.append(token)
    if not value or value[0] != '@':
        msg_id.defects.append(errors.InvalidHeaderDefect(
            "msg-id with no id-right"))
        # Even though there is no id-right, if the local part
        # ends with `>` let's just parse it too and return
        # along with the defect.
        if value and value[0] == '>':
            msg_id.append(ValueTerminal('>', 'msg-id-end'))
            value = value[1:]
        return msg_id, value
    msg_id.append(ValueTerminal('@', 'address-at-symbol'))
    value = value[1:]
    # Parse id-right.
    try:
        token, value = get_dot_atom_text(value)
    except errors.HeaderParseError:
        try:
            token, value = get_no_fold_literal(value)
        except errors.HeaderParseError:
            try:
                token, value = get_domain(value)
                msg_id.defects.append(errors.ObsoleteHeaderDefect(
                    "obsolete id-right in msg-id"))
            except errors.HeaderParseError:
                raise errors.HeaderParseError(
                    "expected dot-atom-text, no-fold-literal or obs-id-right"
                    " but found '{}'".format(value))
    msg_id.append(token)
    if value and value[0] == '>':
        value = value[1:]
    else:
        msg_id.defects.append(errors.InvalidHeaderDefect(
            "missing trailing '>' on msg-id"))
    msg_id.append(ValueTerminal('>', 'msg-id-end'))
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        msg_id.append(token)
    return msg_id, value

def parse_message_id(value):
    """message-id = "Message-ID:" msg-id CRLF
    """
    message_id = MessageID()
    try:
        token, value = get_msg_id(value)
        message_id.append(token)
    except errors.HeaderParseError as ex:
        token = get_unstructured(value)
        message_id = InvalidMessageID(token)
        message_id.defects.append(
            errors.InvalidHeaderDefect("Invalid msg-id: {!r}".format(ex)))
    else:
        # Value after parsing a valid msg_id should be empty.
        if value:
            message_id.defects.append(errors.InvalidHeaderDefect(
                "Unexpected {!r}".format(value)))
    return message_id
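
# Illustrative sketch:
#
#     mid = parse_message_id('<20230401.1234@example.com>')
#     # str(mid) should round-trip the value with no defects; a malformed
#     # value instead yields an InvalidMessageID token carrying an
#     # InvalidHeaderDefect.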

#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two levels of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#

def parse_mime_version(value):
    """ mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
    """
    # The [CFWS] is implicit in the RFC 2045 BNF.
    # XXX: This routine is a bit verbose, should factor out a get_int method.
    mime_version = MIMEVersion()
    if not value:
        mime_version.defects.append(errors.HeaderMissingRequiredValue(
            "Missing MIME version number (eg: 1.0)"))
        return mime_version
    if value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mime_version.append(token)
        if not value:
            mime_version.defects.append(errors.HeaderMissingRequiredValue(
                "Expected MIME version number but found only CFWS"))
    digits = ''
    while value and value[0] != '.' and value[0] not in CFWS_LEADER:
        digits += value[0]
        value = value[1:]
    if not digits.isdigit():
        mime_version.defects.append(errors.InvalidHeaderDefect(
            "Expected MIME major version number but found {!r}".format(digits)))
        mime_version.append(ValueTerminal(digits, 'xtext'))
    else:
        mime_version.major = int(digits)
        mime_version.append(ValueTerminal(digits, 'digits'))
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mime_version.append(token)
    if not value or value[0] != '.':
        if mime_version.major is not None:
            mime_version.defects.append(errors.InvalidHeaderDefect(
                "Incomplete MIME version; found only major number"))
        if value:
            mime_version.append(ValueTerminal(value, 'xtext'))
        return mime_version
    mime_version.append(ValueTerminal('.', 'version-separator'))
    value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mime_version.append(token)
    if not value:
        if mime_version.major is not None:
            mime_version.defects.append(errors.InvalidHeaderDefect(
                "Incomplete MIME version; found only major number"))
        return mime_version
    digits = ''
    while value and value[0] not in CFWS_LEADER:
        digits += value[0]
        value = value[1:]
    if not digits.isdigit():
        mime_version.defects.append(errors.InvalidHeaderDefect(
            "Expected MIME minor version number but found {!r}".format(digits)))
        mime_version.append(ValueTerminal(digits, 'xtext'))
    else:
        mime_version.minor = int(digits)
        mime_version.append(ValueTerminal(digits, 'digits'))
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mime_version.append(token)
    if value:
        mime_version.defects.append(errors.InvalidHeaderDefect(
            "Excess non-CFWS text after MIME version"))
        mime_version.append(ValueTerminal(value, 'xtext'))
    return mime_version
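
# Illustrative sketch:
#
#     mv = parse_mime_version('1.0 (produced by MIME-tools)')
#     # mv.major should be 1 and mv.minor 0; the trailing comment is kept as
#     # CFWS and produces no defect.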

def get_invalid_parameter(value):
    """ Read everything up to the next ';'.
    This is outside the formal grammar. The InvalidParameter TokenList that is
    returned acts like a Parameter, but the data attributes are None.
    """
    invalid_parameter = InvalidParameter()
    while value and value[0] != ';':
        if value[0] in PHRASE_ENDS:
            invalid_parameter.append(ValueTerminal(value[0],
                                                   'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            invalid_parameter.append(token)
    return invalid_parameter, value

def get_ttext(value):
    """ttext = <matches _ttext_matcher>
    We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
    defects list if we find non-ttext characters. We also register defects for
    *any* non-printables even though the RFC doesn't exclude all of them,
    because we follow the spirit of RFC 5322.
    """
    m = _non_token_end_matcher(value)
    if not m:
        raise errors.HeaderParseError(
            "expected ttext but found '{}'".format(value))
    ttext = m.group()
    value = value[len(ttext):]
    ttext = ValueTerminal(ttext, 'ttext')
    _validate_xtext(ttext)
    return ttext, value

def get_token(value):
    """token = [CFWS] 1*ttext [CFWS]
    The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
    tspecials. We also exclude tabs even though the RFC doesn't.
    The RFC implies the CFWS but is not explicit about it in the BNF.
    """
    mtoken = Token()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mtoken.append(token)
    if value and value[0] in TOKEN_ENDS:
        raise errors.HeaderParseError(
            "expected token but found '{}'".format(value))
    token, value = get_ttext(value)
    mtoken.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        mtoken.append(token)
    return mtoken, value

def get_attrtext(value):
    """attrtext = 1*(any non-ATTRIBUTE_ENDS character)
    We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
    token's defects list if we find non-attrtext characters. We also register
    defects for *any* non-printables even though the RFC doesn't exclude all of
    them, because we follow the spirit of RFC 5322.
    """
    m = _non_attribute_end_matcher(value)
    if not m:
        raise errors.HeaderParseError(
            "expected attrtext but found {!r}".format(value))
    attrtext = m.group()
    value = value[len(attrtext):]
    attrtext = ValueTerminal(attrtext, 'attrtext')
    _validate_xtext(attrtext)
    return attrtext, value

def get_attribute(value):
    """ [CFWS] 1*attrtext [CFWS]
    This version of the BNF makes the CFWS explicit, and as usual we use a
    value terminal for the actual run of characters. The RFC equivalent of
    attrtext is the token characters, with the subtraction of '*', "'", and '%'.
    We include tab in the excluded set just as we do for token.
    """
    attribute = Attribute()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        attribute.append(token)
    if value and value[0] in ATTRIBUTE_ENDS:
        raise errors.HeaderParseError(
            "expected token but found '{}'".format(value))
    token, value = get_attrtext(value)
    attribute.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        attribute.append(token)
    return attribute, value

def get_extended_attrtext(value):
    """attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
    This is a special parsing routine so that we get a value that
    includes % escapes as a single string (which we decode as a single
    string later).
    """
    m = _non_extended_attribute_end_matcher(value)
    if not m:
        raise errors.HeaderParseError(
            "expected extended attrtext but found {!r}".format(value))
    attrtext = m.group()
    value = value[len(attrtext):]
    attrtext = ValueTerminal(attrtext, 'extended-attrtext')
    _validate_xtext(attrtext)
    return attrtext, value

def get_extended_attribute(value):
    """ [CFWS] 1*extended_attrtext [CFWS]
    This is like the non-extended version except we allow % characters, so that
    we can pick up an encoded value as a single string.
    """
    # XXX: should we have an ExtendedAttribute TokenList?
    attribute = Attribute()
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        attribute.append(token)
    if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
        raise errors.HeaderParseError(
            "expected token but found '{}'".format(value))
    token, value = get_extended_attrtext(value)
    attribute.append(token)
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        attribute.append(token)
    return attribute, value

def get_section(value):
    """ '*' digits
    The formal BNF is more complicated because leading 0s are not allowed. We
    check for that and add a defect. We also assume no CFWS is allowed between
    the '*' and the digits, though the RFC is not crystal clear on that.
    The caller should already have dealt with leading CFWS.
    """
    section = Section()
    if not value or value[0] != '*':
        raise errors.HeaderParseError("Expected section but found {}".format(
            value))
    section.append(ValueTerminal('*', 'section-marker'))
    value = value[1:]
    if not value or not value[0].isdigit():
        raise errors.HeaderParseError("Expected section number but "
                                      "found {}".format(value))
    digits = ''
    while value and value[0].isdigit():
        digits += value[0]
        value = value[1:]
    if digits[0] == '0' and digits != '0':
        section.defects.append(errors.InvalidHeaderDefect(
            "section number has an invalid leading 0"))
    section.number = int(digits)
    section.append(ValueTerminal(digits, 'digits'))
    return section, value

def get_value(value):
    """ quoted-string / attribute
    """
    v = Value()
    if not value:
        raise errors.HeaderParseError("Expected value but found end of string")
    leader = None
    if value[0] in CFWS_LEADER:
        leader, value = get_cfws(value)
    if not value:
        raise errors.HeaderParseError("Expected value but found "
                                      "only {}".format(leader))
    if value[0] == '"':
        token, value = get_quoted_string(value)
    else:
        token, value = get_extended_attribute(value)
    if leader is not None:
        token[:0] = [leader]
    v.append(token)
    return v, value

def get_parameter(value):
    """ attribute [section] ["*"] [CFWS] "=" value
    The CFWS is implied by the RFC but not made explicit in the BNF. This
    simplified form of the BNF from the RFC is made to conform with the RFC BNF
    through some extra checks. We do it this way because it makes both error
    recovery and working with the resulting parse tree easier.
    """
    # It is possible CFWS would also be implicitly allowed between the section
    # and the 'extended-attribute' marker (the '*'), but we've never seen that
    # in the wild and we will therefore ignore the possibility.
    param = Parameter()
    token, value = get_attribute(value)
    param.append(token)
    if not value or value[0] == ';':
        param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
            "name ({}) but no value".format(token)))
        return param, value
    if value[0] == '*':
        try:
            token, value = get_section(value)
            param.sectioned = True
            param.append(token)
        except errors.HeaderParseError:
            pass
        if not value:
            raise errors.HeaderParseError("Incomplete parameter")
        if value[0] == '*':
            param.append(ValueTerminal('*', 'extended-parameter-marker'))
            value = value[1:]
            param.extended = True
    if value[0] != '=':
        raise errors.HeaderParseError("Parameter not followed by '='")
    param.append(ValueTerminal('=', 'parameter-separator'))
    value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        param.append(token)
    remainder = None
    appendto = param
    if param.extended and value and value[0] == '"':
        # Now for some serious hackery to handle the common invalid case of
        # double quotes around an extended value. We also accept (with defect)
        # a value marked as encoded that isn't really.
        qstring, remainder = get_quoted_string(value)
        inner_value = qstring.stripped_value
        semi_valid = False
        if param.section_number == 0:
            if inner_value and inner_value[0] == "'":
                semi_valid = True
            else:
                token, rest = get_attrtext(inner_value)
                if rest and rest[0] == "'":
                    semi_valid = True
        else:
            try:
                token, rest = get_extended_attrtext(inner_value)
            except:
                pass
            else:
                if not rest:
                    semi_valid = True
        if semi_valid:
            param.defects.append(errors.InvalidHeaderDefect(
                "Quoted string value for extended parameter is invalid"))
            param.append(qstring)
            for t in qstring:
                if t.token_type == 'bare-quoted-string':
                    t[:] = []
                    appendto = t
                    break
            value = inner_value
        else:
            remainder = None
            param.defects.append(errors.InvalidHeaderDefect(
                "Parameter marked as extended but appears to have a "
                "quoted string value that is non-encoded"))
    if value and value[0] == "'":
        token = None
    else:
        token, value = get_value(value)
    if not param.extended or param.section_number > 0:
        if not value or value[0] != "'":
            appendto.append(token)
            if remainder is not None:
                assert not value, value
                value = remainder
            return param, value
        param.defects.append(errors.InvalidHeaderDefect(
            "Apparent initial-extended-value but attribute "
            "was not marked as extended or was not initial section"))
    if not value:
        # Assume the charset/lang is missing and the token is the value.
        param.defects.append(errors.InvalidHeaderDefect(
            "Missing required charset/lang delimiters"))
        appendto.append(token)
        if remainder is None:
            return param, value
    else:
        if token is not None:
            for t in token:
                if t.token_type == 'extended-attrtext':
                    break
            t.token_type == 'attrtext'
            appendto.append(t)
            param.charset = t.value
        if value[0] != "'":
            raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
                                          "delimiter, but found {!r}".format(value))
        appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
        value = value[1:]
        if value and value[0] != "'":
            token, value = get_attrtext(value)
            appendto.append(token)
            param.lang = token.value
            if not value or value[0] != "'":
                raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
                                              "delimiter, but found {}".format(value))
        appendto.append(ValueTerminal("'", 'RFC2231-delimiter'))
        value = value[1:]
    if remainder is not None:
        # Treat the rest of value as bare quoted string content.
        v = Value()
        while value:
            if value[0] in WSP:
                token, value = get_fws(value)
            elif value[0] == '"':
                token = ValueTerminal('"', 'DQUOTE')
                value = value[1:]
            else:
                token, value = get_qcontent(value)
            v.append(token)
        token = v
    else:
        token, value = get_value(value)
    appendto.append(token)
    if remainder is not None:
        assert not value, value
        value = remainder
    return param, value
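
# Illustrative sketch: an RFC 2231 extended parameter records its charset and
# keeps the percent-encoded run as a single string for later decoding, e.g.
#
#     param, rest = get_parameter("filename*=utf-8''n%C3%A4me.txt")
#     # param.extended should be True, param.sectioned False,
#     # param.charset 'utf-8', and rest ''.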

def parse_mime_parameters(value):
    """ parameter *( ";" parameter )
    That BNF is meant to indicate this routine should only be called after
    finding and handling the leading ';'. There is no corresponding rule in
    the formal RFC grammar, but it is more convenient for us for the set of
    parameters to be treated as its own TokenList.
    This is a 'parse' routine because it consumes the remaining value, but it
    would never be called to parse a full header. Instead it is called to
    parse everything after the non-parameter value of a specific MIME header.
    """
    mime_parameters = MimeParameters()
    while value:
        try:
            token, value = get_parameter(value)
            mime_parameters.append(token)
        except errors.HeaderParseError:
            leader = None
            if value[0] in CFWS_LEADER:
                leader, value = get_cfws(value)
            if not value:
                mime_parameters.append(leader)
                return mime_parameters
            if value[0] == ';':
                if leader is not None:
                    mime_parameters.append(leader)
                mime_parameters.defects.append(errors.InvalidHeaderDefect(
                    "parameter entry with no content"))
            else:
                token, value = get_invalid_parameter(value)
                if leader:
                    token[:0] = [leader]
                mime_parameters.append(token)
                mime_parameters.defects.append(errors.InvalidHeaderDefect(
                    "invalid parameter {!r}".format(token)))
        if value and value[0] != ';':
            # Junk after the otherwise valid parameter. Mark it as
            # invalid, but it will have a value.
            param = mime_parameters[-1]
            param.token_type = 'invalid-parameter'
            token, value = get_invalid_parameter(value)
            param.extend(token)
            mime_parameters.defects.append(errors.InvalidHeaderDefect(
                "parameter with invalid trailing text {!r}".format(token)))
        if value:
            # Must be a ';' at this point.
            mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
            value = value[1:]
    return mime_parameters

def _find_mime_parameters(tokenlist, value):
    """Do our best to find the parameters in an invalid MIME header
    """
    while value and value[0] != ';':
        if value[0] in PHRASE_ENDS:
            tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            tokenlist.append(token)
    if not value:
        return
    tokenlist.append(ValueTerminal(';', 'parameter-separator'))
    tokenlist.append(parse_mime_parameters(value[1:]))

def parse_content_type_header(value):
    """ maintype "/" subtype *( ";" parameter )
    The maintype and subtype are tokens. Theoretically they could
    be checked against the official IANA list + x-token, but we
    don't do that.
    """
    ctype = ContentType()
    if not value:
        ctype.defects.append(errors.HeaderMissingRequiredValue(
            "Missing content type specification"))
        return ctype
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        ctype.defects.append(errors.InvalidHeaderDefect(
            "Expected content maintype but found {!r}".format(value)))
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(token)
    # XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
    if not value or value[0] != '/':
        ctype.defects.append(errors.InvalidHeaderDefect(
            "Invalid content type"))
        if value:
            _find_mime_parameters(ctype, value)
        return ctype
    ctype.maintype = token.value.strip().lower()
    ctype.append(ValueTerminal('/', 'content-type-separator'))
    value = value[1:]
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        ctype.defects.append(errors.InvalidHeaderDefect(
            "Expected content subtype but found {!r}".format(value)))
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(token)
    ctype.subtype = token.value.strip().lower()
    if not value:
        return ctype
    if value[0] != ';':
        ctype.defects.append(errors.InvalidHeaderDefect(
            "Only parameters are valid after content type, but "
            "found {!r}".format(value)))
        # The RFC requires that a syntactically invalid content-type be treated
        # as text/plain. Perhaps we should postel this, but we should probably
        # only do that if we were checking the subtype value against IANA.
        del ctype.maintype, ctype.subtype
        _find_mime_parameters(ctype, value)
        return ctype
    ctype.append(ValueTerminal(';', 'parameter-separator'))
    ctype.append(parse_mime_parameters(value[1:]))
    return ctype
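
# Illustrative sketch:
#
#     ctype = parse_content_type_header('text/plain; charset="utf-8"')
#     # ctype.maintype should be 'text' and ctype.subtype 'plain'; the
#     # parameter list is parsed by parse_mime_parameters, so ctype.params
#     # should yield ('charset', 'utf-8').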

def parse_content_disposition_header(value):
    """ disposition-type *( ";" parameter )
    """
    disp_header = ContentDisposition()
    if not value:
        disp_header.defects.append(errors.HeaderMissingRequiredValue(
            "Missing content disposition"))
        return disp_header
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        disp_header.defects.append(errors.InvalidHeaderDefect(
            "Expected content disposition but found {!r}".format(value)))
        _find_mime_parameters(disp_header, value)
        return disp_header
    disp_header.append(token)
    disp_header.content_disposition = token.value.strip().lower()
    if not value:
        return disp_header
    if value[0] != ';':
        disp_header.defects.append(errors.InvalidHeaderDefect(
            "Only parameters are valid after content disposition, but "
            "found {!r}".format(value)))
        _find_mime_parameters(disp_header, value)
        return disp_header
    disp_header.append(ValueTerminal(';', 'parameter-separator'))
    disp_header.append(parse_mime_parameters(value[1:]))
    return disp_header

def parse_content_transfer_encoding_header(value):
    """ mechanism
    """
    # We should probably validate the values, since the list is fixed.
    cte_header = ContentTransferEncoding()
    if not value:
        cte_header.defects.append(errors.HeaderMissingRequiredValue(
            "Missing content transfer encoding"))
        return cte_header
    try:
        token, value = get_token(value)
    except errors.HeaderParseError:
        cte_header.defects.append(errors.InvalidHeaderDefect(
            "Expected content transfer encoding but found {!r}".format(value)))
    else:
        cte_header.append(token)
        cte_header.cte = token.value.strip().lower()
    if not value:
        return cte_header
    while value:
        cte_header.defects.append(errors.InvalidHeaderDefect(
            "Extra text after content transfer encoding"))
        if value[0] in PHRASE_ENDS:
            cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            cte_header.append(token)
    return cte_header
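
# Illustrative sketch:
#
#     cte = parse_content_transfer_encoding_header(' base64 ')
#     # cte.cte should be 'base64'; any extra text after the mechanism is kept
#     # in the token but flagged with an InvalidHeaderDefect.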

#
# Header folding
#
# Header folding is complex, with lots of rules and corner cases. The
# following code does its best to obey the rules and handle the corner
# cases, but you can be sure there are a few bugs:)
#
# This folder generally canonicalizes as it goes, preferring the stringified
# version of each token. The tokens contain information that supports the
# folder, including which tokens can be encoded in which ways.
#
# Folded text is accumulated in a simple list of strings ('lines'), each
# one of which should be less than policy.max_line_length ('maxlen').
#

def _steal_trailing_WSP_if_exists(lines):
    wsp = ''
    if lines and lines[-1] and lines[-1][-1] in WSP:
        wsp = lines[-1][-1]
        lines[-1] = lines[-1][:-1]
    return wsp
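
# Illustrative sketch:
#
#     lines = ['To: fred ']
#     wsp = _steal_trailing_WSP_if_exists(lines)
#     # wsp should be ' ' and lines[-1] 'To: fred', so the stolen blank can
#     # start the continuation line produced by the fold.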
  2402. def _refold_parse_tree(parse_tree, *, policy):
  2403. """Return string of contents of parse_tree folded according to RFC rules.
  2404. """
  2405. # max_line_length 0/None means no limit, ie: infinitely long.
  2406. maxlen = policy.max_line_length or sys.maxsize
  2407. encoding = 'utf-8' if policy.utf8 else 'us-ascii'
  2408. lines = [''] # Folded lines to be output
  2409. leading_whitespace = '' # When we have whitespace between two encoded
  2410. # words, we may need to encode the whitespace
  2411. # at the beginning of the second word.
  2412. last_ew = None # Points to the last encoded character if there's an ew on
  2413. # the line
  2414. last_charset = None
  2415. wrap_as_ew_blocked = 0
  2416. want_encoding = False # This is set to True if we need to encode this part
  2417. end_ew_not_allowed = Terminal('', 'wrap_as_ew_blocked')
  2418. parts = list(parse_tree)
  2419. while parts:
  2420. part = parts.pop(0)
  2421. if part is end_ew_not_allowed:
  2422. wrap_as_ew_blocked -= 1
  2423. continue
  2424. tstr = str(part)
  2425. if not want_encoding:
  2426. if part.token_type == 'ptext':
  2427. # Encode if tstr contains special characters.
  2428. want_encoding = not SPECIALSNL.isdisjoint(tstr)
  2429. else:
  2430. # Encode if tstr contains newlines.
  2431. want_encoding = not NLSET.isdisjoint(tstr)
  2432. try:
  2433. tstr.encode(encoding)
  2434. charset = encoding
  2435. except UnicodeEncodeError:
  2436. if any(isinstance(x, errors.UndecodableBytesDefect)
  2437. for x in part.all_defects):
  2438. charset = 'unknown-8bit'
  2439. else:
  2440. # If policy.utf8 is false this should really be taken from a
  2441. # 'charset' property on the policy.
  2442. charset = 'utf-8'
  2443. want_encoding = True
  2444. if part.token_type == 'mime-parameters':
  2445. # Mime parameter folding (using RFC2231) is extra special.
  2446. _fold_mime_parameters(part, lines, maxlen, encoding)
  2447. continue
  2448. if want_encoding and not wrap_as_ew_blocked:
  2449. if not part.as_ew_allowed:
  2450. want_encoding = False
  2451. last_ew = None
  2452. if part.syntactic_break:
  2453. encoded_part = part.fold(policy=policy)[:-len(policy.linesep)]
  2454. if policy.linesep not in encoded_part:
  2455. # It fits on a single line
  2456. if len(encoded_part) > maxlen - len(lines[-1]):
  2457. # But not on this one, so start a new one.
  2458. newline = _steal_trailing_WSP_if_exists(lines)
  2459. # XXX what if encoded_part has no leading FWS?
  2460. lines.append(newline)
  2461. lines[-1] += encoded_part
  2462. continue
  2463. # Either this is not a major syntactic break, so we don't
  2464. # want it on a line by itself even if it fits, or it
  2465. # doesn't fit on a line by itself. Either way, fall through
  2466. # to unpacking the subparts and wrapping them.
  2467. if not hasattr(part, 'encode'):
  2468. # It's not a Terminal, do each piece individually.
  2469. parts = list(part) + parts
  2470. want_encoding = False
  2471. continue
  2472. elif part.as_ew_allowed:
  2473. # It's a terminal, wrap it as an encoded word, possibly
  2474. # combining it with previously encoded words if allowed.
  2475. if (last_ew is not None and
  2476. charset != last_charset and
  2477. (last_charset == 'unknown-8bit' or
  2478. last_charset == 'utf-8' and charset != 'us-ascii')):
  2479. last_ew = None
  2480. last_ew = _fold_as_ew(tstr, lines, maxlen, last_ew,
  2481. part.ew_combine_allowed, charset, leading_whitespace)
  2482. # This whitespace has been added to the lines in _fold_as_ew()
  2483. # so clear it now.
  2484. leading_whitespace = ''
  2485. last_charset = charset
  2486. want_encoding = False
  2487. continue
  2488. else:
  2489. # It's a terminal which should be kept non-encoded
  2490. # (e.g. a ListSeparator).
  2491. last_ew = None
  2492. want_encoding = False
  2493. # fall through
  2494. if len(tstr) <= maxlen - len(lines[-1]):
  2495. lines[-1] += tstr
  2496. continue
  2497. # This part is too long to fit. The RFC wants us to break at
  2498. # "major syntactic breaks", so unless we don't consider this
  2499. # to be one, check if it will fit on the next line by itself.
  2500. leading_whitespace = ''
  2501. if (part.syntactic_break and
  2502. len(tstr) + 1 <= maxlen):
  2503. newline = _steal_trailing_WSP_if_exists(lines)
  2504. if newline or part.startswith_fws():
  2505. # We're going to fold the data onto a new line here. Due to
  2506. # the way encoded strings handle continuation lines, we need to
  2507. # be prepared to encode any whitespace if the next line turns
  2508. # out to start with an encoded word.
                lines.append(newline + tstr)

                whitespace_accumulator = []
                for char in lines[-1]:
                    if char not in WSP:
                        break
                    whitespace_accumulator.append(char)
                leading_whitespace = ''.join(whitespace_accumulator)
                last_ew = None
                continue
        if not hasattr(part, 'encode'):
            # It's not a terminal, try folding the subparts.
            newparts = list(part)
            if not part.as_ew_allowed:
                wrap_as_ew_blocked += 1
                newparts.append(end_ew_not_allowed)
            parts = newparts + parts
            continue
        if part.as_ew_allowed and not wrap_as_ew_blocked:
            # It doesn't need CTE encoding, but encode it anyway so we can
            # wrap it.
            parts.insert(0, part)
            want_encoding = True
            continue
        # We can't figure out how to wrap it, so give up.
        newline = _steal_trailing_WSP_if_exists(lines)
        if newline or part.startswith_fws():
            lines.append(newline + tstr)
        else:
            # We can't fold it onto the next line either...
            lines[-1] += tstr

    return policy.linesep.join(lines) + policy.linesep
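
# An illustrative sketch, not part of the module: the refolding above is what
# ultimately drives header folding through the public API, e.g. (assuming the
# default policy)
#
#     from email import policy
#     from email.headerregistry import HeaderRegistry
#     hdr = HeaderRegistry()('Subject', 'caf\u00e9 con leche ' * 10)
#     print(hdr.fold(policy=policy.default))
#
# should produce continuation lines within max_line_length, with the
# non-ASCII runs rewritten as RFC 2047 encoded words such as
# =?utf-8?q?caf=C3=A9?=.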


def _fold_as_ew(to_encode, lines, maxlen, last_ew, ew_combine_allowed, charset,
                leading_whitespace):
    """Fold string to_encode into lines as encoded word, combining if allowed.
    Return the new value for last_ew, or None if ew_combine_allowed is False.

    If there is already an encoded word in the last line of lines (indicated by
    a non-None value for last_ew) and ew_combine_allowed is true, decode the
    existing ew, combine it with to_encode, and re-encode. Otherwise, encode
    to_encode. In either case, split to_encode as necessary so that the
    encoded segments fit within maxlen.

    """
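    # Worked illustration (made-up values, for orientation only): with
    # charset='utf-8', an existing encoded word ending the current line, and
    # to_encode=' D\u00edaz', the existing word is decoded, joined with
    # to_encode, and re-emitted as a single encoded word along the lines of
    # =?utf-8?q?Mar=C3=ADa_D=C3=ADaz?= rather than as two adjacent words.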
    if last_ew is not None and ew_combine_allowed:
        to_encode = str(
            get_unstructured(lines[-1][last_ew:] + to_encode))
        lines[-1] = lines[-1][:last_ew]
    elif to_encode[0] in WSP:
        # We're joining this to non-encoded text, so don't encode
        # the leading blank.
        leading_wsp = to_encode[0]
        to_encode = to_encode[1:]
        if len(lines[-1]) == maxlen:
            lines.append(_steal_trailing_WSP_if_exists(lines))
        lines[-1] += leading_wsp

    trailing_wsp = ''
    if to_encode[-1] in WSP:
        # Likewise for the trailing space.
        trailing_wsp = to_encode[-1]
        to_encode = to_encode[:-1]
    new_last_ew = len(lines[-1]) if last_ew is None else last_ew

    encode_as = 'utf-8' if charset == 'us-ascii' else charset

    # The RFC2047 chrome takes up 7 characters plus the length
    # of the charset name.
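    # For example, with encode_as='utf-8' the chrome is the 12 characters of
    # "=?utf-8?q??=" (or "=?utf-8?b??="), i.e. len('utf-8') + 7.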
    chrome_len = len(encode_as) + 7
    if chrome_len + 1 >= maxlen:
        raise errors.HeaderParseError(
            "max_line_length is too small to fit an encoded word")

    while to_encode:
        remaining_space = maxlen - len(lines[-1])
        text_space = remaining_space - chrome_len - len(leading_whitespace)
        if text_space <= 0:
            lines.append(' ')
            continue

        # If we are at the start of a continuation line, prepend whitespace
        # (we only want to do this when the line starts with an encoded word,
        # but since we are folding in this helper we know that an encoded
        # word is about to be written out.)
        if len(lines) > 1 and len(lines[-1]) == 1 and leading_whitespace:
            encoded_word = _ew.encode(leading_whitespace, charset=encode_as)
            lines[-1] += encoded_word
            leading_whitespace = ''

        to_encode_word = to_encode[:text_space]
        encoded_word = _ew.encode(to_encode_word, charset=encode_as)
        excess = len(encoded_word) - remaining_space
        while excess > 0:
            # Since the chunk to encode is guaranteed to fit in fewer than
            # 100 characters, shrinking it one character at a time shouldn't
            # take long.
            to_encode_word = to_encode_word[:-1]
            encoded_word = _ew.encode(to_encode_word, charset=encode_as)
            excess = len(encoded_word) - remaining_space
        lines[-1] += encoded_word
        to_encode = to_encode[len(to_encode_word):]
        leading_whitespace = ''

        if to_encode:
            lines.append(' ')
            new_last_ew = len(lines[-1])
    lines[-1] += trailing_wsp
    return new_last_ew if ew_combine_allowed else None


def _fold_mime_parameters(part, lines, maxlen, encoding):
    """Fold TokenList 'part' into the 'lines' list as mime parameters.

    Using the decoded list of parameters and values, format them according to
    the RFC rules, including using RFC2231 encoding if the value cannot be
    expressed in 'encoding' and/or the parameter+value is too long to fit
    within 'maxlen'.

    """
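    # A rough illustration (made-up parameter name and value) of the forms
    # produced below: a value that needs encoding but still fits becomes
    #
    #     title*=utf-8''Fu%C3%9Fball
    #
    # while an over-long value is split into numbered sections, only the
    # first of which carries the charset chrome:
    #
    #     title*0*=utf-8''...; title*1*=...; title*2*=...
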
    # Special case for RFC2231 encoding: start from decoded values and use
    # RFC2231 encoding iff needed.
    #
    # Note that the 1 and 2s being added to the length calculations are
    # accounting for the possibly-needed spaces and semicolons we'll be
    # adding.
    #
    for name, value in part.params:

        # XXX What if this ';' puts us over maxlen the first time through the
        # loop? We should split the header value onto a newline in that case,
        # but to do that we need to recognize the need earlier or reparse the
        # header, so I'm going to ignore that bug for now. It'll only put us
        # one character over.
        if not lines[-1].rstrip().endswith(';'):
            lines[-1] += ';'
        charset = encoding
        error_handler = 'strict'
        try:
            value.encode(encoding)
            encoding_required = False
        except UnicodeEncodeError:
            encoding_required = True
            if utils._has_surrogates(value):
                charset = 'unknown-8bit'
                error_handler = 'surrogateescape'
            else:
                charset = 'utf-8'
        if encoding_required:
            encoded_value = urllib.parse.quote(
                value, safe='', errors=error_handler)
            tstr = "{}*={}''{}".format(name, charset, encoded_value)
        else:
            tstr = '{}={}'.format(name, quote_string(value))
        if len(lines[-1]) + len(tstr) + 1 < maxlen:
            lines[-1] = lines[-1] + ' ' + tstr
            continue
        elif len(tstr) + 2 <= maxlen:
            lines.append(' ' + tstr)
            continue
        # We need multiple sections. We are allowed to mix encoded and
        # non-encoded sections, but we aren't going to. We'll encode them all.
        section = 0
        extra_chrome = charset + "''"
        while value:
            chrome_len = len(name) + len(str(section)) + 3 + len(extra_chrome)
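            # For example (made-up values): name='title', section=0 and
            # extra_chrome="utf-8''" give chrome_len = 5 + 1 + 3 + 7 = 16;
            # the 3 covers the '*', '*' and '=' literals in "title*0*=".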
            if maxlen <= chrome_len + 3:
                # We need room for the leading blank, the trailing semicolon,
                # and at least one character of the value. If we don't
                # have that, we'd be stuck, so in that case fall back to
                # the RFC standard width.
                maxlen = 78
            splitpoint = maxchars = maxlen - chrome_len - 2
            while True:
                partial = value[:splitpoint]
                encoded_value = urllib.parse.quote(
                    partial, safe='', errors=error_handler)
                if len(encoded_value) <= maxchars:
                    break
                splitpoint -= 1
            lines.append(" {}*{}*={}{}".format(
                name, section, extra_chrome, encoded_value))
            extra_chrome = ''
            section += 1
            value = value[splitpoint:]
            if value:
                lines[-1] += ';'