# -*- coding: utf-8 -*-
"""
    pygments.lexers.mime
    ~~~~~~~~~~~~~~~~~~~~

    Lexer for Multipurpose Internet Mail Extensions (MIME) data.

    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexer import RegexLexer, include
from pygments.lexers import get_lexer_for_mimetype
from pygments.token import Text, Name, String, Operator, Comment, Other
from pygments.util import get_int_opt, ClassNotFound

__all__ = ["MIMELexer"]

class MIMELexer(RegexLexer):
    """
    Lexer for Multipurpose Internet Mail Extensions (MIME) data. This lexer
    is designed to process nested multipart data.

    It assumes that the given data contains both a header and a body (split
    by an empty line). If no valid header is found, the entire data is
    treated as body.

    Additional options accepted:

    `MIME-max-level`
        Max recursion level for nested MIME structure. Any negative number
        is treated as unlimited. (default: -1)

    `Content-Type`
        Treat the data as a specific content type. Useful when the header is
        missing; otherwise this lexer parses the content type from the
        header. (default: `text/plain`)

    `Multipart-Boundary`
        Set the default multipart boundary delimiter. This option is only
        used when `Content-Type` is `multipart` and the header is missing;
        otherwise this lexer parses the boundary from the header. (default:
        None)

    `Content-Transfer-Encoding`
        Treat the data as a specific encoding; otherwise this lexer parses
        the encoding from the header. (default: None)

    .. versionadded:: 2.5
    """

    name = "MIME"
    aliases = ["mime"]
    mimetypes = ["multipart/mixed",
                 "multipart/related",
                 "multipart/alternative"]

    def __init__(self, **options):
        super(MIMELexer, self).__init__(**options)
        self.boundary = options.get("Multipart-Boundary")
        self.content_transfer_encoding = options.get("Content-Transfer-Encoding")
        self.content_type = options.get("Content-Type", "text/plain")
        self.max_nested_level = get_int_opt(options, "MIME-max-level", -1)

    def analyse_text(text):
        try:
            header, body = text.strip().split("\n\n", 1)
            if not body.strip():
                return 0.1

            # strip every well-formed "Field: value" header line (same
            # pattern as the "root" state below); anything left over means
            # the header block is not valid MIME
            invalid_headers = re.sub(r"^([\w-]+):( *)([\s\S]*?\n)(?![ \t])",
                                     "", header + "\n", flags=re.MULTILINE)
            if invalid_headers.strip():
                return 0.1
            else:
                return 1
        except ValueError:
            return 0.1

    def get_header_tokens(self, match):
        field = match.group(1)

        if field.lower() in self.attention_headers:
            yield match.start(1), Name.Tag, field + ":"
            yield match.start(2), Text.Whitespace, match.group(2)

            pos = match.end(2)
            body = match.group(3)
            for i, t, v in self.get_tokens_unprocessed(body,
                                                       ("root", field.lower())):
                yield pos + i, t, v
        else:
            yield match.start(), Comment, match.group()

    def get_body_tokens(self, match):
        pos_body_start = match.start()
        entire_body = match.group()

        # skip first newline
        if entire_body[0] == '\n':
            yield pos_body_start, Text.Whitespace, u'\n'
            pos_body_start = pos_body_start + 1
            entire_body = entire_body[1:]

        # if it is not multipart
        if not self.content_type.startswith("multipart") or not self.boundary:
            for i, t, v in self.get_bodypart_tokens(entire_body):
                yield pos_body_start + i, t, v
            return

        # find boundary
        bdry_pattern = r"^--%s(--)?\n" % re.escape(self.boundary)
        bdry_matcher = re.compile(bdry_pattern, re.MULTILINE)

        # some data has prefix text before the first boundary
        m = bdry_matcher.search(entire_body)
        if m:
            pos_part_start = pos_body_start + m.end()
            pos_iter_start = m.end()
            yield pos_body_start, Text, entire_body[:m.start()]
            yield pos_body_start + m.start(), String.Delimiter, m.group()
        else:
            pos_part_start = pos_body_start
            pos_iter_start = 0

        # process tokens of each body part
        for m in bdry_matcher.finditer(entire_body, pos_iter_start):
            # bodypart
            lpos_start = pos_part_start - pos_body_start
            lpos_end = m.start()
            part = entire_body[lpos_start:lpos_end]
            for i, t, v in self.get_bodypart_tokens(part):
                yield pos_part_start + i, t, v

            # boundary
            yield pos_body_start + lpos_end, String.Delimiter, m.group()
            pos_part_start = pos_body_start + m.end()

        # some data has suffix text after the last boundary
        lpos_start = pos_part_start - pos_body_start
        if lpos_start != len(entire_body):
            yield pos_part_start, Text, entire_body[lpos_start:]
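
    # For reference, the multipart layout that get_body_tokens walks, per
    # RFC 2046 ("sep" is an illustrative boundary, taken from the boundary
    # parameter of the Content-Type header):
    #
    #     preamble text
    #     --sep
    #     ...first body part...
    #     --sep
    #     ...second body part...
    #     --sep--
    #     epilogue text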

    def get_bodypart_tokens(self, text):
        # return the part as-is if:
        #  * there is no content
        #  * no content type is specified
        #  * the content encoding is not readable
        #  * the max recursion level is exceeded
        if not text.strip() or not self.content_type:
            return [(0, Other, text)]

        cte = self.content_transfer_encoding
        if cte and cte not in {"8bit", "7bit", "quoted-printable"}:
            return [(0, Other, text)]

        if self.max_nested_level == 0:
            return [(0, Other, text)]

        # get lexer
        try:
            lexer = get_lexer_for_mimetype(self.content_type)
        except ClassNotFound:
            return [(0, Other, text)]

        if isinstance(lexer, type(self)):
            lexer.max_nested_level = self.max_nested_level - 1

        return lexer.get_tokens_unprocessed(text)

    def store_content_type(self, match):
        self.content_type = match.group(1)

        prefix_len = match.start(1) - match.start(0)
        yield match.start(0), Text.Whitespace, match.group(0)[:prefix_len]
        yield match.start(1), Name.Label, match.group(2)
        yield match.end(2), String.Delimiter, u"/"
        yield match.start(3), Name.Label, match.group(3)

    def get_content_type_subtokens(self, match):
        yield match.start(1), Text, match.group(1)
        yield match.start(2), Text.Whitespace, match.group(2)
        yield match.start(3), Name.Attribute, match.group(3)
        yield match.start(4), Operator, match.group(4)
        yield match.start(5), String, match.group(5)

        if match.group(3).lower() == "boundary":
            boundary = match.group(5).strip()
            if boundary[0] == '"' and boundary[-1] == '"':
                boundary = boundary[1:-1]
            self.boundary = boundary

    def store_content_transfer_encoding(self, match):
        self.content_transfer_encoding = match.group(0).lower()
        yield match.start(0), Name.Constant, match.group(0)

    attention_headers = {"content-type", "content-transfer-encoding"}

    tokens = {
        "root": [
            (r"^([\w-]+):( *)([\s\S]*?\n)(?![ \t])", get_header_tokens),
            (r"^$[\s\S]+", get_body_tokens),
        ],
        "header": [
            # folding
            (r"\n[ \t]", Text.Whitespace),
            (r"\n(?![ \t])", Text.Whitespace, "#pop"),
        ],
        "content-type": [
            include("header"),
            (
                r"^\s*((multipart|application|audio|font|image|model|text|video"
                r"|message)/([\w-]+))",
                store_content_type,
            ),
            (r'(;)((?:[ \t]|\n[ \t])*)([\w:-]+)(=)([\s\S]*?)(?=;|\n(?![ \t]))',
             get_content_type_subtokens),
            (r';[ \t]*\n(?![ \t])', Text, '#pop'),
        ],
        "content-transfer-encoding": [
            include("header"),
            (r"([\w-]+)", store_content_transfer_encoding),
        ],
    }
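

# ----------------------------------------------------------------------
# Usage sketch (not part of the original module): run this file directly
# to print the raw token stream for a small multipart message. The sample
# message and the "sep" boundary are made up for demonstration.
if __name__ == "__main__":
    sample = ('Content-Type: multipart/mixed; boundary="sep"\n'
              '\n'
              '--sep\n'
              'Content-Type: text/plain\n'
              '\n'
              'hello\n'
              '--sep--\n')

    lexer = MIMELexer(**{"MIME-max-level": 2})
    for pos, token, value in lexer.get_tokens_unprocessed(sample):
        print(pos, token, repr(value))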