utils.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399
  1. import importlib
  2. import logging
  3. import unicodedata
  4. from codecs import IncrementalDecoder
  5. from encodings.aliases import aliases
  6. from functools import lru_cache
  7. from re import findall
  8. from typing import Generator, List, Optional, Set, Tuple, Union
  9. from _multibytecodec import MultibyteIncrementalDecoder
  10. from .constant import (
  11. ENCODING_MARKS,
  12. IANA_SUPPORTED_SIMILAR,
  13. RE_POSSIBLE_ENCODING_INDICATION,
  14. UNICODE_RANGES_COMBINED,
  15. UNICODE_SECONDARY_RANGE_KEYWORD,
  16. UTF8_MAXIMAL_ALLOCATION,
  17. )
  18. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  19. def is_accentuated(character: str) -> bool:
  20. try:
  21. description: str = unicodedata.name(character)
  22. except ValueError:
  23. return False
  24. return (
  25. "WITH GRAVE" in description
  26. or "WITH ACUTE" in description
  27. or "WITH CEDILLA" in description
  28. or "WITH DIAERESIS" in description
  29. or "WITH CIRCUMFLEX" in description
  30. or "WITH TILDE" in description
  31. )
  32. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  33. def remove_accent(character: str) -> str:
  34. decomposed: str = unicodedata.decomposition(character)
  35. if not decomposed:
  36. return character
  37. codes: List[str] = decomposed.split(" ")
  38. return chr(int(codes[0], 16))
  39. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  40. def unicode_range(character: str) -> Optional[str]:
  41. """
  42. Retrieve the Unicode range official name from a single character.
  43. """
  44. character_ord: int = ord(character)
  45. for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
  46. if character_ord in ord_range:
  47. return range_name
  48. return None
  49. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  50. def is_latin(character: str) -> bool:
  51. try:
  52. description: str = unicodedata.name(character)
  53. except ValueError:
  54. return False
  55. return "LATIN" in description
  56. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  57. def is_punctuation(character: str) -> bool:
  58. character_category: str = unicodedata.category(character)
  59. if "P" in character_category:
  60. return True
  61. character_range: Optional[str] = unicode_range(character)
  62. if character_range is None:
  63. return False
  64. return "Punctuation" in character_range
  65. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  66. def is_symbol(character: str) -> bool:
  67. character_category: str = unicodedata.category(character)
  68. if "S" in character_category or "N" in character_category:
  69. return True
  70. character_range: Optional[str] = unicode_range(character)
  71. if character_range is None:
  72. return False
  73. return "Forms" in character_range
  74. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  75. def is_emoticon(character: str) -> bool:
  76. character_range: Optional[str] = unicode_range(character)
  77. if character_range is None:
  78. return False
  79. return "Emoticons" in character_range
  80. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  81. def is_separator(character: str) -> bool:
  82. if character.isspace() or character in {"|", "+", "<", ">"}:
  83. return True
  84. character_category: str = unicodedata.category(character)
  85. return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}
  86. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  87. def is_case_variable(character: str) -> bool:
  88. return character.islower() != character.isupper()
  89. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  90. def is_cjk(character: str) -> bool:
  91. try:
  92. character_name = unicodedata.name(character)
  93. except ValueError:
  94. return False
  95. return "CJK" in character_name
  96. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  97. def is_hiragana(character: str) -> bool:
  98. try:
  99. character_name = unicodedata.name(character)
  100. except ValueError:
  101. return False
  102. return "HIRAGANA" in character_name
  103. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  104. def is_katakana(character: str) -> bool:
  105. try:
  106. character_name = unicodedata.name(character)
  107. except ValueError:
  108. return False
  109. return "KATAKANA" in character_name
  110. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  111. def is_hangul(character: str) -> bool:
  112. try:
  113. character_name = unicodedata.name(character)
  114. except ValueError:
  115. return False
  116. return "HANGUL" in character_name
  117. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  118. def is_thai(character: str) -> bool:
  119. try:
  120. character_name = unicodedata.name(character)
  121. except ValueError:
  122. return False
  123. return "THAI" in character_name
  124. @lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
  125. def is_unicode_range_secondary(range_name: str) -> bool:
  126. return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
  127. @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
  128. def is_unprintable(character: str) -> bool:
  129. return (
  130. character.isspace() is False # includes \n \t \r \v
  131. and character.isprintable() is False
  132. and character != "\x1A" # Why? Its the ASCII substitute character.
  133. and character != "\ufeff" # bug discovered in Python,
  134. # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space.
  135. )
  136. def any_specified_encoding(sequence: bytes, search_zone: int = 8192) -> Optional[str]:
  137. """
  138. Extract using ASCII-only decoder any specified encoding in the first n-bytes.
  139. """
  140. if not isinstance(sequence, bytes):
  141. raise TypeError
  142. seq_len: int = len(sequence)
  143. results: List[str] = findall(
  144. RE_POSSIBLE_ENCODING_INDICATION,
  145. sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
  146. )
  147. if len(results) == 0:
  148. return None
  149. for specified_encoding in results:
  150. specified_encoding = specified_encoding.lower().replace("-", "_")
  151. encoding_alias: str
  152. encoding_iana: str
  153. for encoding_alias, encoding_iana in aliases.items():
  154. if encoding_alias == specified_encoding:
  155. return encoding_iana
  156. if encoding_iana == specified_encoding:
  157. return encoding_iana
  158. return None
  159. @lru_cache(maxsize=128)
  160. def is_multi_byte_encoding(name: str) -> bool:
  161. """
  162. Verify is a specific encoding is a multi byte one based on it IANA name
  163. """
  164. return name in {
  165. "utf_8",
  166. "utf_8_sig",
  167. "utf_16",
  168. "utf_16_be",
  169. "utf_16_le",
  170. "utf_32",
  171. "utf_32_le",
  172. "utf_32_be",
  173. "utf_7",
  174. } or issubclass(
  175. importlib.import_module("encodings.{}".format(name)).IncrementalDecoder,
  176. MultibyteIncrementalDecoder,
  177. )
  178. def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
  179. """
  180. Identify and extract SIG/BOM in given sequence.
  181. """
  182. for iana_encoding in ENCODING_MARKS:
  183. marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding]
  184. if isinstance(marks, bytes):
  185. marks = [marks]
  186. for mark in marks:
  187. if sequence.startswith(mark):
  188. return iana_encoding, mark
  189. return None, b""
  190. def should_strip_sig_or_bom(iana_encoding: str) -> bool:
  191. return iana_encoding not in {"utf_16", "utf_32"}
  192. def iana_name(cp_name: str, strict: bool = True) -> str:
  193. cp_name = cp_name.lower().replace("-", "_")
  194. encoding_alias: str
  195. encoding_iana: str
  196. for encoding_alias, encoding_iana in aliases.items():
  197. if cp_name in [encoding_alias, encoding_iana]:
  198. return encoding_iana
  199. if strict:
  200. raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))
  201. return cp_name
  202. def range_scan(decoded_sequence: str) -> List[str]:
  203. ranges: Set[str] = set()
  204. for character in decoded_sequence:
  205. character_range: Optional[str] = unicode_range(character)
  206. if character_range is None:
  207. continue
  208. ranges.add(character_range)
  209. return list(ranges)
  210. def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
  211. if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
  212. return 0.0
  213. decoder_a = importlib.import_module(
  214. "encodings.{}".format(iana_name_a)
  215. ).IncrementalDecoder
  216. decoder_b = importlib.import_module(
  217. "encodings.{}".format(iana_name_b)
  218. ).IncrementalDecoder
  219. id_a: IncrementalDecoder = decoder_a(errors="ignore")
  220. id_b: IncrementalDecoder = decoder_b(errors="ignore")
  221. character_match_count: int = 0
  222. for i in range(255):
  223. to_be_decoded: bytes = bytes([i])
  224. if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
  225. character_match_count += 1
  226. return character_match_count / 254
  227. def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
  228. """
  229. Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
  230. the function cp_similarity.
  231. """
  232. return (
  233. iana_name_a in IANA_SUPPORTED_SIMILAR
  234. and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
  235. )
  236. def set_logging_handler(
  237. name: str = "charset_normalizer",
  238. level: int = logging.INFO,
  239. format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
  240. ) -> None:
  241. logger = logging.getLogger(name)
  242. logger.setLevel(level)
  243. handler = logging.StreamHandler()
  244. handler.setFormatter(logging.Formatter(format_string))
  245. logger.addHandler(handler)
def cut_sequence_chunks(
    sequences: bytes,
    encoding_iana: str,
    offsets: range,
    chunk_size: int,
    bom_or_sig_available: bool,
    strip_sig_or_bom: bool,
    sig_payload: bytes,
    is_multi_byte_decoder: bool,
    decoded_payload: Optional[str] = None,
) -> Generator[str, None, None]:
    """Yield decoded text chunks of *sequences* at the given *offsets*.

    When a full decoded payload is already available and the codec is
    single-byte, chunks are sliced directly from that string. Otherwise each
    chunk is cut from the raw bytes and decoded with *encoding_iana*; for
    multi-byte codecs a misaligned cut is detected and the start offset is
    nudged back by up to 3 bytes until the chunk matches *decoded_payload*.
    """
    if decoded_payload and is_multi_byte_decoder is False:
        # Fast path: single-byte codec with the whole payload already decoded —
        # slicing the decoded string cannot split a character.
        for i in offsets:
            chunk = decoded_payload[i : i + chunk_size]
            if not chunk:
                break
            yield chunk
    else:
        for i in offsets:
            chunk_end = i + chunk_size
            # Skip windows extending too far past the end of the data
            # (an 8-byte tolerance is allowed — TODO confirm rationale).
            if chunk_end > len(sequences) + 8:
                continue
            cut_sequence = sequences[i : i + chunk_size]
            # When the signature/BOM is kept, re-prefix it so the decoder
            # sees a well-formed stream.
            if bom_or_sig_available and strip_sig_or_bom is False:
                cut_sequence = sig_payload + cut_sequence
            chunk = cut_sequence.decode(
                encoding_iana,
                errors="ignore" if is_multi_byte_decoder else "strict",
            )
            # multi-byte bad cutting detector and adjustment
            # not the cleanest way to perform that fix but clever enough for now.
            if is_multi_byte_decoder and i > 0:
                # Compare only a short prefix — enough to detect misalignment
                # without scanning the whole chunk.
                chunk_partial_size_chk: int = min(chunk_size, 16)
                if (
                    decoded_payload
                    and chunk[:chunk_partial_size_chk] not in decoded_payload
                ):
                    # Walk the start offset back (up to 3 bytes) until the
                    # decoded prefix lines up with the reference payload.
                    for j in range(i, i - 4, -1):
                        cut_sequence = sequences[j:chunk_end]
                        if bom_or_sig_available and strip_sig_or_bom is False:
                            cut_sequence = sig_payload + cut_sequence
                        chunk = cut_sequence.decode(encoding_iana, errors="ignore")
                        if chunk[:chunk_partial_size_chk] in decoded_payload:
                            break
            yield chunk