# api.py
  1. import logging
  2. from os import PathLike
  3. from typing import BinaryIO, List, Optional, Set, Union
  4. from .cd import (
  5. coherence_ratio,
  6. encoding_languages,
  7. mb_encoding_languages,
  8. merge_coherence_ratios,
  9. )
  10. from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
  11. from .md import mess_ratio
  12. from .models import CharsetMatch, CharsetMatches
  13. from .utils import (
  14. any_specified_encoding,
  15. cut_sequence_chunks,
  16. iana_name,
  17. identify_sig_or_bom,
  18. is_cp_similar,
  19. is_multi_byte_encoding,
  20. should_strip_sig_or_bom,
  21. )
  22. # Will most likely be controversial
  23. # logging.addLevelName(TRACE, "TRACE")
  24. logger = logging.getLogger("charset_normalizer")
  25. explain_handler = logging.StreamHandler()
  26. explain_handler.setFormatter(
  27. logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
  28. )
def from_bytes(
    sequences: Union[bytes, bytearray],
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.2,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
    language_threshold: float = 0.1,
    enable_fallback: bool = True,
) -> CharsetMatches:
    """
    Given a raw bytes sequence, return the best possibles charset usable to render str objects.
    If there is no results, it is a strong indicator that the source is binary/not text.
    By default, the process will extract 5 blocks of 512 bytes each to assess the mess and coherence of a given sequence.
    And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.

    The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
    but never take it for granted. Can improve the performance.

    You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
    purpose.

    This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
    By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
    toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
    Custom logging format and handler can be set manually.
    """

    if not isinstance(sequences, (bytearray, bytes)):
        raise TypeError(
            "Expected object of type bytes or bytearray, got: {0}".format(
                type(sequences)
            )
        )

    # Temporarily switch the module logger to verbose mode; restored on every
    # return path below (note: the early returns must undo this too).
    if explain:
        previous_logger_level: int = logger.level
        logger.addHandler(explain_handler)
        logger.setLevel(TRACE)

    length: int = len(sequences)

    # Empty payload: by convention report utf_8 with a perfect (0.0) chaos score.
    if length == 0:
        logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
        if explain:
            logger.removeHandler(explain_handler)
            logger.setLevel(previous_logger_level or logging.WARNING)
        return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])

    # Normalize user-supplied code page lists to IANA names (or empty lists).
    if cp_isolation is not None:
        logger.log(
            TRACE,
            "cp_isolation is set. use this flag for debugging purpose. "
            "limited list of encoding allowed : %s.",
            ", ".join(cp_isolation),
        )
        cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
    else:
        cp_isolation = []

    if cp_exclusion is not None:
        logger.log(
            TRACE,
            "cp_exclusion is set. use this flag for debugging purpose. "
            "limited list of encoding excluded : %s.",
            ", ".join(cp_exclusion),
        )
        cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
    else:
        cp_exclusion = []

    # Small payloads cannot honor steps*chunk_size: analyze the whole thing in
    # a single pass instead.
    if length <= (chunk_size * steps):
        logger.log(
            TRACE,
            "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
            steps,
            chunk_size,
            length,
        )
        steps = 1
        chunk_size = length

    # Shrink chunk_size so that `steps` chunks still fit within the payload.
    if steps > 1 and length / steps < chunk_size:
        chunk_size = int(length / steps)

    is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
    is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE

    if is_too_small_sequence:
        logger.log(
            TRACE,
            "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
                length
            ),
        )
    elif is_too_large_sequence:
        logger.log(
            TRACE,
            "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
                length
            ),
        )

    # Encodings tried before the full IANA_SUPPORTED sweep: declared charset
    # (if preemptive), BOM/SIG-detected charset, then ascii and utf_8.
    prioritized_encodings: List[str] = []

    specified_encoding: Optional[str] = (
        any_specified_encoding(sequences) if preemptive_behaviour else None
    )

    if specified_encoding is not None:
        prioritized_encodings.append(specified_encoding)
        logger.log(
            TRACE,
            "Detected declarative mark in sequence. Priority +1 given for %s.",
            specified_encoding,
        )

    tested: Set[str] = set()
    tested_but_hard_failure: List[str] = []
    tested_but_soft_failure: List[str] = []

    # Kept as last-resort answers when no candidate survives the probing.
    fallback_ascii: Optional[CharsetMatch] = None
    fallback_u8: Optional[CharsetMatch] = None
    fallback_specified: Optional[CharsetMatch] = None

    results: CharsetMatches = CharsetMatches()

    sig_encoding, sig_payload = identify_sig_or_bom(sequences)

    if sig_encoding is not None:
        prioritized_encodings.append(sig_encoding)
        logger.log(
            TRACE,
            "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
            len(sig_payload),
            sig_encoding,
        )

    prioritized_encodings.append("ascii")

    if "utf_8" not in prioritized_encodings:
        prioritized_encodings.append("utf_8")

    for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
        if cp_isolation and encoding_iana not in cp_isolation:
            continue

        if cp_exclusion and encoding_iana in cp_exclusion:
            continue

        if encoding_iana in tested:
            continue

        tested.add(encoding_iana)

        decoded_payload: Optional[str] = None
        bom_or_sig_available: bool = sig_encoding == encoding_iana
        strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
            encoding_iana
        )

        # Generic utf_16/utf_32 are ambiguous without a BOM; the LE/BE
        # variants are tested separately via IANA_SUPPORTED.
        if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
            logger.log(
                TRACE,
                "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
                encoding_iana,
            )
            continue
        if encoding_iana in {"utf_7"} and not bom_or_sig_available:
            logger.log(
                TRACE,
                "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
                encoding_iana,
            )
            continue

        try:
            is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
        except (ModuleNotFoundError, ImportError):
            logger.log(
                TRACE,
                "Encoding %s does not provide an IncrementalDecoder",
                encoding_iana,
            )
            continue

        # Fast eligibility check: can the payload be decoded at all? For very
        # large single-byte payloads only the first 500 kB is decoded here
        # (lazy loading); the remainder is re-checked further down.
        try:
            if is_too_large_sequence and is_multi_byte_decoder is False:
                str(
                    sequences[: int(50e4)]
                    if strip_sig_or_bom is False
                    else sequences[len(sig_payload) : int(50e4)],
                    encoding=encoding_iana,
                )
            else:
                decoded_payload = str(
                    sequences
                    if strip_sig_or_bom is False
                    else sequences[len(sig_payload) :],
                    encoding=encoding_iana,
                )
        except (UnicodeDecodeError, LookupError) as e:
            if not isinstance(e, LookupError):
                logger.log(
                    TRACE,
                    "Code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
            tested_but_hard_failure.append(encoding_iana)
            continue

        # Skip encodings that are near-duplicates of an already-rejected one.
        similar_soft_failure_test: bool = False

        for encoding_soft_failed in tested_but_soft_failure:
            if is_cp_similar(encoding_iana, encoding_soft_failed):
                similar_soft_failure_test = True
                break

        if similar_soft_failure_test:
            logger.log(
                TRACE,
                "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
                encoding_iana,
                encoding_soft_failed,
            )
            continue

        # Chunk start offsets across the payload (skipping the BOM if any).
        r_ = range(
            0 if not bom_or_sig_available else len(sig_payload),
            length,
            int(length / steps),
        )

        multi_byte_bonus: bool = (
            is_multi_byte_decoder
            and decoded_payload is not None
            and len(decoded_payload) < length
        )

        if multi_byte_bonus:
            logger.log(
                TRACE,
                "Code page %s is a multi byte encoding table and it appear that at least one character "
                "was encoded using n-bytes.",
                encoding_iana,
            )

        # Abandon the candidate after a quarter of chunks (min 2) look messy.
        max_chunk_gave_up: int = int(len(r_) / 4)

        max_chunk_gave_up = max(max_chunk_gave_up, 2)
        early_stop_count: int = 0
        lazy_str_hard_failure: bool = False

        md_chunks: List[str] = []
        md_ratios: List[float] = []

        try:
            for chunk in cut_sequence_chunks(
                sequences,
                encoding_iana,
                r_,
                chunk_size,
                bom_or_sig_available,
                strip_sig_or_bom,
                sig_payload,
                is_multi_byte_decoder,
                decoded_payload,
            ):
                md_chunks.append(chunk)

                md_ratios.append(
                    mess_ratio(
                        chunk,
                        threshold,
                        # Verbose mess details only in explain mode with a
                        # narrow (1-2 entries) cp_isolation list.
                        explain is True and 1 <= len(cp_isolation) <= 2,
                    )
                )

                if md_ratios[-1] >= threshold:
                    early_stop_count += 1

                # A BOM-bearing candidate that keeps its SIG only needs one
                # chunk to be convincing; otherwise stop after too many messy
                # chunks.
                if (early_stop_count >= max_chunk_gave_up) or (
                    bom_or_sig_available and strip_sig_or_bom is False
                ):
                    break
        except (
            UnicodeDecodeError
        ) as e:  # Lazy str loading may have missed something there
            logger.log(
                TRACE,
                "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
                encoding_iana,
                str(e),
            )
            early_stop_count = max_chunk_gave_up
            lazy_str_hard_failure = True

        # We might want to check the sequence again with the whole content
        # Only if initial MD tests passes
        if (
            not lazy_str_hard_failure
            and is_too_large_sequence
            and not is_multi_byte_decoder
        ):
            try:
                sequences[int(50e3) :].decode(encoding_iana, errors="strict")
            except UnicodeDecodeError as e:
                logger.log(
                    TRACE,
                    "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
                tested_but_hard_failure.append(encoding_iana)
                continue

        mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
        if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
            tested_but_soft_failure.append(encoding_iana)
            logger.log(
                TRACE,
                "%s was excluded because of initial chaos probing. Gave up %i time(s). "
                "Computed mean chaos is %f %%.",
                encoding_iana,
                early_stop_count,
                round(mean_mess_ratio * 100, ndigits=3),
            )
            # Preparing those fallbacks in case we got nothing.
            if (
                enable_fallback
                and encoding_iana in ["ascii", "utf_8", specified_encoding]
                and not lazy_str_hard_failure
            ):
                fallback_entry = CharsetMatch(
                    sequences, encoding_iana, threshold, False, [], decoded_payload
                )
                if encoding_iana == specified_encoding:
                    fallback_specified = fallback_entry
                elif encoding_iana == "ascii":
                    fallback_ascii = fallback_entry
                else:
                    fallback_u8 = fallback_entry
            continue

        logger.log(
            TRACE,
            "%s passed initial chaos probing. Mean measured chaos is %f %%",
            encoding_iana,
            round(mean_mess_ratio * 100, ndigits=3),
        )

        # Coherence (language) detection on the surviving candidate.
        if not is_multi_byte_decoder:
            target_languages: List[str] = encoding_languages(encoding_iana)
        else:
            target_languages = mb_encoding_languages(encoding_iana)

        if target_languages:
            logger.log(
                TRACE,
                "{} should target any language(s) of {}".format(
                    encoding_iana, str(target_languages)
                ),
            )

        cd_ratios = []

        # We shall skip the CD when its about ASCII
        # Most of the time its not relevant to run "language-detection" on it.
        if encoding_iana != "ascii":
            for chunk in md_chunks:
                chunk_languages = coherence_ratio(
                    chunk,
                    language_threshold,
                    ",".join(target_languages) if target_languages else None,
                )

                cd_ratios.append(chunk_languages)

        cd_ratios_merged = merge_coherence_ratios(cd_ratios)

        if cd_ratios_merged:
            logger.log(
                TRACE,
                "We detected language {} using {}".format(
                    cd_ratios_merged, encoding_iana
                ),
            )

        results.append(
            CharsetMatch(
                sequences,
                encoding_iana,
                mean_mess_ratio,
                bom_or_sig_available,
                cd_ratios_merged,
                decoded_payload,
            )
        )

        # Early exit: a prioritized candidate with very low chaos is accepted
        # immediately without testing the remaining encodings.
        if (
            encoding_iana in [specified_encoding, "ascii", "utf_8"]
            and mean_mess_ratio < 0.1
        ):
            logger.debug(
                "Encoding detection: %s is most likely the one.", encoding_iana
            )
            if explain:
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

        # Early exit: the BOM/SIG-matching candidate passed probing.
        if encoding_iana == sig_encoding:
            logger.debug(
                "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
                "the beginning of the sequence.",
                encoding_iana,
            )
            if explain:
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

    # Nothing survived: fall back to specified/utf_8/ascii entries prepared
    # earlier (if fallbacks are enabled).
    if len(results) == 0:
        if fallback_u8 or fallback_ascii or fallback_specified:
            logger.log(
                TRACE,
                "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
            )

        if fallback_specified:
            logger.debug(
                "Encoding detection: %s will be used as a fallback match",
                fallback_specified.encoding,
            )
            results.append(fallback_specified)
        # NOTE(review): the trailing `or (fallback_u8 is not None)` makes the
        # first two clauses redundant — any non-None fallback_u8 wins here.
        elif (
            (fallback_u8 and fallback_ascii is None)
            or (
                fallback_u8
                and fallback_ascii
                and fallback_u8.fingerprint != fallback_ascii.fingerprint
            )
            or (fallback_u8 is not None)
        ):
            logger.debug("Encoding detection: utf_8 will be used as a fallback match")
            results.append(fallback_u8)
        elif fallback_ascii:
            logger.debug("Encoding detection: ascii will be used as a fallback match")
            results.append(fallback_ascii)

    if results:
        logger.debug(
            "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
            results.best().encoding,  # type: ignore
            len(results) - 1,
        )
    else:
        logger.debug("Encoding detection: Unable to determine any suitable charset.")

    # Restore the logger exactly as it was before this call.
    if explain:
        logger.removeHandler(explain_handler)
        logger.setLevel(previous_logger_level)

    return results
  434. def from_fp(
  435. fp: BinaryIO,
  436. steps: int = 5,
  437. chunk_size: int = 512,
  438. threshold: float = 0.20,
  439. cp_isolation: Optional[List[str]] = None,
  440. cp_exclusion: Optional[List[str]] = None,
  441. preemptive_behaviour: bool = True,
  442. explain: bool = False,
  443. language_threshold: float = 0.1,
  444. enable_fallback: bool = True,
  445. ) -> CharsetMatches:
  446. """
  447. Same thing than the function from_bytes but using a file pointer that is already ready.
  448. Will not close the file pointer.
  449. """
  450. return from_bytes(
  451. fp.read(),
  452. steps,
  453. chunk_size,
  454. threshold,
  455. cp_isolation,
  456. cp_exclusion,
  457. preemptive_behaviour,
  458. explain,
  459. language_threshold,
  460. enable_fallback,
  461. )
  462. def from_path(
  463. path: Union[str, bytes, PathLike], # type: ignore[type-arg]
  464. steps: int = 5,
  465. chunk_size: int = 512,
  466. threshold: float = 0.20,
  467. cp_isolation: Optional[List[str]] = None,
  468. cp_exclusion: Optional[List[str]] = None,
  469. preemptive_behaviour: bool = True,
  470. explain: bool = False,
  471. language_threshold: float = 0.1,
  472. enable_fallback: bool = True,
  473. ) -> CharsetMatches:
  474. """
  475. Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
  476. Can raise IOError.
  477. """
  478. with open(path, "rb") as fp:
  479. return from_fp(
  480. fp,
  481. steps,
  482. chunk_size,
  483. threshold,
  484. cp_isolation,
  485. cp_exclusion,
  486. preemptive_behaviour,
  487. explain,
  488. language_threshold,
  489. enable_fallback,
  490. )
  491. def is_binary(
  492. fp_or_path_or_payload: Union[PathLike, str, BinaryIO, bytes], # type: ignore[type-arg]
  493. steps: int = 5,
  494. chunk_size: int = 512,
  495. threshold: float = 0.20,
  496. cp_isolation: Optional[List[str]] = None,
  497. cp_exclusion: Optional[List[str]] = None,
  498. preemptive_behaviour: bool = True,
  499. explain: bool = False,
  500. language_threshold: float = 0.1,
  501. enable_fallback: bool = False,
  502. ) -> bool:
  503. """
  504. Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
  505. Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
  506. are disabled to be stricter around ASCII-compatible but unlikely to be a string.
  507. """
  508. if isinstance(fp_or_path_or_payload, (str, PathLike)):
  509. guesses = from_path(
  510. fp_or_path_or_payload,
  511. steps=steps,
  512. chunk_size=chunk_size,
  513. threshold=threshold,
  514. cp_isolation=cp_isolation,
  515. cp_exclusion=cp_exclusion,
  516. preemptive_behaviour=preemptive_behaviour,
  517. explain=explain,
  518. language_threshold=language_threshold,
  519. enable_fallback=enable_fallback,
  520. )
  521. elif isinstance(
  522. fp_or_path_or_payload,
  523. (
  524. bytes,
  525. bytearray,
  526. ),
  527. ):
  528. guesses = from_bytes(
  529. fp_or_path_or_payload,
  530. steps=steps,
  531. chunk_size=chunk_size,
  532. threshold=threshold,
  533. cp_isolation=cp_isolation,
  534. cp_exclusion=cp_exclusion,
  535. preemptive_behaviour=preemptive_behaviour,
  536. explain=explain,
  537. language_threshold=language_threshold,
  538. enable_fallback=enable_fallback,
  539. )
  540. else:
  541. guesses = from_fp(
  542. fp_or_path_or_payload,
  543. steps=steps,
  544. chunk_size=chunk_size,
  545. threshold=threshold,
  546. cp_isolation=cp_isolation,
  547. cp_exclusion=cp_exclusion,
  548. preemptive_behaviour=preemptive_behaviour,
  549. explain=explain,
  550. language_threshold=language_threshold,
  551. enable_fallback=enable_fallback,
  552. )
  553. return not guesses