models.py

from encodings.aliases import aliases
from hashlib import sha256
from json import dumps
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union

from .constant import TOO_BIG_SEQUENCE
from .utils import iana_name, is_multi_byte_encoding, unicode_range


class CharsetMatch:
    def __init__(
        self,
        payload: bytes,
        guessed_encoding: str,
        mean_mess_ratio: float,
        has_sig_or_bom: bool,
        languages: "CoherenceMatches",
        decoded_payload: Optional[str] = None,
    ):
        self._payload: bytes = payload
        self._encoding: str = guessed_encoding
        self._mean_mess_ratio: float = mean_mess_ratio
        self._languages: CoherenceMatches = languages
        self._has_sig_or_bom: bool = has_sig_or_bom
        self._unicode_ranges: Optional[List[str]] = None
        self._leaves: List[CharsetMatch] = []
        self._mean_coherence_ratio: float = 0.0
        self._output_payload: Optional[bytes] = None
        self._output_encoding: Optional[str] = None
        self._string: Optional[str] = decoded_payload

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, CharsetMatch):
            raise TypeError(
                "__eq__ cannot be invoked on {} and {}.".format(
                    str(other.__class__), str(self.__class__)
                )
            )
        return self.encoding == other.encoding and self.fingerprint == other.fingerprint

    def __lt__(self, other: object) -> bool:
        """
        Implemented to make sorted() available on CharsetMatch items.
        """
        if not isinstance(other, CharsetMatch):
            raise ValueError

        chaos_difference: float = abs(self.chaos - other.chaos)
        coherence_difference: float = abs(self.coherence - other.coherence)

        # Below 1% chaos difference --> use coherence instead
        if chaos_difference < 0.01 and coherence_difference > 0.02:
            return self.coherence > other.coherence
        elif chaos_difference < 0.01 and coherence_difference <= 0.02:
            # When the decision is difficult, prefer the result that decoded
            # as much multi-byte data as possible.
            return self.multi_byte_usage > other.multi_byte_usage

        return self.chaos < other.chaos
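    # Illustrative note (not part of the original code): because __lt__ orders matches by
    # mess ("chaos") first and language coherence second, sorting a list of CharsetMatch
    # instances puts the most plausible decoding first, e.g. (hypothetical matches)
    #   sorted([noisy_latin_1_match, clean_utf_8_match])[0]  # -> clean_utf_8_match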
    @property
    def multi_byte_usage(self) -> float:
        return 1.0 - (len(str(self)) / len(self.raw))

    def __str__(self) -> str:
        # Lazy str loading
        if self._string is None:
            self._string = str(self._payload, self._encoding, "strict")
        return self._string

    def __repr__(self) -> str:
        return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)

    def add_submatch(self, other: "CharsetMatch") -> None:
        if not isinstance(other, CharsetMatch) or other == self:
            raise ValueError(
                "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
                    other.__class__
                )
            )
        other._string = None  # Drop the cached decoded string to save RAM; dirty trick.
        self._leaves.append(other)

    @property
    def encoding(self) -> str:
        return self._encoding

    @property
    def encoding_aliases(self) -> List[str]:
        """
        An encoding is known by many names; this property can help, e.g. when searching for IBM855 while it is listed as CP855.
        """
        also_known_as: List[str] = []
        for u, p in aliases.items():
            if self.encoding == u:
                also_known_as.append(p)
            elif self.encoding == p:
                also_known_as.append(u)
        return also_known_as
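    # Illustrative note (not part of the original code): for a match guessed as "cp855",
    # this property would surface Python codec aliases from encodings.aliases such as
    # "ibm855", so either spelling can be matched by callers.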
    @property
    def bom(self) -> bool:
        return self._has_sig_or_bom

    @property
    def byte_order_mark(self) -> bool:
        return self._has_sig_or_bom

    @property
    def languages(self) -> List[str]:
        """
        Return the complete list of possible languages found in the decoded sequence.
        Usually not really useful. The returned list may be empty even if the 'language' property returns something other than 'Unknown'.
        """
        return [e[0] for e in self._languages]

    @property
    def language(self) -> str:
        """
        Most probable language found in the decoded sequence. If none was detected or inferred, the property returns
        "Unknown".
        """
        if not self._languages:
            # Try to infer the language based on the given encoding.
            # It's either English, or we should not commit to a guess in certain cases.
            if "ascii" in self.could_be_from_charset:
                return "English"

            # Imported here to avoid a circular import.
            from charset_normalizer.cd import encoding_languages, mb_encoding_languages

            languages = (
                mb_encoding_languages(self.encoding)
                if is_multi_byte_encoding(self.encoding)
                else encoding_languages(self.encoding)
            )

            if len(languages) == 0 or "Latin Based" in languages:
                return "Unknown"

            return languages[0]

        return self._languages[0][0]
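    # Illustrative note (not part of the original code): for a payload whose best guess is,
    # say, a single-byte Cyrillic codec with no coherence results, the fallback asks the cd
    # module which languages that codec is associated with and returns the first one, while
    # a plain ASCII payload short-circuits to "English".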
    @property
    def chaos(self) -> float:
        return self._mean_mess_ratio

    @property
    def coherence(self) -> float:
        if not self._languages:
            return 0.0
        return self._languages[0][1]

    @property
    def percent_chaos(self) -> float:
        return round(self.chaos * 100, ndigits=3)

    @property
    def percent_coherence(self) -> float:
        return round(self.coherence * 100, ndigits=3)

    @property
    def raw(self) -> bytes:
        """
        Original untouched bytes.
        """
        return self._payload

    @property
    def submatch(self) -> List["CharsetMatch"]:
        return self._leaves

    @property
    def has_submatch(self) -> bool:
        return len(self._leaves) > 0

    @property
    def alphabets(self) -> List[str]:
        if self._unicode_ranges is not None:
            return self._unicode_ranges
        # List the Unicode range of every decoded character
        detected_ranges: List[Optional[str]] = [
            unicode_range(char) for char in str(self)
        ]
        # Filter out None values, deduplicate and sort
        self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
        return self._unicode_ranges

    @property
    def could_be_from_charset(self) -> List[str]:
        """
        The complete list of encodings that output the exact SAME str result and therefore could be the originating
        encoding.
        This list does include the encoding available in the property 'encoding'.
        """
        return [self._encoding] + [m.encoding for m in self._leaves]
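    # Illustrative note (not part of the original code): for a pure-ASCII payload guessed
    # as "ascii" whose submatches decode to the same text, this could return something like
    # ["ascii", "utf_8", "cp1252"]; hypothetical values, the exact list depends on the
    # submatches attached via CharsetMatches.append().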
    def output(self, encoding: str = "utf_8") -> bytes:
        """
        Method to get the re-encoded bytes payload using a given target encoding. Defaults to UTF-8.
        Characters that cannot be represented in the target encoding are replaced (the "replace" error handler), not dropped.
        """
        if self._output_encoding is None or self._output_encoding != encoding:
            self._output_encoding = encoding
            self._output_payload = str(self).encode(encoding, "replace")
        return self._output_payload  # type: ignore
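    # Illustrative note (not part of the original code): the re-encoded payload is cached,
    # so calling match.output() twice with the default "utf_8" target encodes only once;
    # asking for a different target (e.g. match.output("latin_1")) invalidates the cache
    # and re-encodes with the "replace" error handler.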
    @property
    def fingerprint(self) -> str:
        """
        Retrieve the unique SHA256 computed using the transformed (re-encoded) payload, not the original one.
        """
        return sha256(self.output()).hexdigest()
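    # Illustrative note (not part of the original code): two matches that decode to the
    # same text therefore share a fingerprint, which is what CharsetMatches.append() relies
    # on to fold duplicates in as submatches.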


class CharsetMatches:
    """
    Container holding every CharsetMatch item, ordered by default from the most probable to the least.
    Acts like a list (iterable) but does not implement all of the related methods.
    """

    def __init__(self, results: Optional[List[CharsetMatch]] = None):
        self._results: List[CharsetMatch] = sorted(results) if results else []

    def __iter__(self) -> Iterator[CharsetMatch]:
        yield from self._results

    def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
        """
        Retrieve a single item either by its position or by encoding name (an alias may be used here).
        Raises KeyError upon an invalid index or an encoding not present in the results.
        """
        if isinstance(item, int):
            return self._results[item]
        if isinstance(item, str):
            item = iana_name(item, False)
            for result in self._results:
                if item in result.could_be_from_charset:
                    return result
        raise KeyError
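    # Illustrative sketch (not part of the original code), assuming `matches` is the
    # CharsetMatches instance returned by a detection call:
    #   matches[0]         # best candidate, same as matches.best()
    #   matches["utf-8"]   # lookup by encoding name or alias; raises KeyError if absent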
    def __len__(self) -> int:
        return len(self._results)

    def __bool__(self) -> bool:
        return len(self._results) > 0

    def append(self, item: CharsetMatch) -> None:
        """
        Insert a single match. It is inserted so as to preserve the sort order.
        It may instead be attached as a submatch of an existing result.
        """
        if not isinstance(item, CharsetMatch):
            raise ValueError(
                "Cannot append instance '{}' to CharsetMatches".format(
                    str(item.__class__)
                )
            )
        # Disable the submatch factoring when the input is too heavy (to conserve RAM).
        if len(item.raw) <= TOO_BIG_SEQUENCE:
            for match in self._results:
                if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
                    match.add_submatch(item)
                    return
        self._results.append(item)
        self._results = sorted(self._results)
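    # Illustrative note (not part of the original code): appending a match whose fingerprint
    # and chaos equal those of an existing result folds it in as a submatch rather than
    # adding a new entry, so len(matches) stays the same while the existing result's
    # could_be_from_charset list grows.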
    def best(self) -> Optional["CharsetMatch"]:
        """
        Simply return the first match. Strictly equivalent to matches[0].
        """
        if not self._results:
            return None
        return self._results[0]

    def first(self) -> Optional["CharsetMatch"]:
        """
        Redundant method; it just calls best(). Kept for backward-compatibility reasons.
        """
        return self.best()


CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]


class CliDetectionResult:
    def __init__(
        self,
        path: str,
        encoding: Optional[str],
        encoding_aliases: List[str],
        alternative_encodings: List[str],
        language: str,
        alphabets: List[str],
        has_sig_or_bom: bool,
        chaos: float,
        coherence: float,
        unicode_path: Optional[str],
        is_preferred: bool,
    ):
        self.path: str = path
        self.unicode_path: Optional[str] = unicode_path
        self.encoding: Optional[str] = encoding
        self.encoding_aliases: List[str] = encoding_aliases
        self.alternative_encodings: List[str] = alternative_encodings
        self.language: str = language
        self.alphabets: List[str] = alphabets
        self.has_sig_or_bom: bool = has_sig_or_bom
        self.chaos: float = chaos
        self.coherence: float = coherence
        self.is_preferred: bool = is_preferred

    @property
    def __dict__(self) -> Dict[str, Any]:  # type: ignore
        return {
            "path": self.path,
            "encoding": self.encoding,
            "encoding_aliases": self.encoding_aliases,
            "alternative_encodings": self.alternative_encodings,
            "language": self.language,
            "alphabets": self.alphabets,
            "has_sig_or_bom": self.has_sig_or_bom,
            "chaos": self.chaos,
            "coherence": self.coherence,
            "unicode_path": self.unicode_path,
            "is_preferred": self.is_preferred,
        }

    def to_json(self) -> str:
        return dumps(self.__dict__, ensure_ascii=True, indent=4)
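

# Illustrative usage sketch (not part of the original module): building a
# CliDetectionResult by hand and serialising it with to_json(); all values below
# are made up. Runs only when the module is executed directly, not on import.
if __name__ == "__main__":
    _demo = CliDetectionResult(
        path="./sample.txt",  # hypothetical file path
        encoding="utf_8",
        encoding_aliases=["utf8"],
        alternative_encodings=[],
        language="English",
        alphabets=["Basic Latin"],
        has_sig_or_bom=False,
        chaos=0.0,
        coherence=0.6,
        unicode_path=None,
        is_preferred=True,
    )
    # The overridden __dict__ property feeds dumps(), producing a JSON document.
    print(_demo.to_json())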