test.py
  1. """
  2. Run chardet on a bunch of documents and see that we get the correct encodings.
  3. :author: Dan Blanchard
  4. :author: Ian Cordasco
  5. """
  6. import textwrap
  7. from difflib import ndiff
  8. from os import listdir
  9. from os.path import dirname, isdir, join, realpath, relpath, splitext
  10. from pprint import pformat
  11. from unicodedata import normalize
  12. try:
  13. import hypothesis.strategies as st
  14. from hypothesis import Verbosity, assume, given, settings
  15. HAVE_HYPOTHESIS = True
  16. except ImportError:
  17. HAVE_HYPOTHESIS = False
  18. import pytest # pylint: disable=import-error
  19. import chardet
  20. from chardet.metadata.languages import LANGUAGES
# TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250) after we
#       retrain model.
MISSING_ENCODINGS = {
    "iso-8859-2",
    "iso-8859-6",
    "windows-1250",
    "windows-1254",
    "windows-1256",
}

EXPECTED_FAILURES = {
    "tests/iso-8859-9-turkish/_ude_1.txt",
    "tests/iso-8859-9-turkish/_ude_2.txt",
    "tests/iso-8859-9-turkish/divxplanet.com.xml",
    "tests/iso-8859-9-turkish/subtitle.srt",
    "tests/iso-8859-9-turkish/wikitop_tr_ISO-8859-9.txt",
}


def gen_test_params():
    """Yields tuples of paths and encodings to use for test_encoding_detection"""
    import yatest.common

    base_path = yatest.common.work_path('test_data/tests')
    for encoding in listdir(base_path):
        path = join(base_path, encoding)
        # Skip files in tests directory
        if not isdir(path):
            continue
        # Remove language suffixes from encoding if present
        encoding = encoding.lower()
        for language in sorted(LANGUAGES.keys()):
            postfix = "-" + language.lower()
            if encoding.endswith(postfix):
                encoding = encoding.rpartition(postfix)[0]
                break
        # Skip directories for encodings we don't handle yet.
        if encoding in MISSING_ENCODINGS:
            continue
        # Test encoding detection for each file we have for this encoding
        for file_name in listdir(path):
            ext = splitext(file_name)[1].lower()
            if ext not in [".html", ".txt", ".xml", ".srt"]:
                continue
            full_path = join(path, file_name)
            test_case = full_path, encoding
            name_test = full_path.split("/test_data/")[-1]
            if name_test in EXPECTED_FAILURES:
                test_case = pytest.param(*test_case, marks=pytest.mark.xfail, id=name_test)
            else:
                test_case = pytest.param(*test_case, id=name_test)
            yield test_case
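

# Illustrative sketch (not used by the tests; the path and directory name here
# are hypothetical): gen_test_params() pairs each test file with the lowercased
# encoding implied by its parent directory, stripping any "-<language>" suffix,
# so a file under "utf-8-turkish/" is expected to decode as "utf-8".
def _example_param():
    full_path = join("test_data", "tests", "utf-8-turkish", "sample.txt")
    encoding = "utf-8"  # "-turkish" suffix removed, as gen_test_params() would do
    return pytest.param(full_path, encoding, id="tests/utf-8-turkish/sample.txt")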


@pytest.mark.parametrize("file_name, encoding", gen_test_params())
def test_encoding_detection(file_name, encoding):
    with open(file_name, "rb") as f:
        input_bytes = f.read()
        result = chardet.detect(input_bytes)
        try:
            expected_unicode = input_bytes.decode(encoding)
        except LookupError:
            expected_unicode = ""
        try:
            detected_unicode = input_bytes.decode(result["encoding"])
        except (LookupError, UnicodeDecodeError, TypeError):
            detected_unicode = ""
    if result:
        encoding_match = (result["encoding"] or "").lower() == encoding
    else:
        encoding_match = False
    # Only care about mismatches that would actually result in different
    # behavior when decoding
    expected_unicode = normalize("NFKC", expected_unicode)
    detected_unicode = normalize("NFKC", detected_unicode)
    if not encoding_match and expected_unicode != detected_unicode:
        wrapped_expected = "\n".join(textwrap.wrap(expected_unicode, 100)) + "\n"
        wrapped_detected = "\n".join(textwrap.wrap(detected_unicode, 100)) + "\n"
        diff = "".join(
            [
                line
                for line in ndiff(
                    wrapped_expected.splitlines(True), wrapped_detected.splitlines(True)
                )
                if not line.startswith(" ")
            ][:20]
        )
        all_encodings = chardet.detect_all(input_bytes, ignore_threshold=True)
    else:
        diff = ""
        encoding_match = True
        all_encodings = [result]
    assert encoding_match, (
        f"Expected {encoding}, but got {result} for {file_name}. First 20 "
        f"lines with character differences: \n{diff}\n"
        f"All encodings: {pformat(all_encodings)}"
    )
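

# Side note, a sketch based on chardet's documented API rather than part of the
# test above: detect() returns a single best guess as a dict with "encoding",
# "confidence", and "language" keys, while detect_all() returns a list of such
# dicts; ignore_threshold=True, as used above, also keeps low-confidence
# candidates in that list.
def _candidate_encodings(raw: bytes):
    """Return every candidate encoding name chardet reports for *raw*."""
    return [guess["encoding"] for guess in chardet.detect_all(raw, ignore_threshold=True)]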


@pytest.mark.parametrize("file_name, encoding", gen_test_params())
def test_encoding_detection_rename_legacy(file_name, encoding):
    with open(file_name, "rb") as f:
        input_bytes = f.read()
        result = chardet.detect(input_bytes, should_rename_legacy=True)
        try:
            expected_unicode = input_bytes.decode(encoding)
        except LookupError:
            expected_unicode = ""
        try:
            detected_unicode = input_bytes.decode(result["encoding"])
        except (LookupError, UnicodeDecodeError, TypeError):
            detected_unicode = ""
    if result:
        encoding_match = (result["encoding"] or "").lower() == encoding
    else:
        encoding_match = False
    # Only care about mismatches that would actually result in different
    # behavior when decoding
    expected_unicode = normalize("NFKD", expected_unicode)
    detected_unicode = normalize("NFKD", detected_unicode)
    if not encoding_match and expected_unicode != detected_unicode:
        wrapped_expected = "\n".join(textwrap.wrap(expected_unicode, 100)) + "\n"
        wrapped_detected = "\n".join(textwrap.wrap(detected_unicode, 100)) + "\n"
        diff = "".join(
            [
                line
                for line in ndiff(
                    wrapped_expected.splitlines(True), wrapped_detected.splitlines(True)
                )
                if not line.startswith(" ")
            ][:20]
        )
        all_encodings = chardet.detect_all(
            input_bytes, ignore_threshold=True, should_rename_legacy=True
        )
    else:
        diff = ""
        encoding_match = True
        all_encodings = [result]
    assert encoding_match, (
        f"Expected {encoding}, but got {result} for {file_name}. First 20 "
        f"lines of character differences: \n{diff}\n"
        f"All encodings: {pformat(all_encodings)}"
    )
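

# Minimal usage sketch of the flag exercised above (assumed available because
# the tests call it; it exists only in newer chardet releases): with
# should_rename_legacy=True, detect() may report a more modern superset name in
# place of a legacy encoding label. The exact renaming table depends on the
# chardet version, so none is assumed here.
def _detect_with_modern_names(raw: bytes):
    return chardet.detect(raw, should_rename_legacy=True)["encoding"]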


if HAVE_HYPOTHESIS:

    class JustALengthIssue(Exception):
        pass

    @pytest.mark.xfail
    @given(
        st.text(min_size=1),
        st.sampled_from(
            [
                "ascii",
                "utf-8",
                "utf-16",
                "utf-32",
                "iso-8859-7",
                "iso-8859-8",
                "windows-1255",
            ]
        ),
        st.randoms(),
    )
    @settings(max_examples=200)
    def test_never_fails_to_detect_if_there_is_a_valid_encoding(txt, enc, rnd):
        try:
            data = txt.encode(enc)
        except UnicodeEncodeError:
            assume(False)
        detected = chardet.detect(data)["encoding"]
        if detected is None:
            with pytest.raises(JustALengthIssue):

                @given(st.text(), random=rnd)
                @settings(verbosity=Verbosity.quiet, max_examples=50)
                def string_poisons_following_text(suffix):
                    try:
                        extended = (txt + suffix).encode(enc)
                    except UnicodeEncodeError:
                        assume(False)
                    result = chardet.detect(extended)
                    if result and result["encoding"] is not None:
                        raise JustALengthIssue()

                # Run the nested Hypothesis search so the pytest.raises block
                # above can actually observe JustALengthIssue.
                string_poisons_following_text()

    @given(
        st.text(min_size=1),
        st.sampled_from(
            [
                "ascii",
                "utf-8",
                "utf-16",
                "utf-32",
                "iso-8859-7",
                "iso-8859-8",
                "windows-1255",
            ]
        ),
        st.randoms(),
    )
    @settings(max_examples=200)
    def test_detect_all_and_detect_one_should_agree(txt, enc, _):
        try:
            data = txt.encode(enc)
        except UnicodeEncodeError:
            assume(False)
        try:
            result = chardet.detect(data)
            results = chardet.detect_all(data)
            assert result["encoding"] == results[0]["encoding"]
        except Exception as exc:
            raise RuntimeError(f"{result} != {results}") from exc