__init__.py 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647
  1. import functools
  2. import itertools
  3. import re
  4. import textwrap
  5. from typing import Iterable
  6. try:
  7. from importlib.resources import files # type: ignore
  8. except ImportError: # pragma: nocover
  9. from importlib_resources import files # type: ignore
  10. from jaraco.context import ExceptionTrap
  11. from jaraco.functools import compose, method_cache
  12. def substitution(old, new):
  13. """
  14. Return a function that will perform a substitution on a string
  15. """
  16. return lambda s: s.replace(old, new)
  17. def multi_substitution(*substitutions):
  18. """
  19. Take a sequence of pairs specifying substitutions, and create
  20. a function that performs those substitutions.
  21. >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
  22. 'baz'
  23. """
  24. substitutions = itertools.starmap(substitution, substitutions)
  25. # compose function applies last function first, so reverse the
  26. # substitutions to get the expected order.
  27. substitutions = reversed(tuple(substitutions))
  28. return compose(*substitutions)
class FoldedCase(str):
    """
    A case insensitive string class; behaves just like str
    except compares equal when the only variation is case.

    >>> s = FoldedCase('hello world')

    >>> s == 'Hello World'
    True

    >>> 'Hello World' == s
    True

    >>> s != 'Hello World'
    False

    >>> s.index('O')
    4

    >>> s.split('O')
    ['hell', ' w', 'rld']

    >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
    ['alpha', 'Beta', 'GAMMA']

    Sequence membership is straightforward.

    >>> "Hello World" in [s]
    True
    >>> s in ["Hello World"]
    True

    Allows testing for set inclusion, but candidate and elements
    must both be folded.

    >>> FoldedCase("Hello World") in {s}
    True
    >>> s in {FoldedCase("Hello World")}
    True

    String inclusion works as long as the FoldedCase object
    is on the right.

    >>> "hello" in FoldedCase("Hello World")
    True

    But not if the FoldedCase object is on the left:

    >>> FoldedCase('hello') in 'Hello World'
    False

    In that case, use ``in_``:

    >>> FoldedCase('hello').in_('Hello World')
    True

    >>> FoldedCase('hello') > FoldedCase('Hello')
    False

    >>> FoldedCase('ß') == FoldedCase('ss')
    True
    """

    # All comparisons delegate to the casefolded text, so ordering and
    # equality ignore case (and apply full Unicode folding, e.g. ß == ss).
    def __lt__(self, other):
        return self.casefold() < other.casefold()

    def __gt__(self, other):
        return self.casefold() > other.casefold()

    def __eq__(self, other):
        return self.casefold() == other.casefold()

    def __ne__(self, other):
        return self.casefold() != other.casefold()

    def __hash__(self):
        # Must agree with __eq__, so hash the folded form.
        return hash(self.casefold())

    def __contains__(self, other):
        # Fold both sides; calls str.casefold directly rather than the
        # cached override below.
        return super().casefold().__contains__(other.casefold())

    def in_(self, other):
        "Does self appear in other?"
        return self in FoldedCase(other)

    # cache casefold since it's likely to be called frequently.
    @method_cache
    def casefold(self):
        return super().casefold()

    def index(self, sub):
        # Case-insensitive analogue of str.index; raises ValueError
        # when ``sub`` is absent, like str.index.
        return self.casefold().index(sub.casefold())

    def split(self, splitter=' ', maxsplit=0):
        # Case-insensitive split: match the literal splitter, ignoring case.
        # maxsplit=0 means "no limit", matching re.split's convention.
        pattern = re.compile(re.escape(splitter), re.I)
        return pattern.split(self, maxsplit)
# Python 3.8 compatibility
_unicode_trap = ExceptionTrap(UnicodeDecodeError)


@_unicode_trap.passes
def is_decodable(value):
    r"""
    Return True if the supplied value is decodable (using the default
    encoding).

    >>> is_decodable(b'\xff')
    False
    >>> is_decodable(b'\x32')
    True
    """
    # ExceptionTrap.passes turns this body into a predicate: the call
    # returns True when no UnicodeDecodeError is raised, False otherwise.
    value.decode()
  109. def is_binary(value):
  110. r"""
  111. Return True if the value appears to be binary (that is, it's a byte
  112. string and isn't decodable).
  113. >>> is_binary(b'\xff')
  114. True
  115. >>> is_binary('\xff')
  116. False
  117. """
  118. return isinstance(value, bytes) and not is_decodable(value)
  119. def trim(s):
  120. r"""
  121. Trim something like a docstring to remove the whitespace that
  122. is common due to indentation and formatting.
  123. >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
  124. 'foo = bar\n\tbar = baz'
  125. """
  126. return textwrap.dedent(s).strip()
  127. def wrap(s):
  128. """
  129. Wrap lines of text, retaining existing newlines as
  130. paragraph markers.
  131. >>> print(wrap(lorem_ipsum))
  132. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
  133. eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
  134. minim veniam, quis nostrud exercitation ullamco laboris nisi ut
  135. aliquip ex ea commodo consequat. Duis aute irure dolor in
  136. reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
  137. pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
  138. culpa qui officia deserunt mollit anim id est laborum.
  139. <BLANKLINE>
  140. Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
  141. varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
  142. magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
  143. gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
  144. risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
  145. eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
  146. fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
  147. a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
  148. neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
  149. sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
  150. nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
  151. quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
  152. molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
  153. """
  154. paragraphs = s.splitlines()
  155. wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
  156. return '\n\n'.join(wrapped)
  157. def unwrap(s):
  158. r"""
  159. Given a multi-line string, return an unwrapped version.
  160. >>> wrapped = wrap(lorem_ipsum)
  161. >>> wrapped.count('\n')
  162. 20
  163. >>> unwrapped = unwrap(wrapped)
  164. >>> unwrapped.count('\n')
  165. 1
  166. >>> print(unwrapped)
  167. Lorem ipsum dolor sit amet, consectetur adipiscing ...
  168. Curabitur pretium tincidunt lacus. Nulla gravida orci ...
  169. """
  170. paragraphs = re.split(r'\n\n+', s)
  171. cleaned = (para.replace('\n', ' ') for para in paragraphs)
  172. return '\n'.join(cleaned)
  173. lorem_ipsum: str = (
  174. files(__name__).joinpath('Lorem_ipsum.txt').read_text(encoding='utf-8')
  175. )
  176. class Splitter:
  177. """object that will split a string with the given arguments for each call
  178. >>> s = Splitter(',')
  179. >>> s('hello, world, this is your, master calling')
  180. ['hello', ' world', ' this is your', ' master calling']
  181. """
  182. def __init__(self, *args):
  183. self.args = args
  184. def __call__(self, s):
  185. return s.split(*self.args)
  186. def indent(string, prefix=' ' * 4):
  187. """
  188. >>> indent('foo')
  189. ' foo'
  190. """
  191. return prefix + string
  192. class WordSet(tuple):
  193. """
  194. Given an identifier, return the words that identifier represents,
  195. whether in camel case, underscore-separated, etc.
  196. >>> WordSet.parse("camelCase")
  197. ('camel', 'Case')
  198. >>> WordSet.parse("under_sep")
  199. ('under', 'sep')
  200. Acronyms should be retained
  201. >>> WordSet.parse("firstSNL")
  202. ('first', 'SNL')
  203. >>> WordSet.parse("you_and_I")
  204. ('you', 'and', 'I')
  205. >>> WordSet.parse("A simple test")
  206. ('A', 'simple', 'test')
  207. Multiple caps should not interfere with the first cap of another word.
  208. >>> WordSet.parse("myABCClass")
  209. ('my', 'ABC', 'Class')
  210. The result is a WordSet, providing access to other forms.
  211. >>> WordSet.parse("myABCClass").underscore_separated()
  212. 'my_ABC_Class'
  213. >>> WordSet.parse('a-command').camel_case()
  214. 'ACommand'
  215. >>> WordSet.parse('someIdentifier').lowered().space_separated()
  216. 'some identifier'
  217. Slices of the result should return another WordSet.
  218. >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
  219. 'out_of_context'
  220. >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
  221. 'word set'
  222. >>> example = WordSet.parse('figured it out')
  223. >>> example.headless_camel_case()
  224. 'figuredItOut'
  225. >>> example.dash_separated()
  226. 'figured-it-out'
  227. """
  228. _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
  229. def capitalized(self):
  230. return WordSet(word.capitalize() for word in self)
  231. def lowered(self):
  232. return WordSet(word.lower() for word in self)
  233. def camel_case(self):
  234. return ''.join(self.capitalized())
  235. def headless_camel_case(self):
  236. words = iter(self)
  237. first = next(words).lower()
  238. new_words = itertools.chain((first,), WordSet(words).camel_case())
  239. return ''.join(new_words)
  240. def underscore_separated(self):
  241. return '_'.join(self)
  242. def dash_separated(self):
  243. return '-'.join(self)
  244. def space_separated(self):
  245. return ' '.join(self)
  246. def trim_right(self, item):
  247. """
  248. Remove the item from the end of the set.
  249. >>> WordSet.parse('foo bar').trim_right('foo')
  250. ('foo', 'bar')
  251. >>> WordSet.parse('foo bar').trim_right('bar')
  252. ('foo',)
  253. >>> WordSet.parse('').trim_right('bar')
  254. ()
  255. """
  256. return self[:-1] if self and self[-1] == item else self
  257. def trim_left(self, item):
  258. """
  259. Remove the item from the beginning of the set.
  260. >>> WordSet.parse('foo bar').trim_left('foo')
  261. ('bar',)
  262. >>> WordSet.parse('foo bar').trim_left('bar')
  263. ('foo', 'bar')
  264. >>> WordSet.parse('').trim_left('bar')
  265. ()
  266. """
  267. return self[1:] if self and self[0] == item else self
  268. def trim(self, item):
  269. """
  270. >>> WordSet.parse('foo bar').trim('foo')
  271. ('bar',)
  272. """
  273. return self.trim_left(item).trim_right(item)
  274. def __getitem__(self, item):
  275. result = super().__getitem__(item)
  276. if isinstance(item, slice):
  277. result = WordSet(result)
  278. return result
  279. @classmethod
  280. def parse(cls, identifier):
  281. matches = cls._pattern.finditer(identifier)
  282. return WordSet(match.group(0) for match in matches)
  283. @classmethod
  284. def from_class_name(cls, subject):
  285. return cls.parse(subject.__class__.__name__)
# for backward compatibility: expose WordSet.parse under its older name.
words = WordSet.parse
  288. def simple_html_strip(s):
  289. r"""
  290. Remove HTML from the string `s`.
  291. >>> str(simple_html_strip(''))
  292. ''
  293. >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
  294. A stormy day in paradise
  295. >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
  296. Somebody tell the truth.
  297. >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
  298. What about
  299. multiple lines?
  300. """
  301. html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
  302. texts = (match.group(3) or '' for match in html_stripper.finditer(s))
  303. return ''.join(texts)
  304. class SeparatedValues(str):
  305. """
  306. A string separated by a separator. Overrides __iter__ for getting
  307. the values.
  308. >>> list(SeparatedValues('a,b,c'))
  309. ['a', 'b', 'c']
  310. Whitespace is stripped and empty values are discarded.
  311. >>> list(SeparatedValues(' a, b , c, '))
  312. ['a', 'b', 'c']
  313. """
  314. separator = ','
  315. def __iter__(self):
  316. parts = self.split(self.separator)
  317. return filter(None, (part.strip() for part in parts))
class Stripper:
    r"""
    Given a series of lines, find the common prefix and strip it from them.

    >>> lines = [
    ...     'abcdefg\n',
    ...     'abc\n',
    ...     'abcde\n',
    ... ]
    >>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    'abc'
    >>> list(res.lines)
    ['defg\n', '\n', 'de\n']

    If no prefix is common, nothing should be stripped.

    >>> lines = [
    ...     'abcd\n',
    ...     '1234\n',
    ... ]
    >>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    ''
    >>> list(res.lines)
    ['abcd\n', '1234\n']
    """

    def __init__(self, prefix, lines):
        self.prefix = prefix
        # Strip lazily: each line is transformed as it is consumed.
        self.lines = map(self, lines)

    @classmethod
    def strip_prefix(cls, lines):
        # tee so the prefix scan doesn't exhaust the caller's iterator.
        prefix_lines, lines = itertools.tee(lines)
        # Fold the pairwise common prefix across all lines.
        prefix = functools.reduce(cls.common_prefix, prefix_lines)
        return cls(prefix, lines)

    def __call__(self, line):
        if not self.prefix:
            return line
        # partition splits on the first occurrence; lines produced via
        # strip_prefix all begin with the prefix, so this removes it.
        # NOTE(review): a line lacking the prefix would lose everything
        # up to a mid-line occurrence — only feed lines that start with it.
        null, prefix, rest = line.partition(self.prefix)
        return rest

    @staticmethod
    def common_prefix(s1, s2):
        """
        Return the common prefix of two lines.
        """
        # Shrink the candidate length until both slices agree
        # (terminates at 0, where '' == '').
        index = min(len(s1), len(s2))
        while s1[:index] != s2[:index]:
            index -= 1
        return s1[:index]
  363. def remove_prefix(text, prefix):
  364. """
  365. Remove the prefix from the text if it exists.
  366. >>> remove_prefix('underwhelming performance', 'underwhelming ')
  367. 'performance'
  368. >>> remove_prefix('something special', 'sample')
  369. 'something special'
  370. """
  371. null, prefix, rest = text.rpartition(prefix)
  372. return rest
  373. def remove_suffix(text, suffix):
  374. """
  375. Remove the suffix from the text if it exists.
  376. >>> remove_suffix('name.git', '.git')
  377. 'name'
  378. >>> remove_suffix('something special', 'sample')
  379. 'something special'
  380. """
  381. rest, suffix, null = text.partition(suffix)
  382. return rest
  383. def normalize_newlines(text):
  384. r"""
  385. Replace alternate newlines with the canonical newline.
  386. >>> normalize_newlines('Lorem Ipsum\u2029')
  387. 'Lorem Ipsum\n'
  388. >>> normalize_newlines('Lorem Ipsum\r\n')
  389. 'Lorem Ipsum\n'
  390. >>> normalize_newlines('Lorem Ipsum\x85')
  391. 'Lorem Ipsum\n'
  392. """
  393. newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
  394. pattern = '|'.join(newlines)
  395. return re.sub(pattern, '\n', text)
  396. def _nonblank(str):
  397. return str and not str.startswith('#')
@functools.singledispatch
def yield_lines(iterable):
    r"""
    Yield valid lines of a string or iterable.

    >>> list(yield_lines(''))
    []
    >>> list(yield_lines(['foo', 'bar']))
    ['foo', 'bar']
    >>> list(yield_lines('foo\nbar'))
    ['foo', 'bar']
    >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
    ['foo', 'baz #comment']
    >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
    ['foo', 'bar', 'baz', 'bing']
    """
    # Generic case: recurse into each element and flatten the results.
    # The str overload registered separately terminates the recursion.
    return itertools.chain.from_iterable(map(yield_lines, iterable))
@yield_lines.register(str)
def _(text):
    # str overload of yield_lines: split into physical lines, then
    # strip whitespace and discard blanks/comments via ``clean``.
    return clean(text.splitlines())
  417. def clean(lines: Iterable[str]):
  418. """
  419. Yield non-blank, non-comment elements from lines.
  420. """
  421. return filter(_nonblank, map(str.strip, lines))
  422. def drop_comment(line):
  423. """
  424. Drop comments.
  425. >>> drop_comment('foo # bar')
  426. 'foo'
  427. A hash without a space may be in a URL.
  428. >>> drop_comment('http://example.com/foo#bar')
  429. 'http://example.com/foo#bar'
  430. """
  431. return line.partition(' #')[0]
  432. def join_continuation(lines):
  433. r"""
  434. Join lines continued by a trailing backslash.
  435. >>> list(join_continuation(['foo \\', 'bar', 'baz']))
  436. ['foobar', 'baz']
  437. >>> list(join_continuation(['foo \\', 'bar', 'baz']))
  438. ['foobar', 'baz']
  439. >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
  440. ['foobarbaz']
  441. Not sure why, but...
  442. The character preceding the backslash is also elided.
  443. >>> list(join_continuation(['goo\\', 'dly']))
  444. ['godly']
  445. A terrible idea, but...
  446. If no line is available to continue, suppress the lines.
  447. >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
  448. ['foo']
  449. """
  450. lines = iter(lines)
  451. for item in lines:
  452. while item.endswith('\\'):
  453. try:
  454. item = item[:-2].strip() + next(lines)
  455. except StopIteration:
  456. return
  457. yield item
  458. def read_newlines(filename, limit=1024):
  459. r"""
  460. >>> tmp_path = getfixture('tmp_path')
  461. >>> filename = tmp_path / 'out.txt'
  462. >>> _ = filename.write_text('foo\n', newline='', encoding='utf-8')
  463. >>> read_newlines(filename)
  464. '\n'
  465. >>> _ = filename.write_text('foo\r\n', newline='', encoding='utf-8')
  466. >>> read_newlines(filename)
  467. '\r\n'
  468. >>> _ = filename.write_text('foo\r\nbar\nbing\r', newline='', encoding='utf-8')
  469. >>> read_newlines(filename)
  470. ('\r', '\n', '\r\n')
  471. """
  472. with open(filename, encoding='utf-8') as fp:
  473. fp.read(limit)
  474. return fp.newlines
  475. def lines_from(input):
  476. """
  477. Generate lines from a :class:`importlib.resources.abc.Traversable` path.
  478. >>> lines = lines_from(files(__name__).joinpath('Lorem ipsum.txt'))
  479. >>> next(lines)
  480. 'Lorem ipsum...'
  481. >>> next(lines)
  482. 'Curabitur pretium...'
  483. """
  484. with input.open(encoding='utf-8') as stream:
  485. yield from stream