hypothesis_vector_test.py

  1. """
  2. Hypothesis-based tests for pvector.
  3. """
  4. import gc
  5. from collections.abc import Iterable
  6. from functools import wraps
  7. from pyrsistent import PClass, field
  8. from pytest import fixture
  9. from pyrsistent import pvector, discard
  10. from hypothesis import strategies as st, assume
  11. from hypothesis.stateful import RuleBasedStateMachine, Bundle, rule


class RefCountTracker:
    """
    An object that might catch reference count errors sometimes.
    """
    def __init__(self):
        self.id = id(self)

    def __repr__(self):
        return "<%s>" % (self.id,)

    def __del__(self):
        # If self is a dangling memory reference this check might fail. Or
        # segfault :)
        if self.id != id(self):
            raise RuntimeError()


@fixture(scope="module")
def gc_when_done(request):
    request.addfinalizer(gc.collect)


def test_setup(gc_when_done):
    """
    Ensure we GC when tests finish.
    """


# Pairs of a list and corresponding pvector:
PVectorAndLists = st.lists(st.builds(RefCountTracker)).map(
    lambda l: (l, pvector(l)))
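
# Illustrative note (not from the original module): each draw from the
# strategy above is a (list, pvector) pair built from the same
# RefCountTracker instances, so the two sides start out equal, e.g.
#
#     l, pv = PVectorAndLists.example()
#     assert list(pv) == l
#
# Hypothesis's ``example()`` is intended for interactive exploration rather
# than for use inside the tests themselves.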


def verify_inputs_unmodified(original):
    """
    Decorator that asserts that the wrapped function does not modify its
    inputs.
    """
    def to_tuples(pairs):
        return [(tuple(l), tuple(pv)) for (l, pv) in pairs]

    @wraps(original)
    def wrapper(self, **kwargs):
        inputs = [k for k in kwargs.values() if isinstance(k, Iterable)]
        tuple_inputs = to_tuples(inputs)
        try:
            return original(self, **kwargs)
        finally:
            # Ensure inputs were unmodified:
            assert to_tuples(inputs) == tuple_inputs
    return wrapper


def assert_equal(l, pv):
    assert l == pv
    assert len(l) == len(pv)
    length = len(l)
    for i in range(length):
        assert l[i] == pv[i]
    for i in range(length):
        for j in range(i, length):
            assert l[i:j] == pv[i:j]
    assert l == list(iter(pv))


class PVectorBuilder(RuleBasedStateMachine):
    """
    Build a list and matching pvector step-by-step.

    In each step in the state machine we do the same operation on a list and
    on a pvector, and then when we're done we compare the two.
    """
    sequences = Bundle("sequences")

    @rule(target=sequences, start=PVectorAndLists)
    def initial_value(self, start):
        """
        Some initial values generated by a hypothesis strategy.
        """
        return start

    @rule(target=sequences, former=sequences)
    @verify_inputs_unmodified
    def append(self, former):
        """
        Append an item to the pair of sequences.
        """
        l, pv = former
        obj = RefCountTracker()
        l2 = l[:]
        l2.append(obj)
        return l2, pv.append(obj)

    @rule(target=sequences, start=sequences, end=sequences)
    @verify_inputs_unmodified
    def extend(self, start, end):
        """
        Extend a pair of sequences with another pair of sequences.
        """
        l, pv = start
        l2, pv2 = end
        # compare() has O(N**2) behavior, so don't want too-large lists:
        assume(len(l) + len(l2) < 50)
        l3 = l[:]
        l3.extend(l2)
        return l3, pv.extend(pv2)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def remove(self, former, data):
        """
        Remove an item from the sequences.
        """
        l, pv = former
        assume(l)
        l2 = l[:]
        i = data.draw(st.sampled_from(range(len(l))))
        del l2[i]
        return l2, pv.delete(i)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def set(self, former, data):
        """
        Overwrite an item in the sequence.
        """
        l, pv = former
        assume(l)
        l2 = l[:]
        i = data.draw(st.sampled_from(range(len(l))))
        obj = RefCountTracker()
        l2[i] = obj
        return l2, pv.set(i, obj)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def transform_set(self, former, data):
        """
        Transform the sequence by setting a value.
        """
        l, pv = former
        assume(l)
        l2 = l[:]
        i = data.draw(st.sampled_from(range(len(l))))
        obj = RefCountTracker()
        l2[i] = obj
        return l2, pv.transform([i], obj)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def transform_discard(self, former, data):
        """
        Transform the sequence by discarding a value.
        """
        l, pv = former
        assume(l)
        l2 = l[:]
        i = data.draw(st.sampled_from(range(len(l))))
        del l2[i]
        return l2, pv.transform([i], discard)

    @rule(target=sequences, former=sequences, data=st.data())
    @verify_inputs_unmodified
    def subset(self, former, data):
        """
        A subset of the previous sequence.
        """
        l, pv = former
        assume(l)
        i = data.draw(st.sampled_from(range(len(l))))
        j = data.draw(st.sampled_from(range(len(l))))
        return l[i:j], pv[i:j]

    @rule(pair=sequences)
    @verify_inputs_unmodified
    def compare(self, pair):
        """
        The list and pvector must match.
        """
        l, pv = pair
        # compare() has O(N**2) behavior, so don't want too-large lists:
        assume(len(l) < 50)
        assert_equal(l, pv)


PVectorBuilderTests = PVectorBuilder.TestCase
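
# Configuration sketch (an assumption, not part of the original suite): the
# generated TestCase is a regular unittest.TestCase, so Hypothesis settings
# can be attached to it when the default run length is too slow or too
# shallow.  The numbers below are purely illustrative.
#
#     from hypothesis import settings
#     PVectorBuilder.TestCase.settings = settings(
#         max_examples=100, stateful_step_count=50)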


class EvolverItem(PClass):
    original_list = field()
    original_pvector = field()
    current_list = field()
    current_evolver = field()


class PVectorEvolverBuilder(RuleBasedStateMachine):
    """
    Build a list and matching pvector evolver step-by-step.

    In each step in the state machine we do the same operation on a list and
    on a pvector evolver, and then when we're done we compare the two.
    """
    sequences = Bundle("evolver_sequences")

    @rule(target=sequences, start=PVectorAndLists)
    def initial_value(self, start):
        """
        Some initial values generated by a hypothesis strategy.
        """
        l, pv = start
        return EvolverItem(original_list=l,
                           original_pvector=pv,
                           current_list=l[:],
                           current_evolver=pv.evolver())

    @rule(item=sequences)
    def append(self, item):
        """
        Append an item to the pair of sequences.
        """
        obj = RefCountTracker()
        item.current_list.append(obj)
        item.current_evolver.append(obj)

    @rule(start=sequences, end=sequences)
    def extend(self, start, end):
        """
        Extend a pair of sequences with another pair of sequences.
        """
        # compare() has O(N**2) behavior, so don't want too-large lists:
        assume(len(start.current_list) + len(end.current_list) < 50)
        start.current_evolver.extend(end.current_list)
        start.current_list.extend(end.current_list)

    @rule(item=sequences, data=st.data())
    def delete(self, item, data):
        """
        Remove an item from the sequences.
        """
        assume(item.current_list)
        i = data.draw(st.sampled_from(range(len(item.current_list))))
        del item.current_list[i]
        del item.current_evolver[i]

    @rule(item=sequences, data=st.data())
    def setitem(self, item, data):
        """
        Overwrite an item in the sequence using ``__setitem__``.
        """
        assume(item.current_list)
        i = data.draw(st.sampled_from(range(len(item.current_list))))
        obj = RefCountTracker()
        item.current_list[i] = obj
        item.current_evolver[i] = obj

    @rule(item=sequences, data=st.data())
    def set(self, item, data):
        """
        Overwrite an item in the sequence using ``set``.
        """
        assume(item.current_list)
        i = data.draw(st.sampled_from(range(len(item.current_list))))
        obj = RefCountTracker()
        item.current_list[i] = obj
        item.current_evolver.set(i, obj)

    @rule(item=sequences)
    def compare(self, item):
        """
        The list and pvector evolver must match.
        """
        # Exercise is_dirty(); its return value is not asserted on here.
        item.current_evolver.is_dirty()
        # compare() has O(N**2) behavior, so don't want too-large lists:
        assume(len(item.current_list) < 50)
        # original object unmodified
        assert item.original_list == item.original_pvector
        # evolver matches:
        for i in range(len(item.current_evolver)):
            assert item.current_list[i] == item.current_evolver[i]
        # persistent version matches
        assert_equal(item.current_list, item.current_evolver.persistent())
        # original object still unmodified
        assert item.original_list == item.original_pvector


PVectorEvolverBuilderTests = PVectorEvolverBuilder.TestCase
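
# Usage sketch (an assumption about invocation, not taken from the repo):
# since the ``*Tests`` names above are unittest.TestCase subclasses, pytest
# collects them like any other test class, e.g.
#
#     pytest hypothesis_vector_test.py -k PVectorEvolverBuilderTests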