/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
  5. #include <aws/common/ring_buffer.h>
  6. #include <aws/common/byte_buf.h>
/*
 * Shims over the aws_atomic_* pointer primitives.
 *
 * Under CBMC (formal verification) each load/store additionally asserts that
 * the pointer stays inside the ring buffer's allocation:
 *   - __CPROVER_same_object guards against comparing/deriving pointers from
 *     different objects (undefined behavior, C11 6.5.8);
 *   - aws_ring_buffer_check_atomic_ptr checks the in-bounds invariant.
 * In normal builds the macros reduce to the bare atomic operations.
 *
 * NOTE: these are multi-statement macros without do/while(0) wrapping; they
 * are only used as full statements in this file.
 */
#ifdef CBMC
#    define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order)                                          \
        dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order);                                             \
        assert(__CPROVER_same_object(dest_ptr, ring_buf->allocation));                                                 \
        assert(aws_ring_buffer_check_atomic_ptr(ring_buf, dest_ptr));
#    define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order)                                          \
        assert(aws_ring_buffer_check_atomic_ptr(ring_buf, src_ptr));                                                   \
        aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
#else
#    define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order)                                          \
        dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order);
#    define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order)                                          \
        aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
#endif
/*
 * Tail is loaded with acquire and stored with release; head uses relaxed
 * ordering. NOTE(review): presumably tail is the cross-thread synchronization
 * point (released buffers become visible to the acquirer) while head is only
 * mutated/read by the acquiring side — confirm against the header's threading
 * contract.
 */
#define AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, dest_ptr)                                                                   \
    AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->tail, aws_memory_order_acquire);
#define AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, src_ptr)                                                                   \
    AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->tail, src_ptr, aws_memory_order_release);
#define AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, dest_ptr)                                                                   \
    AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->head, aws_memory_order_relaxed);
#define AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, src_ptr)                                                                   \
    AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->head, src_ptr, aws_memory_order_relaxed);
  29. int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size) {
  30. AWS_PRECONDITION(ring_buf != NULL);
  31. AWS_PRECONDITION(allocator != NULL);
  32. AWS_PRECONDITION(size > 0);
  33. AWS_ZERO_STRUCT(*ring_buf);
  34. ring_buf->allocation = aws_mem_acquire(allocator, size);
  35. if (!ring_buf->allocation) {
  36. return AWS_OP_ERR;
  37. }
  38. ring_buf->allocator = allocator;
  39. aws_atomic_init_ptr(&ring_buf->head, ring_buf->allocation);
  40. aws_atomic_init_ptr(&ring_buf->tail, ring_buf->allocation);
  41. ring_buf->allocation_end = ring_buf->allocation + size;
  42. AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
  43. return AWS_OP_SUCCESS;
  44. }
  45. void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf) {
  46. AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
  47. if (ring_buf->allocation) {
  48. aws_mem_release(ring_buf->allocator, ring_buf->allocation);
  49. }
  50. AWS_ZERO_STRUCT(*ring_buf);
  51. }
/**
 * Acquires exactly `requested_size` bytes from the ring buffer into `dest`,
 * or raises AWS_ERROR_OOM if no contiguous region of that size is free.
 *
 * Invariant used throughout: head == tail means "empty / nothing vended", so
 * when buffers are outstanding, head must never be advanced to exactly equal
 * tail (hence the `- 1` gap below).
 */
int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_size, struct aws_byte_buf *dest) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
    AWS_ERROR_PRECONDITION(requested_size != 0);

    uint8_t *tail_cpy;
    uint8_t *head_cpy;
    /* Snapshot tail (acquire) then head; both are worked on as local copies. */
    AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
    AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);

    /* this branch is, we don't have any vended buffers. */
    if (head_cpy == tail_cpy) {
        size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;
        if (requested_size > ring_space) {
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return aws_raise_error(AWS_ERROR_OOM);
        }
        /* Buffer is empty: reset both pointers to the start of the allocation
         * and vend from the front. */
        AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
        AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
        *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return AWS_OP_SUCCESS;
    }

    /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
    /* after N + 1 wraps */
    if (tail_cpy > head_cpy) {
        /* Head has wrapped behind tail: the free region is [head, tail).
         * Reserve one byte so head can never land exactly on tail (which
         * would be indistinguishable from the empty state). */
        size_t space = tail_cpy - head_cpy - 1;
        if (space >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* After N wraps */
    } else if (tail_cpy < head_cpy) {
        /* prefer the head space for efficiency. */
        if ((size_t)(ring_buf->allocation_end - head_cpy) >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* Not enough room at the end: wrap around to the front. Strict `>`
         * keeps at least one free byte between the new head and tail. */
        if ((size_t)(tail_cpy - ring_buf->allocation) > requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
    }

    /* No contiguous region large enough. */
    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
    return aws_raise_error(AWS_ERROR_OOM);
}
/**
 * Acquires between `minimum_size` and `requested_size` bytes into `dest`,
 * vending as large a contiguous region as is available. Raises
 * AWS_ERROR_INVALID_ARGUMENT for zero sizes or NULL arguments, and
 * AWS_ERROR_OOM when even `minimum_size` contiguous bytes are unavailable.
 *
 * Same invariant as aws_ring_buffer_acquire: head == tail means empty, so
 * head must never be advanced to exactly equal tail while buffers are vended.
 */
int aws_ring_buffer_acquire_up_to(
    struct aws_ring_buffer *ring_buf,
    size_t minimum_size,
    size_t requested_size,
    struct aws_byte_buf *dest) {
    AWS_PRECONDITION(requested_size >= minimum_size);
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
    /* Runtime argument validation (preconditions above may compile out). */
    if (requested_size == 0 || minimum_size == 0 || !ring_buf || !dest) {
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    uint8_t *tail_cpy;
    uint8_t *head_cpy;
    /* Snapshot tail (acquire) then head; both are worked on as local copies. */
    AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
    AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);

    /* this branch is, we don't have any vended buffers. */
    if (head_cpy == tail_cpy) {
        size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;
        /* Vend min(requested_size, whole ring). */
        size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;
        if (allocation_size < minimum_size) {
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return aws_raise_error(AWS_ERROR_OOM);
        }
        /* go as big as we can. */
        /* we don't have any vended, so this should be safe. */
        AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + allocation_size);
        AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
        *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, allocation_size);
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return AWS_OP_SUCCESS;
    }

    /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
    /* after N + 1 wraps */
    if (tail_cpy > head_cpy) {
        size_t space = tail_cpy - head_cpy;
        /* this shouldn't be possible. */
        AWS_ASSERT(space);
        /* Reserve one byte so head cannot land exactly on tail (which would
         * read as the empty state). */
        space -= 1;
        size_t returnable_size = space > requested_size ? requested_size : space;
        if (returnable_size >= minimum_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + returnable_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, returnable_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* after N wraps */
    } else if (tail_cpy < head_cpy) {
        size_t head_space = ring_buf->allocation_end - head_cpy;
        size_t tail_space = tail_cpy - ring_buf->allocation;
        /* if you can vend the whole thing do it. Also prefer head space to tail space. */
        if (head_space >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* Strict `>` keeps one free byte between the wrapped head and tail. */
        if (tail_space > requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* now vend as much as possible, once again preferring head space. */
        if (head_space >= minimum_size && head_space >= tail_space) {
            /* Head region reaches allocation_end exactly; no gap byte is
             * needed since the new head (== allocation_end) never equals
             * tail (which is < head here). */
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + head_space);
            *dest = aws_byte_buf_from_empty_array(head_cpy, head_space);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        if (tail_space > minimum_size) {
            /* Wrap and vend tail_space - 1, leaving the gap byte before tail. */
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + tail_space - 1);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, tail_space - 1);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
    }

    /* Could not satisfy even minimum_size contiguously. */
    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
    return aws_raise_error(AWS_ERROR_OOM);
}
/*
 * Returns true if buf's backing memory lies entirely within ring_buffer's
 * allocation ([allocation, allocation_end]).
 */
static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
#ifdef CBMC
    /* only continue if buf points-into ring_buffer because comparison of pointers to different objects is undefined
     * (C11 6.5.8) */
    /* NOTE: under CBMC this return makes the pointer-comparison return below
     * unreachable; the verifier only checks same-object membership. */
    return (
        __CPROVER_same_object(buf->buffer, ring_buffer->allocation) &&
        AWS_IMPLIES(
            ring_buffer->allocation_end != NULL, __CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)));
#endif
    /* Normal build: range check. All pointers must be non-NULL and the whole
     * capacity must fit inside the allocation. */
    return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end &&
           buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end;
}
  209. void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf) {
  210. AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
  211. AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
  212. AWS_PRECONDITION(s_buf_belongs_to_pool(ring_buffer, buf));
  213. AWS_ATOMIC_STORE_TAIL_PTR(ring_buffer, buf->buffer + buf->capacity);
  214. AWS_ZERO_STRUCT(*buf);
  215. AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
  216. }
  217. bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
  218. AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
  219. AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
  220. bool rval = s_buf_belongs_to_pool(ring_buffer, buf);
  221. AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
  222. AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
  223. return rval;
  224. }