/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */
#include <aws/common/ring_buffer.h>

#include <aws/common/byte_buf.h>

#ifdef CBMC
#    define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \
        dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order); \
        assert(__CPROVER_same_object(dest_ptr, ring_buf->allocation)); \
        assert(aws_ring_buffer_check_atomic_ptr(ring_buf, dest_ptr));
#    define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \
        assert(aws_ring_buffer_check_atomic_ptr(ring_buf, src_ptr)); \
        aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
#else
#    define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order) \
        dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order);
#    define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order) \
        aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
#endif
#define AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, dest_ptr) \
    AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->tail, aws_memory_order_acquire);
#define AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, src_ptr) \
    AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->tail, src_ptr, aws_memory_order_release);
#define AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, dest_ptr) \
    AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->head, aws_memory_order_relaxed);
#define AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, src_ptr) \
    AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->head, src_ptr, aws_memory_order_relaxed);
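
/*
 * Memory-ordering note: the tail is stored with release semantics and loaded with acquire semantics
 * because acquire and release may be called from different threads. The head is only ever read and
 * written on the acquire path, which is presumably why relaxed ordering is sufficient for it.
 */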

int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size) {
    AWS_PRECONDITION(ring_buf != NULL);
    AWS_PRECONDITION(allocator != NULL);
    AWS_PRECONDITION(size > 0);

    AWS_ZERO_STRUCT(*ring_buf);

    ring_buf->allocation = aws_mem_acquire(allocator, size);

    if (!ring_buf->allocation) {
        return AWS_OP_ERR;
    }

    ring_buf->allocator = allocator;
    aws_atomic_init_ptr(&ring_buf->head, ring_buf->allocation);
    aws_atomic_init_ptr(&ring_buf->tail, ring_buf->allocation);
    ring_buf->allocation_end = ring_buf->allocation + size;

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    return AWS_OP_SUCCESS;
}

void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    if (ring_buf->allocation) {
        aws_mem_release(ring_buf->allocator, ring_buf->allocation);
    }

    AWS_ZERO_STRUCT(*ring_buf);
}
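
/*
 * Illustrative usage sketch (not part of the library; `allocator` and the sizes below are
 * hypothetical placeholders):
 *
 *     struct aws_ring_buffer rb;
 *     if (aws_ring_buffer_init(&rb, allocator, 4096) != AWS_OP_SUCCESS) {
 *         return AWS_OP_ERR;
 *     }
 *     struct aws_byte_buf region;
 *     AWS_ZERO_STRUCT(region);
 *     if (aws_ring_buffer_acquire(&rb, 512, &region) == AWS_OP_SUCCESS) {
 *         ... write into region.buffer, up to region.capacity bytes ...
 *         aws_ring_buffer_release(&rb, &region);
 *     }
 *     aws_ring_buffer_clean_up(&rb);
 */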

int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_size, struct aws_byte_buf *dest) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
    AWS_ERROR_PRECONDITION(requested_size != 0);
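
    /* Work from a local snapshot of head and tail: the release side may advance the tail
     * concurrently, so all decisions below are made against these copies. */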
    uint8_t *tail_cpy;
    uint8_t *head_cpy;
    AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
    AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);

    /* This branch handles the case where no buffers are currently vended. */
    if (head_cpy == tail_cpy) {
        size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;

        if (requested_size > ring_space) {
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return aws_raise_error(AWS_ERROR_OOM);
        }

        AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
        AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
        *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);

        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return AWS_OP_SUCCESS;
    }

    /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
    /* after N + 1 wraps */
    if (tail_cpy > head_cpy) {
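        /* The head has wrapped around behind the tail. Reserve one byte so the head can never land
         * exactly on the tail: head == tail is taken above to mean "no vended buffers". */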
        size_t space = tail_cpy - head_cpy - 1;

        if (space >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* After N wraps */
    } else if (tail_cpy < head_cpy) {
        /* prefer the head space for efficiency. */
        if ((size_t)(ring_buf->allocation_end - head_cpy) >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        if ((size_t)(tail_cpy - ring_buf->allocation) > requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
    }

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
    return aws_raise_error(AWS_ERROR_OOM);
}

int aws_ring_buffer_acquire_up_to(
    struct aws_ring_buffer *ring_buf,
    size_t minimum_size,
    size_t requested_size,
    struct aws_byte_buf *dest) {
    AWS_PRECONDITION(requested_size >= minimum_size);
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_PRECONDITION(aws_byte_buf_is_valid(dest));

    if (requested_size == 0 || minimum_size == 0 || !ring_buf || !dest) {
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    uint8_t *tail_cpy;
    uint8_t *head_cpy;
    AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
    AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);

    /* This branch handles the case where no buffers are currently vended. */
    if (head_cpy == tail_cpy) {
        size_t ring_space = ring_buf->allocation_end == NULL ? 0 : ring_buf->allocation_end - ring_buf->allocation;
        size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;

        if (allocation_size < minimum_size) {
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return aws_raise_error(AWS_ERROR_OOM);
        }

        /* Go as big as we can: no buffers are vended, so resetting head and tail to the start is safe. */
        AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + allocation_size);
        AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
        *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, allocation_size);

        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return AWS_OP_SUCCESS;
    }

    /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
    /* after N + 1 wraps */
    if (tail_cpy > head_cpy) {
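        /* As in aws_ring_buffer_acquire(), reserve one byte so the head can never land exactly on the tail. */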
        size_t space = tail_cpy - head_cpy;
        /* this shouldn't be possible. */
        AWS_ASSERT(space);
        space -= 1;

        size_t returnable_size = space > requested_size ? requested_size : space;

        if (returnable_size >= minimum_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + returnable_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, returnable_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* after N wraps */
    } else if (tail_cpy < head_cpy) {
        size_t head_space = ring_buf->allocation_end - head_cpy;
        size_t tail_space = tail_cpy - ring_buf->allocation;

        /* if you can vend the whole thing do it. Also prefer head space to tail space. */
        if (head_space >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        if (tail_space > requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        /* now vend as much as possible, once again preferring head space. */
        if (head_space >= minimum_size && head_space >= tail_space) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + head_space);
            *dest = aws_byte_buf_from_empty_array(head_cpy, head_space);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
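        /* Last resort: vend from the start of the allocation, again leaving one byte short of the
         * tail so the head cannot advance onto it. */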
        if (tail_space > minimum_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + tail_space - 1);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, tail_space - 1);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
    }

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
    return aws_raise_error(AWS_ERROR_OOM);
}

static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
#ifdef CBMC
    /* only continue if buf points-into ring_buffer because comparison of pointers to different objects is undefined
     * (C11 6.5.8) */
    return (
        __CPROVER_same_object(buf->buffer, ring_buffer->allocation) &&
        AWS_IMPLIES(
            ring_buffer->allocation_end != NULL, __CPROVER_same_object(buf->buffer, ring_buffer->allocation_end - 1)));
#endif
    return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end &&
           buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end;
}

void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
    AWS_PRECONDITION(s_buf_belongs_to_pool(ring_buffer, buf));
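
    /* Releasing simply advances the tail to the end of `buf`, so everything up to and including this
     * buffer becomes reusable. Note that this presumes buffers are released in the order they were
     * acquired; releasing out of order would move the tail past memory that is still vended. */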
    AWS_ATOMIC_STORE_TAIL_PTR(ring_buffer, buf->buffer + buf->capacity);
    AWS_ZERO_STRUCT(*buf);
    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
}

bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
    bool rval = s_buf_belongs_to_pool(ring_buffer, buf);
    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
    return rval;
}