rate_limiters.c

/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/mqtt/private/v5/rate_limiters.h>

#include <aws/common/clock.h>

static int s_rate_limit_time_fn(const struct aws_rate_limiter_token_bucket_options *options, uint64_t *current_time) {
    if (options->clock_fn != NULL) {
        return (*options->clock_fn)(current_time);
    }

    return aws_high_res_clock_get_ticks(current_time);
}

int aws_rate_limiter_token_bucket_init(
    struct aws_rate_limiter_token_bucket *limiter,
    const struct aws_rate_limiter_token_bucket_options *options) {
    AWS_ZERO_STRUCT(*limiter);

    if (options->tokens_per_second == 0 || options->maximum_token_count == 0) {
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    limiter->config = *options;

    aws_rate_limiter_token_bucket_reset(limiter);

    return AWS_OP_SUCCESS;
}

void aws_rate_limiter_token_bucket_reset(struct aws_rate_limiter_token_bucket *limiter) {
    limiter->current_token_count =
        aws_min_u64(limiter->config.initial_token_count, limiter->config.maximum_token_count);
    limiter->fractional_nanos = 0;
    limiter->fractional_nano_tokens = 0;

    uint64_t now = 0;
    AWS_FATAL_ASSERT(s_rate_limit_time_fn(&limiter->config, &now) == AWS_OP_SUCCESS);
    limiter->last_service_time = now;
}

static void s_regenerate_tokens(struct aws_rate_limiter_token_bucket *limiter) {
    uint64_t now = 0;
    AWS_FATAL_ASSERT(s_rate_limit_time_fn(&limiter->config, &now) == AWS_OP_SUCCESS);

    if (now <= limiter->last_service_time) {
        return;
    }

    uint64_t nanos_elapsed = now - limiter->last_service_time;

    /*
     * We break the regeneration calculation into two distinct steps:
     *   (1) Perform regeneration based on whole seconds elapsed (nice and easy: just multiply by the regen rate)
     *   (2) Perform regeneration based on the remaining fraction of a second elapsed
     *
     * We do this to minimize the chances of multiplication saturation before the divide necessary to normalize to
     * nanos.
     *
     * In particular, by doing this, we won't see saturation unless a regeneration rate in the multi-billions is used
     * or elapsed_seconds is in the billions.  This is similar reasoning to what we do in aws_timestamp_convert_u64.
     *
     * Additionally, we use a (sub-second) fractional counter/accumulator (fractional_nanos, fractional_nano_tokens)
     * in order to prevent error accumulation due to integer division rounding.
     */
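
    /*
     * Illustrative example (numbers chosen for exposition, not taken from the original source): with
     * tokens_per_second = 5 and 2.3 seconds elapsed, step (1) regenerates 2 * 5 = 10 tokens, while the remaining
     * 0.3 seconds (300,000,000 nanos) are folded into the fractional accumulator, which represents
     * 300,000,000 * 5 / 1,000,000,000 = 1 additional token once the division below is applied.
     */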

    /* break elapsed time into seconds and remainder nanos */
    uint64_t remainder_nanos = 0;
    uint64_t elapsed_seconds =
        aws_timestamp_convert(nanos_elapsed, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder_nanos);

    /* apply seconds-based regeneration */
    uint64_t tokens_regenerated = aws_mul_u64_saturating(elapsed_seconds, limiter->config.tokens_per_second);

    /* apply fractional remainder regeneration */
    limiter->fractional_nanos += remainder_nanos;

    /* fractional overflow check */
    if (limiter->fractional_nanos < AWS_TIMESTAMP_NANOS) {
        /*
         * no overflow, just do the division to figure out how many tokens are represented by the updated
         * fractional nanos
         */
        uint64_t new_fractional_tokens =
            aws_mul_u64_saturating(limiter->fractional_nanos, limiter->config.tokens_per_second) / AWS_TIMESTAMP_NANOS;

        /*
         * update the token count by how much the fractional token count changed
         */
        tokens_regenerated += new_fractional_tokens - limiter->fractional_nano_tokens;
        limiter->fractional_nano_tokens = new_fractional_tokens;
    } else {
        /*
         * overflow.  In this case, update the token count by the remaining tokens left to regenerate to make the
         * original fractional nano amount equal to one second.  This is the key part (a pseudo-reset) that lets us
         * avoid error accumulation due to integer division rounding over time.
         */
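        /*
         * Illustrative example (hypothetical numbers): with tokens_per_second = 5, fractional_nanos previously at
         * 900,000,000 (4 tokens already credited) and 200,000,000 newly-added remainder nanos, the accumulator
         * crosses one second.  We first credit the 5 - 4 = 1 token still owed for the full second, subtract one
         * second from the accumulator (leaving 100,000,000 nanos), and then credit the
         * 100,000,000 * 5 / 1,000,000,000 = 0 tokens represented by the new fractional remainder.
         */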
        tokens_regenerated += limiter->config.tokens_per_second - limiter->fractional_nano_tokens;

        /*
         * subtract off a second from the fractional part.  Guaranteed to be less than a second afterwards.
         */
        limiter->fractional_nanos -= AWS_TIMESTAMP_NANOS;

        /*
         * Calculate the new fractional nano token amount, and add them in.
         */
        limiter->fractional_nano_tokens =
            aws_mul_u64_saturating(limiter->fractional_nanos, limiter->config.tokens_per_second) / AWS_TIMESTAMP_NANOS;
        tokens_regenerated += limiter->fractional_nano_tokens;
    }

    limiter->current_token_count = aws_add_u64_saturating(tokens_regenerated, limiter->current_token_count);
    if (limiter->current_token_count > limiter->config.maximum_token_count) {
        limiter->current_token_count = limiter->config.maximum_token_count;
    }

    limiter->last_service_time = now;
}

bool aws_rate_limiter_token_bucket_can_take_tokens(
    struct aws_rate_limiter_token_bucket *limiter,
    uint64_t token_count) {
    s_regenerate_tokens(limiter);

    return limiter->current_token_count >= token_count;
}

int aws_rate_limiter_token_bucket_take_tokens(struct aws_rate_limiter_token_bucket *limiter, uint64_t token_count) {
    s_regenerate_tokens(limiter);

    if (limiter->current_token_count < token_count) {
        /* TODO: correct error once seated in aws-c-common */
        return aws_raise_error(AWS_ERROR_INVALID_STATE);
    }

    limiter->current_token_count -= token_count;

    return AWS_OP_SUCCESS;
}

uint64_t aws_rate_limiter_token_bucket_compute_wait_for_tokens(
    struct aws_rate_limiter_token_bucket *limiter,
    uint64_t token_count) {
    s_regenerate_tokens(limiter);

    if (limiter->current_token_count >= token_count) {
        return 0;
    }

    uint64_t token_rate = limiter->config.tokens_per_second;
    AWS_FATAL_ASSERT(limiter->fractional_nanos < AWS_TIMESTAMP_NANOS);
    AWS_FATAL_ASSERT(limiter->fractional_nano_tokens <= token_rate);

    uint64_t expected_wait = 0;

    uint64_t deficit = token_count - limiter->current_token_count;
    uint64_t remaining_fractional_tokens = token_rate - limiter->fractional_nano_tokens;

    if (deficit < remaining_fractional_tokens) {
        /*
         * case 1:
         * The token deficit is less than what will be regenerated by waiting for the fractional nanos accumulator
         * to reach one second's worth of time.
         *
         * In this case, base the calculation off of just a wait from fractional nanos.
         */
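        /*
         * Illustrative example (hypothetical numbers): with token_rate = 5, fractional_nanos = 100,000,000
         * (0 fractional tokens credited so far) and a deficit of 2 tokens, the accumulator must reach
         * 2 * 1,000,000,000 / 5 = 400,000,000 nanos to represent 2 tokens, of which 100,000,000 have already
         * elapsed, giving an expected wait of 300,000,000 nanos.
         */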
        uint64_t target_fractional_tokens = aws_add_u64_saturating(deficit, limiter->fractional_nano_tokens);
        uint64_t remainder_wait_unnormalized = aws_mul_u64_saturating(target_fractional_tokens, AWS_TIMESTAMP_NANOS);

        expected_wait = remainder_wait_unnormalized / token_rate - limiter->fractional_nanos;

        /* If the fractional wait is itself fractional, then add one more nanosecond to push us over the edge */
        if (remainder_wait_unnormalized % token_rate) {
            ++expected_wait;
        }
    } else {
        /*
         * case 2:
         * The token deficit requires regeneration over a time interval at least as large as what is needed
         * to overflow the fractional nanos accumulator.
         */
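        /*
         * Illustrative example (hypothetical numbers): with token_rate = 5, fractional_nanos = 600,000,000
         * (3 fractional tokens credited, so 2 remain in the current second) and a deficit of 9 tokens, we wait
         * 400,000,000 nanos to finish the fractional second, one full second for the next 5 tokens, and
         * 2 * 1,000,000,000 / 5 = 400,000,000 nanos for the final 2 tokens: 1,800,000,000 nanos in total.
         */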

        /* First account for making the fractional nano accumulator exactly one second */
        expected_wait = AWS_TIMESTAMP_NANOS - limiter->fractional_nanos;
        deficit -= remaining_fractional_tokens;

        /*
         * Now, for the remaining tokens, split into tokens from whole seconds' worth of regeneration as well
         * as a remainder requiring a fractional regeneration
         */
        uint64_t expected_wait_seconds = deficit / token_rate;
        uint64_t deficit_remainder = deficit % token_rate;

        /*
         * Account for seconds' worth of waiting
         */
        expected_wait += aws_mul_u64_saturating(expected_wait_seconds, AWS_TIMESTAMP_NANOS);

        /*
         * And finally, calculate the fractional wait to give us the last few tokens
         */
        uint64_t remainder_wait_unnormalized = aws_mul_u64_saturating(deficit_remainder, AWS_TIMESTAMP_NANOS);
        expected_wait += remainder_wait_unnormalized / token_rate;

        /* If the fractional wait is itself fractional, then add one more nanosecond to push us over the edge */
        if (remainder_wait_unnormalized % token_rate) {
            ++expected_wait;
        }
    }

    return expected_wait;
}
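
/*
 * Illustrative usage sketch (not part of the original file).  A caller throttling outbound writes might configure
 * and poll the bucket roughly as follows; `payload_size` and the option values are hypothetical:
 *
 *     struct aws_rate_limiter_token_bucket limiter;
 *     struct aws_rate_limiter_token_bucket_options options = {
 *         .tokens_per_second = 128 * 1024,
 *         .maximum_token_count = 256 * 1024,
 *         .initial_token_count = 256 * 1024,
 *     };
 *
 *     if (aws_rate_limiter_token_bucket_init(&limiter, &options)) {
 *         return AWS_OP_ERR;
 *     }
 *
 *     if (aws_rate_limiter_token_bucket_can_take_tokens(&limiter, payload_size)) {
 *         aws_rate_limiter_token_bucket_take_tokens(&limiter, payload_size);
 *         // ... send the payload ...
 *     } else {
 *         uint64_t wait_nanos =
 *             aws_rate_limiter_token_bucket_compute_wait_for_tokens(&limiter, payload_size);
 *         // ... reschedule the send attempt wait_nanos nanoseconds in the future ...
 *     }
 */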