mpsc_vinfarr_obstructive.h
#pragma once
/*
Semi-wait-free queue, multiple producers - one consumer. Strict order.
The queue algorithm uses the concept of a virtual infinite array.
A producer takes a number from a counter and atomically increments the counter.
The number taken is the number of the slot in the infinite array where the
producer puts its new message.
The producer then constructs the virtual infinite array as a bidirectional
linked list of blocks. Each block contains several slots.
There is a hint pointer which optimistically points to the last block
of the list and never goes backward.
The consumer exploits the property of the hint pointer always going forward
to free old blocks eventually. The consumer periodically reads the hint pointer
and the counter and thus deduces which producers potentially hold a pointer
to a block. The consumer can free the block once all those producers have
filled their slots and left the queue.
No producer can stop the progress of other producers.
The consumer can obstruct a slot of a delayed producer by putting a special mark.
Thus no producer can stop the progress of the consumer.
But a slow producer may be forced to retry an unlimited number of times.
Though it is very unlikely for a non-preempted producer to be obstructed.
That is why the algorithm is semi-wait-free.
WARNING: there is no wait&notify mechanism for the consumer;
the consumer receives nullptr if the queue was empty.
WARNING: though the algorithm itself is lock-free,
producers and the consumer could be blocked by the memory allocator.
WARNING: copy constructors of the queue are not thread-safe.
*/
#include <util/generic/noncopyable.h>
#include <util/generic/ptr.h>
#include <library/cpp/deprecated/atomic/atomic.h>
#include <util/system/spinlock.h>
#include "tune.h"
namespace NThreading {
namespace NObstructiveQueuePrivate {
typedef void* TMsgLink;
// Default (empty) base class mixed into TMsgBunch; lets users inject extra
// per-bunch state via the TBunchBase tune parameter at zero cost.
struct TEmpty {
};
  39. struct TEmptyAux {
  40. TEmptyAux Retrieve() const {
  41. return TEmptyAux();
  42. }
  43. void Store(TEmptyAux&) {
  44. }
  45. static constexpr TEmptyAux Zero() {
  46. return TEmptyAux();
  47. }
  48. };
// One cell of the virtual infinite array: the message pointer plus an
// auxiliary payload stored next to it. Msg is the published (atomic) part;
// AuxiliaryData must be written before the swap of Msg that publishes the
// slot (see TMsgBunch::Push). Member order matters: the aggregate
// initializers below rely on Msg being first.
template <typename TAux>
struct TSlot {
    // nullptr = empty; otherwise a message, or an obstruction token
    // (a TMsgBunch*) placed by the consumer.
    TMsgLink volatile Msg;
    TAux AuxiliaryData;
    inline void Store(TAux& aux) {
        AuxiliaryData.Store(aux);
    }
    inline TAux Retrieve() const {
        return AuxiliaryData.Retrieve();
    }
    // The "no message" value handed to the consumer on empty reads.
    static TSlot<TAux> NullElem() {
        return {nullptr, TAux::Zero()};
    }
    static TSlot<TAux> Pair(TMsgLink msg, TAux aux) {
        return {msg, std::move(aux)};
    }
};
// Specialization for the empty payload: the aux member is elided entirely,
// so a slot is exactly one volatile message pointer.
template <>
struct TSlot<TEmptyAux> {
    TMsgLink volatile Msg;
    inline void Store(TEmptyAux&) {
    }
    inline TEmptyAux Retrieve() const {
        return TEmptyAux();
    }
    static TSlot<TEmptyAux> NullElem() {
        return {nullptr};
    }
    static TSlot<TEmptyAux> Pair(TMsgLink msg, TEmptyAux) {
        return {msg};
    }
};
// Result of TMsgBunch::Push for a given global slot number.
enum TPushResult {
    PUSH_RESULT_OK,       // message stored into the slot
    PUSH_RESULT_BACKWARD, // slot belongs to an earlier bunch: follow BackLink
    PUSH_RESULT_FORWARD,  // slot belongs to a later bunch: follow/create NextBunch
    PUSH_RESULT_BLOCKED,  // consumer obstructed the slot: producer must take a new slot
};
/* One block of BUNCH_SIZE consecutive slots of the virtual infinite array.
   Blocks are linked both ways: NextBunch points to the block with higher
   slot numbers (attached exactly once via CAS), BackLink to the previous
   block. Token + NextToken implement the deferred-release protocol that
   lets the consumer free blocks once no producer can still reference them. */
template <typename TAux, ui32 BUNCH_SIZE, typename TBase = TEmpty>
struct TMsgBunch: public TBase {
    ui64 FirstSlot; // global number of the slot stored in LinkArray[0]
    TSlot<TAux> LinkArray[BUNCH_SIZE];
    TMsgBunch* volatile NextBunch; // forward link; set once via SetNextBunch
    TMsgBunch* volatile BackLink;  // backward link; cleared by Release
    // Reference token counter. Starts at 1 (ctor); obstructed producers
    // decrement it via DecrementToken, and the consumer adds BUNCH_SIZE when
    // it moves past this bunch (SetNextToken). Hitting exactly BUNCH_SIZE
    // after a decrement (or right after the add) triggers Release.
    // NOTE(review): invariant inferred from the arithmetic below — confirm
    // against the algorithm description at the top of the file.
    ui64 volatile Token;
    TMsgBunch* volatile NextToken; // next bunch in the token-release chain
    /* this push can return PUSH_RESULT_BLOCKED */
    // Producer side: try to publish msg into global slot `slot`.
    // Returns BACKWARD/FORWARD when the slot lies outside this bunch,
    // BLOCKED when the consumer already obstructed the slot with a token.
    inline TPushResult Push(TMsgLink msg, ui64 slot, TAux auxiliary) {
        if (Y_UNLIKELY(slot < FirstSlot)) {
            return PUSH_RESULT_BACKWARD;
        }
        if (Y_UNLIKELY(slot >= FirstSlot + BUNCH_SIZE)) {
            return PUSH_RESULT_FORWARD;
        }
        // Aux data must be in place before the Msg swap publishes the slot.
        LinkArray[slot - FirstSlot].Store(auxiliary);
        auto oldValue = AtomicSwap(&LinkArray[slot - FirstSlot].Msg, msg);
        if (Y_LIKELY(oldValue == nullptr)) {
            return PUSH_RESULT_OK;
        } else {
            // Non-null old value is the consumer's obstruction token:
            // drop our reference on it and report the blocked push.
            LeaveBlocked(oldValue);
            return PUSH_RESULT_BLOCKED;
        }
    }
    // True if `slot` is not beyond this bunch. Callers scan forward only,
    // so the lower bound (slot >= FirstSlot) is deliberately not checked.
    inline bool IsSlotHere(ui64 slot) {
        return slot < FirstSlot + BUNCH_SIZE;
    }
    inline TMsgLink GetSlot(ui64 slot) const {
        return AtomicGet(LinkArray[slot - FirstSlot].Msg);
    }
    // Reads the message pointer atomically, the aux payload non-atomically.
    inline TSlot<TAux> GetSlotAux(ui64 slot) const {
        auto msg = GetSlot(slot);
        auto aux = LinkArray[slot - FirstSlot].Retrieve();
        return TSlot<TAux>::Pair(msg, aux);
    }
    // Drop one token reference on the bunch whose token occupies `slot`.
    void LeaveBlocked(ui64 slot) {
        auto token = GetToken(slot);
        token->DecrementToken();
    }
    // Same, when the token pointer was already fetched (see Push).
    void LeaveBlocked(TMsgLink msg) {
        auto token = reinterpret_cast<TMsgBunch*>(msg);
        token->DecrementToken();
    }
    // Consumer side: obstruct `slot` with `token` if it is still empty.
    // Returns NullElem on success (the slow producer will find the token),
    // or the message if the producer published it first.
    TSlot<TAux> BlockSlotAux(ui64 slot, TMsgBunch* token) {
        auto old =
            AtomicSwap(&LinkArray[slot - FirstSlot].Msg, (TMsgLink)token);
        if (old == nullptr) {
            // It's valid to increment after AtomicCas
            // because token will release data only after SetNextToken
            // NOTE(review): the comment above says AtomicCas but the code
            // uses AtomicSwap — presumably a leftover from an older version.
            token->IncrementToken();
            return TSlot<TAux>::NullElem();
        }
        return TSlot<TAux>::Pair(old, LinkArray[slot - FirstSlot].Retrieve());
    }
    inline TMsgBunch* GetNextBunch() const {
        return AtomicGet(NextBunch);
    }
    // Attach the forward link exactly once; false if another producer won.
    inline bool SetNextBunch(TMsgBunch* ptr) {
        return AtomicCas(&NextBunch, ptr, nullptr);
    }
    inline TMsgBunch* GetBackLink() const {
        return AtomicGet(BackLink);
    }
    // Reinterprets the slot content as an obstruction-token bunch pointer.
    inline TMsgBunch* GetToken(ui64 slot) {
        return reinterpret_cast<TMsgBunch*>(LinkArray[slot - FirstSlot].Msg);
    }
    inline void IncrementToken() {
        AtomicIncrement(Token);
    }
    // the object could be destroyed after this method
    inline void DecrementToken() {
        if (Y_UNLIKELY(AtomicDecrement(Token) == BUNCH_SIZE)) {
            Release(this); // free the chain of bunches behind this one
            AtomicGet(NextToken)->DecrementToken();
            // this could be invalid here
        }
    }
    // the object could be destroyed after this method
    // Consumer side: chain the token release to `next` and add the
    // consumer's BUNCH_SIZE worth of references to this bunch.
    inline void SetNextToken(TMsgBunch* next) {
        AtomicSet(NextToken, next);
        if (Y_UNLIKELY(AtomicAdd(Token, BUNCH_SIZE) == BUNCH_SIZE)) {
            Release(this);
            next->DecrementToken();
        }
        // this could be invalid here
    }
    TMsgBunch(ui64 start, TMsgBunch* backLink) {
        AtomicSet(FirstSlot, start);
        // Zero all slots (empty). NOTE(review): this also zero-fills the TAux
        // members — assumes TAux is trivially zeroable; confirm for custom aux.
        memset(&LinkArray, 0, sizeof(LinkArray));
        AtomicSet(NextBunch, nullptr);
        AtomicSet(BackLink, backLink);
        // Initial extra unit; dropped by the reader ctor ("no previous token")
        // or through the SetNextToken protocol.
        AtomicSet(Token, 1);
        AtomicSet(NextToken, nullptr);
    }
    // Detaches and deletes the whole chain of earlier bunches behind `bunch`.
    // Reached only when the token count proves no producer references them.
    static void Release(TMsgBunch* bunch) {
        auto backLink = AtomicGet(bunch->BackLink);
        if (backLink == nullptr) {
            return;
        }
        AtomicSet(bunch->BackLink, nullptr);
        do {
            auto bbackLink = backLink->BackLink;
            delete backLink;
            backLink = bbackLink;
        } while (backLink != nullptr);
    }
    // Single-threaded teardown (called from ~TWriteBucket): deletes every
    // bunch reachable backward and forward from this one, including itself.
    void Destroy() {
        for (auto tail = BackLink; tail != nullptr;) {
            auto next = tail->BackLink;
            delete tail;
            tail = next;
        }
        for (auto next = this; next != nullptr;) {
            auto nnext = next->NextBunch;
            delete next;
            next = nnext;
        }
    }
};
// Producer-side half of the queue: the slot ticket counter plus a hint
// pointer to the (approximately) last bunch of the infinite array.
// Safe for concurrent use by many producers.
template <typename TAux, ui32 BUNCH_SIZE, typename TBunchBase = TEmpty>
class TWriteBucket {
public:
    // NOTE(review): declared but never defined in this file — verify a
    // definition exists in the corresponding .cpp before using it.
    static const ui64 GROSS_SIZE;
    using TBunch = TMsgBunch<TAux, BUNCH_SIZE, TBunchBase>;
    TWriteBucket(TBunch* bunch = new TBunch(0, nullptr))
        : LastBunch(bunch)
        , SlotCounter(0)
    {
    }
    // Move transfer: the source gives up ownership of the bunch list.
    // Not thread-safe (see the warning at the top of the file).
    TWriteBucket(TWriteBucket&& move)
        : LastBunch(move.LastBunch)
        , SlotCounter(move.SlotCounter)
    {
        move.LastBunch = nullptr;
    }
    ~TWriteBucket() {
        if (LastBunch != nullptr) {
            LastBunch->Destroy();
        }
    }
    // Takes the next slot ticket and publishes msg there.
    // Returns false when the consumer obstructed that slot; the caller must
    // retry, which draws a fresh ticket.
    inline bool Push(TMsgLink msg, TAux aux) {
        ui64 pushSlot = AtomicGetAndIncrement(SlotCounter);
        TBunch* hintBunch = GetLastBunch();
        for (;;) {
            auto hint = hintBunch->Push(msg, pushSlot, aux);
            if (Y_LIKELY(hint == PUSH_RESULT_OK)) {
                return true;
            }
            bool hhResult = HandleHint(hintBunch, hint);
            if (Y_UNLIKELY(!hhResult)) {
                return false;
            }
        }
    }
protected:
    template <typename, ui32, typename>
    friend class TReadBucket;
    TBunch* volatile LastBunch; // Hint
    volatile ui64 SlotCounter;  // next free slot ticket
    inline TBunch* GetLastBunch() const {
        return AtomicGet(LastBunch);
    }
    // Moves hintBunch one step toward the bunch owning the producer's slot.
    // Returns false only for PUSH_RESULT_BLOCKED.
    bool HandleHint(TBunch*& hintBunch, TPushResult hint) {
        if (Y_UNLIKELY(hint == PUSH_RESULT_BLOCKED)) {
            return false;
        }
        if (Y_UNLIKELY(hint == PUSH_RESULT_BACKWARD)) {
            hintBunch = hintBunch->GetBackLink();
            return true;
        }
        // PUSH_RESULT_FORWARD
        auto nextBunch = hintBunch->GetNextBunch();
        if (nextBunch == nullptr) {
            // Race with other producers to attach the new bunch;
            // the loser deletes its allocation and takes the winner's.
            auto first = hintBunch->FirstSlot + BUNCH_SIZE;
            nextBunch = new TBunch(first, hintBunch);
            if (Y_UNLIKELY(!hintBunch->SetNextBunch(nextBunch))) {
                delete nextBunch;
                nextBunch = hintBunch->GetNextBunch();
            }
        }
        // hintBunch could not be freed here so it cannot be reused
        // it's alright if this CAS was not succeeded,
        // it means that other thread did that recently
        AtomicCas(&LastBunch, nextBunch, hintBunch);
        hintBunch = nextBunch;
        return true;
    }
};
// Consumer-side half of the queue: walks the bunch list in slot order.
// Single consumer only; pairs with exactly one TWriteBucket.
template <typename TAux, ui32 BUNCH_SIZE, typename TBunchBase>
class TReadBucket {
public:
    // Bounded spin count before the consumer gives up on a lagging
    // producer (StubbornPopAux) or a missing next-bunch link.
    static constexpr int MAX_NUMBER_OF_TRIES_TO_READ = 20;
    using TWBucket = TWriteBucket<TAux, BUNCH_SIZE, TBunchBase>;
    using TBunch = TMsgBunch<TAux, BUNCH_SIZE, TBunchBase>;
    TReadBucket(TWBucket* writer)
        : Writer(writer)
        , ReadBunch(writer->GetLastBunch())
        , LastKnownPushBunch(writer->GetLastBunch())
    {
        ReadBunch->DecrementToken(); // no previous token
    }
    // Rebinds a (moved-from) reader to another writer.
    TReadBucket(TReadBucket toCopy, TWBucket* writer)
        : TReadBucket(std::move(toCopy))
    {
        Writer = writer;
    }
    // Slots claimed by producers but not yet read. NOTE(review): claimed
    // slots may not be filled yet, so this is an upper estimate of the
    // messages actually available to Pop.
    ui64 ReadyCount() const {
        return AtomicGet(Writer->SlotCounter) - ReadSlot;
    }
    inline TMsgLink Pop() {
        return PopAux().Msg;
    }
    // Returns the next slot (message + aux), or NullElem when the queue
    // looks empty or the only pending producer has not filled its slot.
    inline TSlot<TAux> PopAux() {
        for (;;) {
            // Consumed everything we knew was pushed: refresh the snapshot.
            if (Y_UNLIKELY(ReadSlot == LastKnownPushSlot)) {
                if (Y_LIKELY(!RereadPushSlot())) {
                    return TSlot<TAux>::NullElem();
                }
            }
            // Crossed the bunch boundary: advance to the next bunch.
            if (Y_UNLIKELY(!ReadBunch->IsSlotHere(ReadSlot))) {
                if (Y_UNLIKELY(!SwitchToNextBunch())) {
                    return TSlot<TAux>::NullElem();
                }
            }
            auto result = ReadBunch->GetSlotAux(ReadSlot);
            if (Y_LIKELY(result.Msg != nullptr)) {
                ++ReadSlot;
                return result;
            }
            // Slot claimed but still empty. If its producer is the only one
            // in flight, do not obstruct it — just report "empty".
            if (ReadSlot + 1 == AtomicGet(Writer->SlotCounter)) {
                return TSlot<TAux>::NullElem();
            }
            result = StubbornPopAux();
            if (result.Msg != nullptr) {
                return result;
            }
        }
    }
private:
    TWBucket* Writer;
    TBunch* ReadBunch;          // bunch containing ReadSlot
    ui64 ReadSlot = 0;          // next slot number to read
    TBunch* LastKnownPushBunch; // last bunch seen via the writer's hint
    ui64 LastKnownPushSlot = 0; // snapshot of Writer->SlotCounter
    // MUST BE: ReadSlot == LastKnownPushSlot
    // Refreshes the push-side snapshot and, when the hint moved forward,
    // hands the release token to the previous push bunch so old bunches
    // can eventually be freed. Returns false if nothing new was pushed.
    bool RereadPushSlot() {
        auto oldSlot = LastKnownPushSlot;
        auto currentPushBunch = Writer->GetLastBunch();
        auto currentPushSlot = AtomicGet(Writer->SlotCounter);
        if (currentPushBunch != LastKnownPushBunch) {
            // LastKnownPushBunch could be invalid after this line
            LastKnownPushBunch->SetNextToken(currentPushBunch);
        }
        LastKnownPushBunch = currentPushBunch;
        LastKnownPushSlot = currentPushSlot;
        return oldSlot != LastKnownPushSlot;
    }
    // Bounded wait for the forward link to appear (a producer may be just
    // about to attach it). False = give up, report "empty" for now.
    bool SwitchToNextBunch() {
        for (int q = 0; q < MAX_NUMBER_OF_TRIES_TO_READ; ++q) {
            auto next = ReadBunch->GetNextBunch();
            if (next != nullptr) {
                ReadBunch = next;
                return true;
            }
            SpinLockPause();
        }
        return false;
    }
    // Bounded wait for the slot to be filled; if the producer is too slow,
    // obstruct the slot with a token and step over it. This is the
    // "obstructive" part that keeps the consumer semi-wait-free.
    TSlot<TAux> StubbornPopAux() {
        for (int q = 0; q < MAX_NUMBER_OF_TRIES_TO_READ; ++q) {
            auto result = ReadBunch->GetSlotAux(ReadSlot);
            if (Y_LIKELY(result.Msg != nullptr)) {
                ++ReadSlot;
                return result;
            }
            SpinLockPause();
        }
        return ReadBunch->BlockSlotAux(ReadSlot++, LastKnownPushBunch);
    }
};
// Default tune parameters for TObstructiveConsumerAuxQueue; each can be
// overridden via the Declare* tune wrappers declared below.
struct TDefaultParams {
    // Destroy unread items in the queue destructor.
    static constexpr bool DeleteItems = true;
    // Per-slot auxiliary payload type (empty by default).
    using TAux = NObstructiveQueuePrivate::TEmptyAux;
    // Extra base class mixed into every TMsgBunch.
    using TBunchBase = NObstructiveQueuePrivate::TEmpty;
    // Slots per bunch. NOTE(review): presumably sized so a bunch fits an
    // allocator bucket nicely — confirm before changing.
    static constexpr ui32 BUNCH_SIZE = 251;
};
} //namespace NObstructiveQueuePrivate
// Tune wrappers (see tune.h): each declares a named override for one
// member of NObstructiveQueuePrivate::TDefaultParams, to be passed as a
// TParams argument of TObstructiveConsumerAuxQueue.
DeclareTuneValueParam(TObstructiveQueueBunchSize, ui32, BUNCH_SIZE);
DeclareTuneValueParam(TObstructiveQueueDeleteItems, bool, DeleteItems);
DeclareTuneTypeParam(TObstructiveQueueBunchBase, TBunchBase);
DeclareTuneTypeParam(TObstructiveQueueAux, TAux);
  379. template <typename TItem = void, typename... TParams>
  380. class TObstructiveConsumerAuxQueue {
  381. private:
  382. using TTuned =
  383. TTune<NObstructiveQueuePrivate::TDefaultParams, TParams...>;
  384. using TAux = typename TTuned::TAux;
  385. using TSlot = NObstructiveQueuePrivate::TSlot<TAux>;
  386. using TMsgLink = NObstructiveQueuePrivate::TMsgLink;
  387. using TBunchBase = typename TTuned::TBunchBase;
  388. static constexpr bool DeleteItems = TTuned::DeleteItems;
  389. static constexpr ui32 BUNCH_SIZE = TTuned::BUNCH_SIZE;
  390. public:
  391. TObstructiveConsumerAuxQueue()
  392. : RBuckets(&WBucket)
  393. {
  394. }
  395. ~TObstructiveConsumerAuxQueue() {
  396. if (DeleteItems) {
  397. for (;;) {
  398. auto msg = Pop();
  399. if (msg == nullptr) {
  400. break;
  401. }
  402. TDelete::Destroy(msg);
  403. }
  404. }
  405. }
  406. void Push(TItem* msg) {
  407. while (!WBucket.Push(reinterpret_cast<TMsgLink>(msg), TAux())) {
  408. }
  409. }
  410. TItem* Pop() {
  411. return reinterpret_cast<TItem*>(RBuckets.Pop());
  412. }
  413. TSlot PopAux() {
  414. return RBuckets.PopAux();
  415. }
  416. private:
  417. NObstructiveQueuePrivate::TWriteBucket<TAux, BUNCH_SIZE, TBunchBase>
  418. WBucket;
  419. NObstructiveQueuePrivate::TReadBucket<TAux, BUNCH_SIZE, TBunchBase>
  420. RBuckets;
  421. };
// Convenience wrapper: the queue with only the DeleteItems tune exposed
// as a plain bool template parameter.
template <typename TItem = void, bool DeleteItems = true>
class TObstructiveConsumerQueue
    : public TObstructiveConsumerAuxQueue<TItem,
          TObstructiveQueueDeleteItems<DeleteItems>> {
};
} //namespace NThreading