sync_queue.c

/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <limits.h>
#include <stdint.h>
#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/container_fifo.h"
#include "libavutil/channel_layout.h"
#include "libavutil/cpu.h"
#include "libavutil/error.h"
#include "libavutil/mathematics.h"
#include "libavutil/mem.h"
#include "libavutil/samplefmt.h"
#include "libavutil/timestamp.h"

#include "sync_queue.h"

/*
 * How this works:
 * ---------------
 * time:   0    1    2    3    4    5    6    7    8    9    10   11   12   13
 *         -------------------------------------------------------------------
 *         |    |    |    |    |    |    |    |    |    |    |    |    |    |
 *         |    ┌───┐┌────────┐┌───┐┌─────────────┐
 * stream 0|    │d=1││  d=2   ││d=1││     d=3     │
 *         |    └───┘└────────┘└───┘└─────────────┘
 *         ┌───┐               ┌───────────────────────┐
 * stream 1│d=1│               │          d=5          │
 *         └───┘               └───────────────────────┘
 *         |    ┌───┐┌───┐┌───┐┌───┐
 * stream 2|    │d=1││d=1││d=1││d=1│ <- stream 2 is the head stream of the queue
 *         |    └───┘└───┘└───┘└───┘
 *                ^              ^
 *         [stream 2 tail] [stream 2 head]
 *
 * We have N streams (N=3 in the diagram above); each stream is a FIFO. The
 * *tail* of each FIFO is the frame with the smallest end time, the *head* is
 * the frame with the largest end time. Frames submitted to the queue with
 * sq_send() are placed after the head, frames returned to the caller with
 * sq_receive() are taken from the tail.
 *
 * The head stream of the whole queue (SyncQueue.head_stream) is the limiting
 * stream with the *smallest* head timestamp, i.e. the stream whose source lags
 * furthest behind all other streams. It determines which frames can be output
 * from the queue.
 *
 * In the diagram, the head stream is 2, because its head time is t=5, while
 * streams 0 and 1 end at t=8 and t=9 respectively. All frames that _end_ at
 * or before t=5 can be output, i.e. the first 3 frames from stream 0, the
 * first frame from stream 1, and all 4 frames from stream 2.
 */

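/*
 * Rough usage sketch (illustrative only; error handling and frame allocation
 * are omitted, and video_frame/audio_frame/out_frame/process() are
 * placeholders -- the real API is declared in sync_queue.h):
 *
 *     SyncQueue *sq = sq_alloc(SYNC_QUEUE_FRAMES, 10 * AV_TIME_BASE, NULL);
 *     int vidx = sq_add_stream(sq, 1);                     // limiting stream
 *     int aidx = sq_add_stream(sq, 1);                     // limiting stream
 *
 *     // submit frames as they are produced
 *     sq_send(sq, vidx, (SyncQueueFrame){ .f = video_frame });
 *     sq_send(sq, aidx, (SyncQueueFrame){ .f = audio_frame });
 *
 *     // drain everything that may currently be output, in interleaved order
 *     SyncQueueFrame out = { .f = out_frame };
 *     while (sq_receive(sq, -1, out) >= 0)
 *         process(out.f);
 *
 *     sq_free(&sq);
 */
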
#define SQPTR(sq, frame) ((sq->type == SYNC_QUEUE_FRAMES) ? \
                          (void*)frame.f : (void*)frame.p)

typedef struct SyncQueueStream {
    AVContainerFifo *fifo;
    AVRational       tb;

    /* number of audio samples in fifo */
    uint64_t         samples_queued;
    /* stream head: largest timestamp seen */
    int64_t          head_ts;
    int              limiting;
    /* no more frames will be sent for this stream */
    int              finished;

    uint64_t         frames_sent;
    uint64_t         samples_sent;
    uint64_t         frames_max;
    int              frame_samples;
} SyncQueueStream;

struct SyncQueue {
    enum SyncQueueType type;

    void *logctx;

    /* no more frames will be sent for any stream */
    int finished;
    /* sync head: the stream with the _smallest_ head timestamp
     * this stream determines which frames can be output */
    int head_stream;
    /* the finished stream with the smallest finish timestamp or -1 */
    int head_finished_stream;

    // maximum buffering duration in microseconds
    int64_t buf_size_us;

    SyncQueueStream *streams;
    unsigned int     nb_streams;

    int have_limiting;

    uintptr_t align_mask;
};

/**
 * Compute the end timestamp of a frame. If nb_samples is provided, consider
 * the frame to have this number of audio samples, otherwise use frame duration.
 */
static int64_t frame_end(const SyncQueue *sq, SyncQueueFrame frame, int nb_samples)
{
    if (nb_samples) {
        int64_t d = av_rescale_q(nb_samples, (AVRational){ 1, frame.f->sample_rate },
                                 frame.f->time_base);
        return frame.f->pts + d;
    }

    return (sq->type == SYNC_QUEUE_PACKETS) ?
           frame.p->pts + frame.p->duration :
           frame.f->pts + frame.f->duration;
}

static int frame_samples(const SyncQueue *sq, SyncQueueFrame frame)
{
    return (sq->type == SYNC_QUEUE_PACKETS) ? 0 : frame.f->nb_samples;
}

static int frame_null(const SyncQueue *sq, SyncQueueFrame frame)
{
    return (sq->type == SYNC_QUEUE_PACKETS) ? (frame.p == NULL) : (frame.f == NULL);
}

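/* Adopt the timebase of the incoming frame/packet for the stream. The timebase
 * may only change while the stream's FIFO is empty; a previously recorded head
 * timestamp is rescaled to the new timebase. */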
static void tb_update(const SyncQueue *sq, SyncQueueStream *st,
                      const SyncQueueFrame frame)
{
    AVRational tb = (sq->type == SYNC_QUEUE_PACKETS) ?
                    frame.p->time_base : frame.f->time_base;

    av_assert0(tb.num > 0 && tb.den > 0);

    if (tb.num == st->tb.num && tb.den == st->tb.den)
        return;

    // timebase should not change after the first frame
    av_assert0(!av_container_fifo_can_read(st->fifo));

    if (st->head_ts != AV_NOPTS_VALUE)
        st->head_ts = av_rescale_q(st->head_ts, st->tb, tb);

    st->tb = tb;
}

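/* Mark a stream as finished - no further frames will be accepted for it. If
 * the stream is limiting, possibly update the head finished stream, mark
 * streams already ahead of it as finished too, and mark the whole queue as
 * finished once every stream is finished. */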
static void finish_stream(SyncQueue *sq, unsigned int stream_idx)
{
    SyncQueueStream *st = &sq->streams[stream_idx];

    if (!st->finished)
        av_log(sq->logctx, AV_LOG_DEBUG,
               "sq: finish %u; head ts %s\n", stream_idx,
               av_ts2timestr(st->head_ts, &st->tb));

    st->finished = 1;

    if (st->limiting && st->head_ts != AV_NOPTS_VALUE) {
        /* check if this stream is the new finished head */
        if (sq->head_finished_stream < 0 ||
            av_compare_ts(st->head_ts, st->tb,
                          sq->streams[sq->head_finished_stream].head_ts,
                          sq->streams[sq->head_finished_stream].tb) < 0) {
            sq->head_finished_stream = stream_idx;
        }

        /* mark as finished all streams that should no longer receive new frames,
         * due to them being ahead of some finished stream */
        st = &sq->streams[sq->head_finished_stream];
        for (unsigned int i = 0; i < sq->nb_streams; i++) {
            SyncQueueStream *st1 = &sq->streams[i];
            if (st != st1 && st1->head_ts != AV_NOPTS_VALUE &&
                av_compare_ts(st->head_ts, st->tb, st1->head_ts, st1->tb) <= 0) {
                if (!st1->finished)
                    av_log(sq->logctx, AV_LOG_DEBUG,
                           "sq: finish secondary %u; head ts %s\n", i,
                           av_ts2timestr(st1->head_ts, &st1->tb));

                st1->finished = 1;
            }
        }
    }

    /* mark the whole queue as finished if all streams are finished */
    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        if (!sq->streams[i].finished)
            return;
    }
    sq->finished = 1;

    av_log(sq->logctx, AV_LOG_DEBUG, "sq: finish queue\n");
}

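/* Recompute the queue head, i.e. the limiting stream with the smallest head
 * timestamp. The initial head is only chosen once every limiting stream has
 * seen at least one timestamp. */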
static void queue_head_update(SyncQueue *sq)
{
    av_assert0(sq->have_limiting);

    if (sq->head_stream < 0) {
        unsigned first_limiting = UINT_MAX;

        /* wait for one timestamp in each stream before determining
         * the queue head */
        for (unsigned int i = 0; i < sq->nb_streams; i++) {
            SyncQueueStream *st = &sq->streams[i];

            if (!st->limiting)
                continue;
            if (st->head_ts == AV_NOPTS_VALUE)
                return;
            if (first_limiting == UINT_MAX)
                first_limiting = i;
        }

        // placeholder value, correct one will be found below
        av_assert0(first_limiting < UINT_MAX);
        sq->head_stream = first_limiting;
    }

    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        SyncQueueStream *st_head  = &sq->streams[sq->head_stream];
        SyncQueueStream *st_other = &sq->streams[i];
        if (st_other->limiting && st_other->head_ts != AV_NOPTS_VALUE &&
            av_compare_ts(st_other->head_ts, st_other->tb,
                          st_head->head_ts,  st_head->tb) < 0)
            sq->head_stream = i;
    }
}

/* update this stream's head timestamp */
static void stream_update_ts(SyncQueue *sq, unsigned int stream_idx, int64_t ts)
{
    SyncQueueStream *st = &sq->streams[stream_idx];

    if (ts == AV_NOPTS_VALUE ||
        (st->head_ts != AV_NOPTS_VALUE && st->head_ts >= ts))
        return;

    st->head_ts = ts;

    /* if this stream is now ahead of some finished stream, then
     * this stream is also finished */
    if (sq->head_finished_stream >= 0 &&
        av_compare_ts(sq->streams[sq->head_finished_stream].head_ts,
                      sq->streams[sq->head_finished_stream].tb,
                      ts, st->tb) <= 0)
        finish_stream(sq, stream_idx);

    /* update the overall head timestamp if it could have changed */
    if (st->limiting &&
        (sq->head_stream < 0 || sq->head_stream == stream_idx))
        queue_head_update(sq);
}

/* If the queue for the given stream (or all streams when stream_idx=-1)
 * is overflowing, trigger a fake heartbeat on lagging streams.
 *
 * @return 1 if heartbeat triggered, 0 otherwise
 */
static int overflow_heartbeat(SyncQueue *sq, int stream_idx)
{
    SyncQueueStream *st;
    SyncQueueFrame   frame;
    int64_t tail_ts = AV_NOPTS_VALUE;

    /* if no stream specified, pick the one that is most ahead */
    if (stream_idx < 0) {
        int64_t ts = AV_NOPTS_VALUE;

        for (int i = 0; i < sq->nb_streams; i++) {
            st = &sq->streams[i];
            if (st->head_ts != AV_NOPTS_VALUE &&
                (ts == AV_NOPTS_VALUE ||
                 av_compare_ts(ts, sq->streams[stream_idx].tb,
                               st->head_ts, st->tb) < 0)) {
                ts = st->head_ts;
                stream_idx = i;
            }
        }
        /* no stream has a timestamp yet -> nothing to do */
        if (stream_idx < 0)
            return 0;
    }

    st = &sq->streams[stream_idx];

    /* get the chosen stream's tail timestamp */
    for (size_t i = 0; tail_ts == AV_NOPTS_VALUE &&
                       av_container_fifo_peek(st->fifo, (void**)&frame, i) >= 0; i++)
        tail_ts = frame_end(sq, frame, 0);

    /* overflow triggers when the tail is over specified duration behind the head */
    if (tail_ts == AV_NOPTS_VALUE || tail_ts >= st->head_ts ||
        av_rescale_q(st->head_ts - tail_ts, st->tb, AV_TIME_BASE_Q) < sq->buf_size_us)
        return 0;

    /* signal a fake timestamp for all streams that prevent tail_ts from being output */
    tail_ts++;
    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        const SyncQueueStream *st1 = &sq->streams[i];
        int64_t ts;

        if (st == st1 || st1->finished ||
            (st1->head_ts != AV_NOPTS_VALUE &&
             av_compare_ts(tail_ts, st->tb, st1->head_ts, st1->tb) <= 0))
            continue;

        ts = av_rescale_q(tail_ts, st->tb, st1->tb);
        if (st1->head_ts != AV_NOPTS_VALUE)
            ts = FFMAX(st1->head_ts + 1, ts);

        av_log(sq->logctx, AV_LOG_DEBUG, "sq: %u overflow heartbeat %s -> %s\n",
               i, av_ts2timestr(st1->head_ts, &st1->tb), av_ts2timestr(ts, &st1->tb));

        stream_update_ts(sq, i, ts);
    }

    return 1;
}

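/* Submit a frame (or packet) for the given stream; a NULL frame marks the
 * stream as finished. Returns AVERROR_EOF if the stream was already finished,
 * 0 on success, or a negative error code on failure. */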
int sq_send(SyncQueue *sq, unsigned int stream_idx, SyncQueueFrame frame)
{
    SyncQueueStream *st;
    int64_t ts;
    int ret, nb_samples;

    av_assert0(stream_idx < sq->nb_streams);
    st = &sq->streams[stream_idx];

    if (frame_null(sq, frame)) {
        av_log(sq->logctx, AV_LOG_DEBUG, "sq: %u EOF\n", stream_idx);
        finish_stream(sq, stream_idx);
        return 0;
    }
    if (st->finished)
        return AVERROR_EOF;

    tb_update(sq, st, frame);

    nb_samples = frame_samples(sq, frame);

    // make sure frame duration is consistent with sample count
    if (nb_samples) {
        av_assert0(frame.f->sample_rate > 0);
        frame.f->duration = av_rescale_q(nb_samples, (AVRational){ 1, frame.f->sample_rate },
                                         frame.f->time_base);
    }

    ts = frame_end(sq, frame, 0);

    av_log(sq->logctx, AV_LOG_DEBUG, "sq: send %u ts %s\n", stream_idx,
           av_ts2timestr(ts, &st->tb));

    ret = av_container_fifo_write(st->fifo, SQPTR(sq, frame), 0);
    if (ret < 0)
        return ret;

    stream_update_ts(sq, stream_idx, ts);

    st->samples_queued += nb_samples;
    st->samples_sent   += nb_samples;

    if (st->frame_samples)
        st->frames_sent = st->samples_sent / st->frame_samples;
    else
        st->frames_sent++;

    if (st->frames_sent >= st->frames_max) {
        av_log(sq->logctx, AV_LOG_DEBUG, "sq: %u frames_max %"PRIu64" reached\n",
               stream_idx, st->frames_max);

        finish_stream(sq, stream_idx);
    }

    return 0;
}

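/* Discard the first nb_samples samples of an audio frame in place, by
 * advancing the data pointers and adjusting nb_samples, duration and pts
 * accordingly. */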
static void offset_audio(AVFrame *f, int nb_samples)
{
    const int planar = av_sample_fmt_is_planar(f->format);
    const int planes = planar ? f->ch_layout.nb_channels : 1;
    const int bps    = av_get_bytes_per_sample(f->format);
    const int offset = nb_samples * bps * (planar ? 1 : f->ch_layout.nb_channels);

    av_assert0(bps > 0);
    av_assert0(nb_samples < f->nb_samples);

    for (int i = 0; i < planes; i++) {
        f->extended_data[i] += offset;
        if (i < FF_ARRAY_ELEMS(f->data))
            f->data[i] = f->extended_data[i];
    }
    f->linesize[0] -= offset;
    f->nb_samples  -= nb_samples;
    f->duration     = av_rescale_q(f->nb_samples, (AVRational){ 1, f->sample_rate },
                                   f->time_base);
    f->pts         += av_rescale_q(nb_samples,    (AVRational){ 1, f->sample_rate },
                                   f->time_base);
}

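/* Check whether an audio frame's data may be referenced directly after an
 * in-place offset, i.e. whether data and linesize satisfy the alignment mask
 * set up in sq_frame_samples(). */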
static int frame_is_aligned(const SyncQueue *sq, const AVFrame *frame)
{
    // only checks linesize[0], so only works for audio
    av_assert0(frame->nb_samples > 0);
    av_assert0(sq->align_mask);

    // only check data[0], because we always offset all data pointers
    // by the same offset, so if one is aligned, all are
    if (!((uintptr_t)frame->data[0] & sq->align_mask) &&
        !(frame->linesize[0]        & sq->align_mask) &&
        frame->linesize[0] > sq->align_mask)
        return 1;

    return 0;
}

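/* Fill dst with exactly nb_samples samples from the stream's FIFO. When the
 * first queued frame has more than enough samples and is suitably aligned,
 * dst just references it; otherwise a new buffer is allocated and samples are
 * copied from as many queued frames as needed. */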
static int receive_samples(SyncQueue *sq, SyncQueueStream *st,
                           AVFrame *dst, int nb_samples)
{
    SyncQueueFrame src;
    int ret;

    av_assert0(st->samples_queued >= nb_samples);

    ret = av_container_fifo_peek(st->fifo, (void**)&src, 0);
    av_assert0(ret >= 0);

    // peeked frame has enough samples and its data is aligned
    // -> we can just make a reference and limit its sample count
    if (src.f->nb_samples > nb_samples && frame_is_aligned(sq, src.f)) {
        ret = av_frame_ref(dst, src.f);
        if (ret < 0)
            return ret;

        dst->nb_samples = nb_samples;
        offset_audio(src.f, nb_samples);
        st->samples_queued -= nb_samples;

        goto finish;
    }

    // otherwise allocate a new frame and copy the data
    ret = av_channel_layout_copy(&dst->ch_layout, &src.f->ch_layout);
    if (ret < 0)
        return ret;

    dst->format     = src.f->format;
    dst->nb_samples = nb_samples;

    ret = av_frame_get_buffer(dst, 0);
    if (ret < 0)
        goto fail;

    ret = av_frame_copy_props(dst, src.f);
    if (ret < 0)
        goto fail;

    dst->nb_samples = 0;

    while (dst->nb_samples < nb_samples) {
        int to_copy;

        ret = av_container_fifo_peek(st->fifo, (void**)&src, 0);
        av_assert0(ret >= 0);

        to_copy = FFMIN(nb_samples - dst->nb_samples, src.f->nb_samples);

        av_samples_copy(dst->extended_data, src.f->extended_data, dst->nb_samples,
                        0, to_copy, dst->ch_layout.nb_channels, dst->format);

        if (to_copy < src.f->nb_samples)
            offset_audio(src.f, to_copy);
        else
            av_container_fifo_drain(st->fifo, 1);

        st->samples_queued -= to_copy;
        dst->nb_samples    += to_copy;
    }

finish:
    dst->duration = av_rescale_q(nb_samples, (AVRational){ 1, dst->sample_rate },
                                 dst->time_base);

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}

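/* Try to output one frame for the given stream. A frame may be output when it
 * ends no later than the overall queue head timestamp, has no timestamp at
 * all, or when there are no limiting streams. Returns 0 on success,
 * AVERROR(EAGAIN) when no frame can be output yet, AVERROR_EOF when the
 * stream is drained. */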
static int receive_for_stream(SyncQueue *sq, unsigned int stream_idx,
                              SyncQueueFrame frame)
{
    const SyncQueueStream *st_head = sq->head_stream >= 0 ?
                                     &sq->streams[sq->head_stream] : NULL;
    SyncQueueStream *st;

    av_assert0(stream_idx < sq->nb_streams);
    st = &sq->streams[stream_idx];

    if (av_container_fifo_can_read(st->fifo) &&
        (st->frame_samples <= st->samples_queued || st->finished)) {
        int nb_samples = st->frame_samples;
        SyncQueueFrame peek;
        int64_t ts;
        int cmp = 1;

        if (st->finished)
            nb_samples = FFMIN(nb_samples, st->samples_queued);

        av_container_fifo_peek(st->fifo, (void**)&peek, 0);
        ts = frame_end(sq, peek, nb_samples);

        /* check if this stream's tail timestamp does not overtake
         * the overall queue head */
        if (ts != AV_NOPTS_VALUE && st_head)
            cmp = av_compare_ts(ts, st->tb, st_head->head_ts, st_head->tb);

        /* We can release frames that do not end after the queue head.
         * Frames with no timestamps are just passed through with no conditions.
         * Frames are also passed through when there are no limiting streams.
         */
        if (cmp <= 0 || ts == AV_NOPTS_VALUE || !sq->have_limiting) {
            if (nb_samples &&
                (nb_samples != peek.f->nb_samples || !frame_is_aligned(sq, peek.f))) {
                int ret = receive_samples(sq, st, frame.f, nb_samples);
                if (ret < 0)
                    return ret;
            } else {
                int ret = av_container_fifo_read(st->fifo, SQPTR(sq, frame), 0);
                av_assert0(ret >= 0);
                av_assert0(st->samples_queued >= frame_samples(sq, frame));
                st->samples_queued -= frame_samples(sq, frame);
            }

            av_log(sq->logctx, AV_LOG_DEBUG,
                   "sq: receive %u ts %s queue head %d ts %s\n", stream_idx,
                   av_ts2timestr(frame_end(sq, frame, 0), &st->tb),
                   sq->head_stream,
                   st_head ? av_ts2timestr(st_head->head_ts, &st_head->tb) : "N/A");

            return 0;
        }
    }

    return (sq->finished || (st->finished && !av_container_fifo_can_read(st->fifo))) ?
           AVERROR_EOF : AVERROR(EAGAIN);
}

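/* Read one frame, either for the specified stream or (stream_idx < 0) for any
 * stream that has output available. Returns the index of the stream the frame
 * belongs to, or AVERROR_EOF / AVERROR(EAGAIN). */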
static int receive_internal(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
{
    int nb_eof = 0;
    int ret;

    /* read a frame for a specific stream */
    if (stream_idx >= 0) {
        ret = receive_for_stream(sq, stream_idx, frame);
        return (ret < 0) ? ret : stream_idx;
    }

    /* read a frame for any stream with available output */
    for (unsigned int i = 0; i < sq->nb_streams; i++) {
        ret = receive_for_stream(sq, i, frame);
        if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
            nb_eof += (ret == AVERROR_EOF);
            continue;
        }
        return (ret < 0) ? ret : i;
    }

    return (nb_eof == sq->nb_streams) ? AVERROR_EOF : AVERROR(EAGAIN);
}

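/* Public entry point for reading from the queue; retries once after
 * triggering an overflow heartbeat when the first attempt would block. */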
int sq_receive(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
{
    int ret = receive_internal(sq, stream_idx, frame);

    /* try again if the queue overflowed and triggered a fake heartbeat
     * for lagging streams */
    if (ret == AVERROR(EAGAIN) && overflow_heartbeat(sq, stream_idx))
        ret = receive_internal(sq, stream_idx, frame);

    return ret;
}

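/* Add a new stream to the queue and return its index, or a negative error
 * code. Only limiting streams constrain which frames the other streams may
 * output. */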
int sq_add_stream(SyncQueue *sq, int limiting)
{
    SyncQueueStream *tmp, *st;

    tmp = av_realloc_array(sq->streams, sq->nb_streams + 1, sizeof(*sq->streams));
    if (!tmp)
        return AVERROR(ENOMEM);
    sq->streams = tmp;

    st = &sq->streams[sq->nb_streams];
    memset(st, 0, sizeof(*st));

    st->fifo = (sq->type == SYNC_QUEUE_FRAMES) ?
               av_container_fifo_alloc_avframe(0) : av_container_fifo_alloc_avpacket(0);
    if (!st->fifo)
        return AVERROR(ENOMEM);

    /* we set a valid default, so that a pathological stream that never
     * receives even a real timebase (and no frames) won't stall all other
     * streams forever; cf. overflow_heartbeat() */
    st->tb         = (AVRational){ 1, 1 };
    st->head_ts    = AV_NOPTS_VALUE;
    st->frames_max = UINT64_MAX;
    st->limiting   = limiting;

    sq->have_limiting |= limiting;

    return sq->nb_streams++;
}

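/* Limit the number of output frames for the given stream; the stream is
 * finished as soon as the limit is reached. */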
void sq_limit_frames(SyncQueue *sq, unsigned int stream_idx, uint64_t frames)
{
    SyncQueueStream *st;

    av_assert0(stream_idx < sq->nb_streams);
    st = &sq->streams[stream_idx];

    st->frames_max = frames;
    if (st->frames_sent >= st->frames_max)
        finish_stream(sq, stream_idx);
}

void sq_frame_samples(SyncQueue *sq, unsigned int stream_idx,
                      int frame_samples)
{
    SyncQueueStream *st;

    av_assert0(sq->type == SYNC_QUEUE_FRAMES);
    av_assert0(stream_idx < sq->nb_streams);
    st = &sq->streams[stream_idx];

    st->frame_samples = frame_samples;

    sq->align_mask = av_cpu_max_align() - 1;
}

SyncQueue *sq_alloc(enum SyncQueueType type, int64_t buf_size_us, void *logctx)
{
    SyncQueue *sq = av_mallocz(sizeof(*sq));

    if (!sq)
        return NULL;

    sq->type                 = type;
    sq->buf_size_us          = buf_size_us;
    sq->logctx               = logctx;

    sq->head_stream          = -1;
    sq->head_finished_stream = -1;

    return sq;
}

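/* Free the queue and everything buffered in it, and write NULL into *psq. */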
void sq_free(SyncQueue **psq)
{
    SyncQueue *sq = *psq;

    if (!sq)
        return;

    for (unsigned int i = 0; i < sq->nb_streams; i++)
        av_container_fifo_free(&sq->streams[i].fifo);

    av_freep(&sq->streams);

    av_freep(psq);
}