ffmpeg_filter.c

/*
 * ffmpeg filter configuration
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "ffmpeg.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/display.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/timestamp.h"

// FIXME private header, used for mid_pred()
#include "libavcodec/mathops.h"
typedef struct FilterGraphPriv {
    FilterGraph fg;

    // name used for logging
    char log_name[32];

    int is_simple;
    // true when the filtergraph contains only meta filters
    // that do not modify the frame data
    int is_meta;
    int disable_conversions;

    const char *graph_desc;

    // frame for temporarily holding output from the filtergraph
    AVFrame *frame;
    // frame for sending output to the encoder
    AVFrame *frame_enc;
} FilterGraphPriv;

static FilterGraphPriv *fgp_from_fg(FilterGraph *fg)
{
    return (FilterGraphPriv*)fg;
}

static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
{
    return (const FilterGraphPriv*)fg;
}
typedef struct InputFilterPriv {
    InputFilter ifilter;

    AVFilterContext *filter;

    InputStream *ist;

    // used to hold submitted input
    AVFrame *frame;

    /* for filters that are not yet bound to an input stream,
     * this stores the input linklabel, if any */
    uint8_t *linklabel;

    // filter data type
    enum AVMediaType type;
    // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
    // same as type otherwise
    enum AVMediaType type_src;

    int eof;

    // parameters configured for this input
    int format;
    int width, height;
    AVRational sample_aspect_ratio;
    int sample_rate;
    AVChannelLayout ch_layout;

    AVRational time_base;

    AVFifo *frame_queue;

    AVBufferRef *hw_frames_ctx;

    int displaymatrix_present;
    int32_t displaymatrix[9];

    // fallback parameters to use when no input is ever sent
    struct {
        int format;
        int width;
        int height;
        AVRational sample_aspect_ratio;
        int sample_rate;
        AVChannelLayout ch_layout;
    } fallback;

    struct {
        AVFrame *frame;

        int64_t last_pts;
        int64_t end_pts;

        unsigned int initialize; ///< marks if sub2video_update should force an initialization
    } sub2video;
} InputFilterPriv;

static InputFilterPriv *ifp_from_ifilter(InputFilter *ifilter)
{
    return (InputFilterPriv*)ifilter;
}
typedef struct FPSConvContext {
    AVFrame *last_frame;
    /* number of frames emitted by the video-encoding sync code */
    int64_t frame_number;
    /* history of nb_frames_prev, i.e. the number of times the
     * previous frame was duplicated by vsync code in recent
     * do_video_out() calls */
    int64_t frames_prev_hist[3];

    uint64_t dup_warning;

    int last_dropped;
    int dropped_keyframe;

    AVRational framerate;
    AVRational framerate_max;
    const AVRational *framerate_supported;
    int framerate_clip;
} FPSConvContext;

typedef struct OutputFilterPriv {
    OutputFilter ofilter;

    AVFilterContext *filter;

    /* desired output stream properties */
    int format;
    int width, height;
    int sample_rate;
    AVChannelLayout ch_layout;

    // time base in which the output is sent to our downstream
    // does not need to match the filtersink's timebase
    AVRational tb_out;
    // at least one frame with the above timebase was sent
    // to our downstream, so it cannot change anymore
    int tb_out_locked;

    AVRational sample_aspect_ratio;

    // those are only set if no format is specified and the encoder gives us multiple options
    // They point directly to the relevant lists of the encoder.
    const int *formats;
    const AVChannelLayout *ch_layouts;
    const int *sample_rates;

    AVRational enc_timebase;
    // offset for output timestamps, in AV_TIME_BASE_Q
    int64_t ts_offset;
    int64_t next_pts;
    FPSConvContext fps;

    // set to 1 after at least one frame passed through this output
    int got_frame;
} OutputFilterPriv;

static OutputFilterPriv *ofp_from_ofilter(OutputFilter *ofilter)
{
    return (OutputFilterPriv*)ofilter;
}
static int configure_filtergraph(FilterGraph *fg);

static int sub2video_get_blank_frame(InputFilterPriv *ifp)
{
    AVFrame *frame = ifp->sub2video.frame;
    int ret;

    av_frame_unref(frame);

    frame->width  = ifp->width;
    frame->height = ifp->height;
    frame->format = ifp->format;

    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0)
        return ret;

    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                AVSubtitleRect *r)
{
    uint32_t *pal, *dst2;
    uint8_t *src, *src2;
    int x, y;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
        return;
    }
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
               r->x, r->y, r->w, r->h, w, h);
        return;
    }

    dst += r->y * dst_linesize + r->x * 4;
    src = r->data[0];
    pal = (uint32_t *)r->data[1];
    for (y = 0; y < r->h; y++) {
        dst2 = (uint32_t *)dst;
        src2 = src;
        for (x = 0; x < r->w; x++)
            *(dst2++) = pal[*(src2++)];
        dst += dst_linesize;
        src += r->linesize[0];
    }
}
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
{
    AVFrame *frame = ifp->sub2video.frame;
    int ret;

    av_assert1(frame->data[0]);
    ifp->sub2video.last_pts = frame->pts = pts;
    ret = av_buffersrc_add_frame_flags(ifp->filter, frame,
                                       AV_BUFFERSRC_FLAG_KEEP_REF |
                                       AV_BUFFERSRC_FLAG_PUSH);
    if (ret != AVERROR_EOF && ret < 0)
        av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to the buffer source (%s).\n",
               av_err2str(ret));
}
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
                             const AVSubtitle *sub)
{
    AVFrame *frame = ifp->sub2video.frame;
    uint8_t *dst;
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (sub) {
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ifp->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ifp->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts       = ifp->sub2video.initialize ?
                    heartbeat_pts : ifp->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ifp) < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data[0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ifp, pts);
    ifp->sub2video.end_pts = end_pts;
    ifp->sub2video.initialize = 0;
}
/* *dst may be set to NULL (no pixel format found), to a static string, or to a
 * string backed by the bprint. Nothing has been written to the AVBPrint in case
 * NULL is returned. The AVBPrint provided should be clean. */
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint,
                           const char **dst)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    OutputStream *ost = ofilter->ost;

    *dst = NULL;

    if (ost->keep_pix_fmt || ofp->format != AV_PIX_FMT_NONE) {
        *dst = ofp->format == AV_PIX_FMT_NONE ? NULL :
               av_get_pix_fmt_name(ofp->format);
    } else if (ofp->formats) {
        const enum AVPixelFormat *p = ofp->formats;

        for (; *p != AV_PIX_FMT_NONE; p++) {
            const char *name = av_get_pix_fmt_name(*p);
            av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
        }
        if (!av_bprint_is_complete(bprint))
            return AVERROR(ENOMEM);

        *dst = bprint->str;
    }

    return 0;
}
/* Define a function for appending a list of allowed formats
 * to an AVBPrint. If nonempty, the list will have a header. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint) \
{ \
    if (ofp->var == none && !ofp->supported_list) \
        return; \
    av_bprintf(bprint, #name "="); \
    if (ofp->var != none) { \
        av_bprintf(bprint, printf_format, get_name(ofp->var)); \
    } else { \
        const type *p; \
 \
        for (p = ofp->supported_list; *p != none; p++) { \
            av_bprintf(bprint, printf_format "|", get_name(*p)); \
        } \
        if (bprint->len > 0) \
            bprint->str[--bprint->len] = '\0'; \
    } \
    av_bprint_chars(bprint, ':', 1); \
}

//DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
//                  GET_PIX_FMT_NAME)

DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
                  AV_SAMPLE_FMT_NONE, "%s", av_get_sample_fmt_name)

DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
                  "%d", )

static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
{
    if (av_channel_layout_check(&ofp->ch_layout)) {
        av_bprintf(bprint, "channel_layouts=");
        av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
    } else if (ofp->ch_layouts) {
        const AVChannelLayout *p;

        av_bprintf(bprint, "channel_layouts=");
        for (p = ofp->ch_layouts; p->nb_channels; p++) {
            av_channel_layout_describe_bprint(p, bprint);
            av_bprintf(bprint, "|");
        }
        if (bprint->len > 0)
            bprint->str[--bprint->len] = '\0';
    } else
        return;
    av_bprint_chars(bprint, ':', 1);
}
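
/* Read the entire file at 'path' into a newly allocated buffer returned in
 * *data, with its size in *len. Returns a negative AVERROR code on failure,
 * in which case *data and *len are reset; on success the caller owns the
 * buffer and must release it with av_freep(). */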
static int read_binary(const char *path, uint8_t **data, int *len)
{
    AVIOContext *io = NULL;
    int64_t fsize;
    int ret;

    *data = NULL;
    *len  = 0;

    ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
               path, av_err2str(ret));
        return ret;
    }

    fsize = avio_size(io);
    if (fsize < 0 || fsize > INT_MAX) {
        av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
        ret = AVERROR(EIO);
        goto fail;
    }

    *data = av_malloc(fsize);
    if (!*data) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    ret = avio_read(io, *data, fsize);
    if (ret != fsize) {
        av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
        ret = ret < 0 ? ret : AVERROR(EIO);
        goto fail;
    }

    *len = fsize;
    ret  = 0;
fail:
    avio_close(io);
    if (ret < 0) {
        av_freep(data);
        *len = 0;
    }
    return ret;
}
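
/* Apply a single option to a filter instance. As a special case, an option
 * name starting with '/' is looked up without the slash and its value is
 * interpreted as a file path from which the actual option value is loaded
 * (binary options via read_binary(), everything else as text). */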
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
{
    const AVOption *o = NULL;
    int ret;

    ret = av_opt_set(f, key, val, AV_OPT_SEARCH_CHILDREN);
    if (ret >= 0)
        return 0;

    if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
        o = av_opt_find(f, key + 1, NULL, 0, AV_OPT_SEARCH_CHILDREN);
    if (!o)
        goto err_apply;

    // key is a valid option name prefixed with '/'
    // interpret value as a path from which to load the actual option value
    key++;

    if (o->type == AV_OPT_TYPE_BINARY) {
        uint8_t *data;
        int len;

        ret = read_binary(val, &data, &len);
        if (ret < 0)
            goto err_load;

        ret = av_opt_set_bin(f, key, data, len, AV_OPT_SEARCH_CHILDREN);
        av_freep(&data);
    } else {
        char *data = file_read(val);
        if (!data) {
            ret = AVERROR(EIO);
            goto err_load;
        }

        ret = av_opt_set(f, key, data, AV_OPT_SEARCH_CHILDREN);
        av_freep(&data);
    }
    if (ret < 0)
        goto err_apply;

    return 0;

err_apply:
    av_log(NULL, AV_LOG_ERROR,
           "Error applying option '%s' to filter '%s': %s\n",
           key, f->filter->name, av_err2str(ret));
    return ret;
err_load:
    av_log(NULL, AV_LOG_ERROR,
           "Error loading value for option '%s' from file '%s'\n",
           key, val);
    return ret;
}
static int graph_opts_apply(AVFilterGraphSegment *seg)
{
    for (size_t i = 0; i < seg->nb_chains; i++) {
        AVFilterChain *ch = seg->chains[i];

        for (size_t j = 0; j < ch->nb_filters; j++) {
            AVFilterParams *p = ch->filters[j];
            const AVDictionaryEntry *e = NULL;

            av_assert0(p->filter);

            while ((e = av_dict_iterate(p->opts, e))) {
                int ret = filter_opt_apply(p->filter, e->key, e->value);
                if (ret < 0)
                    return ret;
            }

            av_dict_free(&p->opts);
        }
    }

    return 0;
}
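
/* Parse a filtergraph description into 'graph': create all filters from the
 * parsed segment, attach the hardware device context to filters that declare
 * AVFILTER_FLAG_HWDEVICE, apply per-filter options, and return the graph's
 * unlinked inputs and outputs to the caller. */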
static int graph_parse(AVFilterGraph *graph, const char *desc,
                       AVFilterInOut **inputs, AVFilterInOut **outputs,
                       AVBufferRef *hw_device)
{
    AVFilterGraphSegment *seg;
    int ret;

    *inputs  = NULL;
    *outputs = NULL;

    ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
    if (ret < 0)
        return ret;

    ret = avfilter_graph_segment_create_filters(seg, 0);
    if (ret < 0)
        goto fail;

    if (hw_device) {
        for (int i = 0; i < graph->nb_filters; i++) {
            AVFilterContext *f = graph->filters[i];

            if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
                continue;
            f->hw_device_ctx = av_buffer_ref(hw_device);
            if (!f->hw_device_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    ret = graph_opts_apply(seg);
    if (ret < 0)
        goto fail;

    ret = avfilter_graph_segment_apply(seg, 0, inputs, outputs);

fail:
    avfilter_graph_segment_free(&seg);
    return ret;
}
// Filters can be configured only if the formats of all inputs are known.
static int ifilter_has_all_input_formats(FilterGraph *fg)
{
    int i;

    for (i = 0; i < fg->nb_inputs; i++) {
        InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
        if (ifp->format < 0)
            return 0;
    }
    return 1;
}

static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
{
    AVFilterContext *ctx = inout->filter_ctx;
    AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
    int nb_pads       = in ? ctx->nb_inputs  : ctx->nb_outputs;

    if (nb_pads > 1)
        return av_strdup(ctx->filter->name);
    return av_asprintf("%s:%s", ctx->filter->name,
                       avfilter_pad_get_name(pads, inout->pad_idx));
}

static OutputFilter *ofilter_alloc(FilterGraph *fg)
{
    OutputFilterPriv *ofp;
    OutputFilter *ofilter;

    ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
    if (!ofp)
        return NULL;

    ofilter           = &ofp->ofilter;
    ofilter->graph    = fg;
    ofp->format       = -1;
    ofilter->last_pts = AV_NOPTS_VALUE;

    return ofilter;
}
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int ret;

    av_assert0(!ifp->ist);

    ifp->ist      = ist;
    ifp->type_src = ist->st->codecpar->codec_type;

    ret = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph));
    if (ret < 0)
        return ret;

    if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
        ifp->sub2video.frame = av_frame_alloc();
        if (!ifp->sub2video.frame)
            return AVERROR(ENOMEM);
    }

    return 0;
}

static int set_channel_layout(OutputFilterPriv *f, OutputStream *ost)
{
    const AVCodec *c = ost->enc_ctx->codec;
    int i, err;

    if (ost->enc_ctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
        /* Pass the layout through for all orders but UNSPEC */
        err = av_channel_layout_copy(&f->ch_layout, &ost->enc_ctx->ch_layout);
        if (err < 0)
            return err;
        return 0;
    }

    /* Requested layout is of order UNSPEC */
    if (!c->ch_layouts) {
        /* Use the default native layout for the requested amount of channels when the
           encoder doesn't have a list of supported layouts */
        av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
        return 0;
    }
    /* Encoder has a list of supported layouts. Pick the first layout in it with the
       same amount of channels as the requested layout */
    for (i = 0; c->ch_layouts[i].nb_channels; i++) {
        if (c->ch_layouts[i].nb_channels == ost->enc_ctx->ch_layout.nb_channels)
            break;
    }
    if (c->ch_layouts[i].nb_channels) {
        /* Use it if one is found */
        err = av_channel_layout_copy(&f->ch_layout, &c->ch_layouts[i]);
        if (err < 0)
            return err;
        return 0;
    }
    /* If no layout for the amount of channels requested was found, use the default
       native layout for it. */
    av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);

    return 0;
}
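
/* Bind an output filter to an output stream: record the encoder's constraints
 * (pixel/sample format, sample rate, channel layout, frame-rate limits) as the
 * desired output properties, then configure the whole filtergraph once all
 * inputs have known formats and every output is bound. */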
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost)
{
    const OutputFile *of  = output_files[ost->file_index];
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    FilterGraph *fg       = ofilter->graph;
    FilterGraphPriv *fgp  = fgp_from_fg(fg);
    const AVCodec *c      = ost->enc_ctx->codec;

    av_assert0(!ofilter->ost);

    ofilter->ost = ost;
    av_freep(&ofilter->linklabel);

    ofp->ts_offset    = of->start_time == AV_NOPTS_VALUE ? 0 : of->start_time;
    ofp->enc_timebase = ost->enc_timebase;

    switch (ost->enc_ctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        ofp->width  = ost->enc_ctx->width;
        ofp->height = ost->enc_ctx->height;
        if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
            ofp->format = ost->enc_ctx->pix_fmt;
        } else {
            ofp->formats = c->pix_fmts;

            // MJPEG encoder exports a full list of supported pixel formats,
            // but the full-range ones are experimental-only.
            // Restrict the auto-conversion list unless -strict experimental
            // has been specified.
            if (!strcmp(c->name, "mjpeg")) {
                // FIXME: YUV420P etc. are actually supported with full color range,
                // yet the latter information isn't available here.
                static const enum AVPixelFormat mjpeg_formats[] =
                    { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
                      AV_PIX_FMT_NONE };

                const AVDictionaryEntry *strict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
                int strict_val = ost->enc_ctx->strict_std_compliance;

                if (strict) {
                    const AVOption *o = av_opt_find(ost->enc_ctx, strict->key, NULL, 0, 0);
                    av_assert0(o);
                    av_opt_eval_int(ost->enc_ctx, o, strict->value, &strict_val);
                }

                if (strict_val > FF_COMPLIANCE_UNOFFICIAL)
                    ofp->formats = mjpeg_formats;
            }
        }

        fgp->disable_conversions |= ost->keep_pix_fmt;

        ofp->fps.last_frame = av_frame_alloc();
        if (!ofp->fps.last_frame)
            return AVERROR(ENOMEM);

        ofp->fps.framerate           = ost->frame_rate;
        ofp->fps.framerate_max       = ost->max_frame_rate;
        ofp->fps.framerate_supported = ost->force_fps ?
                                       NULL : c->supported_framerates;

        // reduce frame rate for mpeg4 to be within the spec limits
        if (c->id == AV_CODEC_ID_MPEG4)
            ofp->fps.framerate_clip = 65535;

        ofp->fps.dup_warning = 1000;

        break;
    case AVMEDIA_TYPE_AUDIO:
        if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
            ofp->format = ost->enc_ctx->sample_fmt;
        } else {
            ofp->formats = c->sample_fmts;
        }
        if (ost->enc_ctx->sample_rate) {
            ofp->sample_rate = ost->enc_ctx->sample_rate;
        } else {
            ofp->sample_rates = c->supported_samplerates;
        }
        if (ost->enc_ctx->ch_layout.nb_channels) {
            int ret = set_channel_layout(ofp, ost);
            if (ret < 0)
                return ret;
        } else if (c->ch_layouts) {
            ofp->ch_layouts = c->ch_layouts;
        }
        break;
    }

    // if we have all input parameters and all outputs are bound,
    // the graph can now be configured
    if (ifilter_has_all_input_formats(fg)) {
        int ret;

        for (int i = 0; i < fg->nb_outputs; i++)
            if (!fg->outputs[i]->ost)
                return 0;

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
                   av_err2str(ret));
            return ret;
        }
    }

    return 0;
}
static InputFilter *ifilter_alloc(FilterGraph *fg)
{
    InputFilterPriv *ifp;
    InputFilter *ifilter;

    ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
    if (!ifp)
        return NULL;

    ifilter        = &ifp->ifilter;
    ifilter->graph = fg;

    ifp->frame = av_frame_alloc();
    if (!ifp->frame)
        return NULL;

    ifp->format          = -1;
    ifp->fallback.format = -1;

    ifp->frame_queue = av_fifo_alloc2(8, sizeof(AVFrame*), AV_FIFO_FLAG_AUTO_GROW);
    if (!ifp->frame_queue)
        return NULL;

    return ifilter;
}

void fg_free(FilterGraph **pfg)
{
    FilterGraph *fg = *pfg;
    FilterGraphPriv *fgp;

    if (!fg)
        return;
    fgp = fgp_from_fg(fg);

    avfilter_graph_free(&fg->graph);
    for (int j = 0; j < fg->nb_inputs; j++) {
        InputFilter *ifilter = fg->inputs[j];
        InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

        if (ifp->frame_queue) {
            AVFrame *frame;
            while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
                av_frame_free(&frame);
            av_fifo_freep2(&ifp->frame_queue);
        }
        av_frame_free(&ifp->sub2video.frame);

        av_channel_layout_uninit(&ifp->fallback.ch_layout);

        av_frame_free(&ifp->frame);

        av_buffer_unref(&ifp->hw_frames_ctx);
        av_freep(&ifp->linklabel);
        av_freep(&ifilter->name);
        av_freep(&fg->inputs[j]);
    }
    av_freep(&fg->inputs);
    for (int j = 0; j < fg->nb_outputs; j++) {
        OutputFilter *ofilter = fg->outputs[j];
        OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);

        av_frame_free(&ofp->fps.last_frame);

        av_freep(&ofilter->linklabel);
        av_freep(&ofilter->name);
        av_channel_layout_uninit(&ofp->ch_layout);
        av_freep(&fg->outputs[j]);
    }
    av_freep(&fg->outputs);
    av_freep(&fgp->graph_desc);

    av_frame_free(&fgp->frame);
    av_frame_free(&fgp->frame_enc);

    av_freep(pfg);
}
static const char *fg_item_name(void *obj)
{
    const FilterGraphPriv *fgp = obj;

    return fgp->log_name;
}

static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

int fg_create(FilterGraph **pfg, char *graph_desc)
{
    FilterGraphPriv *fgp;
    FilterGraph *fg;

    AVFilterInOut *inputs, *outputs;
    AVFilterGraph *graph;
    int ret = 0;

    fgp = allocate_array_elem(&filtergraphs, sizeof(*fgp), &nb_filtergraphs);
    if (!fgp)
        return AVERROR(ENOMEM);
    fg = &fgp->fg;

    if (pfg)
        *pfg = fg;

    fg->class       = &fg_class;
    fg->index       = nb_filtergraphs - 1;
    fgp->graph_desc = graph_desc;
    fgp->disable_conversions = !auto_conversion_filters;

    snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);

    fgp->frame     = av_frame_alloc();
    fgp->frame_enc = av_frame_alloc();
    if (!fgp->frame || !fgp->frame_enc)
        return AVERROR(ENOMEM);

    /* this graph is only used for determining the kinds of inputs
     * and outputs we have, and is discarded on exit from this function */
    graph = avfilter_graph_alloc();
    if (!graph)
        return AVERROR(ENOMEM);
    graph->nb_threads = 1;

    ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
    if (ret < 0)
        goto fail;

    for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
        InputFilter *const ifilter = ifilter_alloc(fg);
        InputFilterPriv *ifp;

        if (!ifilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ifp            = ifp_from_ifilter(ifilter);
        ifp->linklabel = cur->name;
        cur->name      = NULL;

        ifp->type      = avfilter_pad_get_type(cur->filter_ctx->input_pads,
                                               cur->pad_idx);
        ifilter->name  = describe_filter_link(fg, cur, 1);
        if (!ifilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
        OutputFilter *const ofilter = ofilter_alloc(fg);

        if (!ofilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ofilter->linklabel = cur->name;
        cur->name          = NULL;

        ofilter->type      = avfilter_pad_get_type(cur->filter_ctx->output_pads,
                                                   cur->pad_idx);
        ofilter->name      = describe_filter_link(fg, cur, 0);
        if (!ofilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!fg->nb_outputs) {
        av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
        ret = AVERROR(ENOSYS);
        goto fail;
    }

fail:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    avfilter_graph_free(&graph);

    if (ret < 0)
        return ret;

    return 0;
}
int init_simple_filtergraph(InputStream *ist, OutputStream *ost,
                            char *graph_desc)
{
    FilterGraph *fg;
    FilterGraphPriv *fgp;
    int ret;

    ret = fg_create(&fg, graph_desc);
    if (ret < 0)
        return ret;
    fgp = fgp_from_fg(fg);

    fgp->is_simple = 1;

    snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf#%d:%d",
             av_get_media_type_string(ost->type)[0],
             ost->file_index, ost->index);

    if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
        av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
               "to have exactly 1 input and 1 output. "
               "However, it had %d input(s) and %d output(s). Please adjust, "
               "or use a complex filtergraph (-filter_complex) instead.\n",
               graph_desc, fg->nb_inputs, fg->nb_outputs);
        return AVERROR(EINVAL);
    }

    ost->filter = fg->outputs[0];

    ret = ifilter_bind_ist(fg->inputs[0], ist);
    if (ret < 0)
        return ret;

    ret = ofilter_bind_ost(fg->outputs[0], ost);
    if (ret < 0)
        return ret;

    return 0;
}
static int init_input_filter(FilterGraph *fg, InputFilter *ifilter)
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    InputStream *ist = NULL;
    enum AVMediaType type = ifp->type;
    int i, ret;

    // TODO: support other filter types
    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
        av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
               "currently.\n");
        return AVERROR(ENOSYS);
    }

    if (ifp->linklabel) {
        AVFormatContext *s;
        AVStream *st = NULL;
        char *p;
        int file_idx = strtol(ifp->linklabel, &p, 0);

        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
                   file_idx, fgp->graph_desc);
            return AVERROR(EINVAL);
        }
        s = input_files[file_idx]->ctx;

        for (i = 0; i < s->nb_streams; i++) {
            enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
            if (stream_type != type &&
                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
                continue;
            if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                st = s->streams[i];
                break;
            }
        }
        if (!st) {
            av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fgp->graph_desc);
            return AVERROR(EINVAL);
        }
        ist = input_files[file_idx]->streams[st->index];
    } else {
        ist = ist_find_unused(type);
        if (!ist) {
            av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
                   "unlabeled input pad %s\n", ifilter->name);
            return AVERROR(EINVAL);
        }
    }
    av_assert0(ist);

    ret = ifilter_bind_ist(ifilter, ist);
    if (ret < 0) {
        av_log(fg, AV_LOG_ERROR,
               "Error binding an input stream to complex filtergraph input %s.\n",
               ifilter->name);
        return ret;
    }

    return 0;
}

int init_complex_filtergraph(FilterGraph *fg)
{
    // bind filtergraph inputs to input streams
    for (int i = 0; i < fg->nb_inputs; i++) {
        int ret = init_input_filter(fg, fg->inputs[i]);
        if (ret < 0)
            return ret;
    }

    return 0;
}
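
/* Insert a trim (video) or atrim (audio) filter after *last_filter to enforce
 * the given start time and duration; a no-op when neither limit is set. On
 * success *last_filter and *pad_idx are updated to the new chain tail. */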
static int insert_trim(int64_t start_time, int64_t duration,
                       AVFilterContext **last_filter, int *pad_idx,
                       const char *filter_name)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    AVFilterContext *ctx;
    const AVFilter *trim;
    enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
    const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
    int ret = 0;

    if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
        return 0;

    trim = avfilter_get_by_name(name);
    if (!trim) {
        av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
               "recording time.\n", name);
        return AVERROR_FILTER_NOT_FOUND;
    }

    ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
    if (!ctx)
        return AVERROR(ENOMEM);

    if (duration != INT64_MAX) {
        ret = av_opt_set_int(ctx, "durationi", duration,
                             AV_OPT_SEARCH_CHILDREN);
    }
    if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
        ret = av_opt_set_int(ctx, "starti", start_time,
                             AV_OPT_SEARCH_CHILDREN);
    }
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
        return ret;
    }

    ret = avfilter_init_str(ctx, NULL);
    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}

static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
                         const char *filter_name, const char *args)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    AVFilterContext *ctx;
    int ret;

    ret = avfilter_graph_create_filter(&ctx,
                                       avfilter_get_by_name(filter_name),
                                       filter_name, args, NULL, graph);
    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}
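
/* Build the output side of a video chain: create the buffersink, optionally
 * insert a scaler for the requested output size, a format filter restricting
 * pixel formats to what the encoder accepts, and a trim filter for the output
 * start/recording time, then link the chain to the sink. */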
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    OutputStream *ost = ofilter->ost;
    OutputFile *of = output_files[ost->file_index];
    AVFilterContext *last_filter = out->filter_ctx;
    AVBPrint bprint;
    int pad_idx = out->pad_idx;
    int ret;
    const char *pix_fmts;
    char name[255];

    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofp->filter,
                                       avfilter_get_by_name("buffersink"),
                                       name, NULL, NULL, fg->graph);
    if (ret < 0)
        return ret;

    if ((ofp->width || ofp->height) && ofilter->ost->autoscale) {
        char args[255];
        AVFilterContext *filter;
        const AVDictionaryEntry *e = NULL;

        snprintf(args, sizeof(args), "%d:%d",
                 ofp->width, ofp->height);

        while ((e = av_dict_iterate(ost->sws_dict, e))) {
            av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
        }

        snprintf(name, sizeof(name), "scaler_out_%d_%d",
                 ost->file_index, ost->index);
        if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
                                                name, args, NULL, fg->graph)) < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
    ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
    if (ret < 0)
        return ret;

    if (pix_fmts) {
        AVFilterContext *filter;

        ret = avfilter_graph_create_filter(&filter,
                                           avfilter_get_by_name("format"),
                                           "format", pix_fmts, NULL, fg->graph);
        av_bprint_finalize(&bprint, NULL);
        if (ret < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx     = 0;
    }

    snprintf(name, sizeof(name), "trim_out_%d_%d",
             ost->file_index, ost->index);
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
        return ret;

    return 0;
}
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    OutputStream *ost = ofilter->ost;
    OutputFile *of = output_files[ost->file_index];
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    AVBPrint args;
    char name[255];
    int ret;

    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofp->filter,
                                       avfilter_get_by_name("abuffersink"),
                                       name, NULL, NULL, fg->graph);
    if (ret < 0)
        return ret;
    if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
        return ret;

#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
    AVFilterContext *filt_ctx; \
 \
    av_log(fg, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
           "similarly to -af " filter_name "=%s.\n", arg); \
 \
    ret = avfilter_graph_create_filter(&filt_ctx, \
                                       avfilter_get_by_name(filter_name), \
                                       filter_name, arg, NULL, fg->graph); \
    if (ret < 0) \
        goto fail; \
 \
    ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
    if (ret < 0) \
        goto fail; \
 \
    last_filter = filt_ctx; \
    pad_idx = 0; \
} while (0)

    av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);

#if FFMPEG_OPT_MAP_CHANNEL
    if (ost->audio_channels_mapped) {
        AVChannelLayout mapped_layout = { 0 };
        int i;
        av_channel_layout_default(&mapped_layout, ost->audio_channels_mapped);
        av_channel_layout_describe_bprint(&mapped_layout, &args);
        for (i = 0; i < ost->audio_channels_mapped; i++)
            if (ost->audio_channels_map[i] != -1)
                av_bprintf(&args, "|c%d=c%d", i, ost->audio_channels_map[i]);

        AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
        av_bprint_clear(&args);
    }
#endif

    choose_sample_fmts(ofp, &args);
    choose_sample_rates(ofp, &args);
    choose_channel_layouts(ofp, &args);
    if (!av_bprint_is_complete(&args)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    if (args.len) {
        AVFilterContext *format;

        snprintf(name, sizeof(name), "format_out_%d_%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&format,
                                           avfilter_get_by_name("aformat"),
                                           name, args.str, NULL, fg->graph);
        if (ret < 0)
            goto fail;

        ret = avfilter_link(last_filter, pad_idx, format, 0);
        if (ret < 0)
            goto fail;

        last_filter = format;
        pad_idx = 0;
    }

    if (ost->apad && of->shortest) {
        int i;

        for (i = 0; i < of->nb_streams; i++)
            if (of->streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                break;

        if (i < of->nb_streams) {
            AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
        }
    }

    snprintf(name, sizeof(name), "trim for output stream %d:%d",
             ost->file_index, ost->index);
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        goto fail;

    if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
        goto fail;
fail:
    av_bprint_finalize(&args, NULL);

    return ret;
}
static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
                                   AVFilterInOut *out)
{
    if (!ofilter->ost) {
        av_log(fg, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
        return AVERROR(EINVAL);
    }

    switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
    case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
    default: av_assert0(0); return 0;
    }
}

int check_filter_outputs(void)
{
    int i;
    for (i = 0; i < nb_filtergraphs; i++) {
        int n;
        for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
            OutputFilter *output = filtergraphs[i]->outputs[n];
            if (!output->ost) {
                av_log(filtergraphs[i], AV_LOG_FATAL,
                       "Filter %s has an unconnected output\n", output->name);
                return AVERROR(EINVAL);
            }
        }
    }

    return 0;
}
static void sub2video_prepare(InputFilterPriv *ifp)
{
    ifp->sub2video.last_pts = INT64_MIN;
    ifp->sub2video.end_pts  = INT64_MIN;

    /* sub2video structure has been (re-)initialized.
       Mark it as such so that the system will be
       initialized with the first received heartbeat. */
    ifp->sub2video.initialize = 1;
}

static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    AVFilterContext *last_filter;
    const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
    const AVPixFmtDescriptor *desc;
    InputStream *ist = ifp->ist;
    InputFile *f = input_files[ist->file_index];
    AVRational fr = ist->framerate;
    AVRational sar;
    AVBPrint args;
    char name[255];
    int ret, pad_idx = 0;
    int64_t tsoffset = 0;
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();

    if (!par)
        return AVERROR(ENOMEM);
    memset(par, 0, sizeof(*par));
    par->format = AV_PIX_FMT_NONE;

    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        av_log(fg, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!fr.num)
        fr = ist->framerate_guessed;

    if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
        sub2video_prepare(ifp);

    ifp->time_base = ist->framerate.num ? av_inv_q(ist->framerate) :
                                          ist->st->time_base;

    sar = ifp->sample_aspect_ratio;
    if (!sar.den)
        sar = (AVRational){0, 1};
    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&args,
               "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
               "pixel_aspect=%d/%d",
               ifp->width, ifp->height, ifp->format,
               ifp->time_base.num, ifp->time_base.den, sar.num, sar.den);
    if (fr.num && fr.den)
        av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->index);

    if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
                                            args.str, NULL, fg->graph)) < 0)
        goto fail;
    par->hw_frames_ctx = ifp->hw_frames_ctx;
    ret = av_buffersrc_parameters_set(ifp->filter, par);
    if (ret < 0)
        goto fail;
    av_freep(&par);
    last_filter = ifp->filter;

    desc = av_pix_fmt_desc_get(ifp->format);
    av_assert0(desc);

    // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
    if (ist->autorotate && !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
        const AVPacketSideData *sd = NULL;
        int32_t *displaymatrix = ifp->displaymatrix;
        double theta;

        if (!ifp->displaymatrix_present)
            sd = av_packet_side_data_get(ist->st->codecpar->coded_side_data,
                                         ist->st->codecpar->nb_coded_side_data,
                                         AV_PKT_DATA_DISPLAYMATRIX);
        if (sd)
            displaymatrix = (int32_t *)sd->data;
        theta = get_rotation(displaymatrix);

        if (fabs(theta - 90) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose",
                                displaymatrix[3] > 0 ? "cclock_flip" : "clock");
        } else if (fabs(theta - 180) < 1.0) {
            if (displaymatrix[0] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
                if (ret < 0)
                    return ret;
            }
            if (displaymatrix[4] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
            }
        } else if (fabs(theta - 270) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose",
                                displaymatrix[3] < 0 ? "clock_flip" : "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
        } else if (fabs(theta) < 1.0) {
            if (displaymatrix && displaymatrix[4] < 0) {
                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
            }
        }
        if (ret < 0)
            return ret;
    }

    snprintf(name, sizeof(name), "trim_in_%d_%d",
             ist->file_index, ist->index);
    if (copy_ts) {
        tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
        if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
            tsoffset += f->ctx->start_time;
    }
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : tsoffset, f->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;
    return 0;
fail:
    av_freep(&par);

    return ret;
}
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    AVFilterContext *last_filter;
    const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
    InputStream *ist = ifp->ist;
    InputFile *f = input_files[ist->file_index];
    AVBPrint args;
    char name[255];
    int ret, pad_idx = 0;
    int64_t tsoffset = 0;

    if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
        av_log(fg, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
        return AVERROR(EINVAL);
    }

    ifp->time_base = (AVRational){ 1, ifp->sample_rate };

    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
               ifp->time_base.num, ifp->time_base.den,
               ifp->sample_rate,
               av_get_sample_fmt_name(ifp->format));
    if (av_channel_layout_check(&ifp->ch_layout) &&
        ifp->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
        av_bprintf(&args, ":channel_layout=");
        av_channel_layout_describe_bprint(&ifp->ch_layout, &args);
    } else
        av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
    snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
             ist->file_index, ist->index);

    if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
                                            name, args.str, NULL,
                                            fg->graph)) < 0)
        return ret;
    last_filter = ifp->filter;

    snprintf(name, sizeof(name), "trim for input stream %d:%d",
             ist->file_index, ist->index);
    if (copy_ts) {
        tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
        if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
            tsoffset += f->ctx->start_time;
    }
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : tsoffset, f->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;

    return 0;
}
static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
                                  AVFilterInOut *in)
{
    switch (ifp_from_ifilter(ifilter)->type) {
    case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
    case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
    default: av_assert0(0); return 0;
    }
}
static void cleanup_filtergraph(FilterGraph *fg)
{
    int i;
    for (i = 0; i < fg->nb_outputs; i++)
        ofp_from_ofilter(fg->outputs[i])->filter = NULL;
    for (i = 0; i < fg->nb_inputs; i++)
        ifp_from_ifilter(fg->inputs[i])->filter = NULL;
    avfilter_graph_free(&fg->graph);
}
static int filter_is_buffersrc(const AVFilterContext *f)
{
    return f->nb_inputs == 0 &&
           (!strcmp(f->filter->name, "buffer") ||
            !strcmp(f->filter->name, "abuffer"));
}
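
/* Returns 1 if the graph only inspects or produces metadata: every filter is
 * either flagged AVFILTER_FLAG_METADATA_ONLY, a sink, or a buffer/abuffer
 * source. configure_filtergraph() stores the result in fgp->is_meta, which
 * fg_output_step() consults before trusting the decoder-provided
 * bits_per_raw_sample. */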
static int graph_is_meta(AVFilterGraph *graph)
{
    for (unsigned i = 0; i < graph->nb_filters; i++) {
        const AVFilterContext *f = graph->filters[i];

        /* in addition to filters flagged as meta, also
         * disregard sinks and buffersources (but not other sources,
         * since they introduce data we are not aware of)
         */
        if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
              f->nb_outputs == 0 ||
              filter_is_buffersrc(f)))
            return 0;
    }
    return 1;
}
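
/* (Re)create the filtergraph from its textual description: allocate a fresh
 * AVFilterGraph, apply threading and sws/swr options for simple graphs, parse
 * the description, configure every input and output, and finally push any
 * frames (and EOFs) that were queued while the graph was unconfigured. */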
static int configure_filtergraph(FilterGraph *fg)
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    AVBufferRef *hw_device;
    AVFilterInOut *inputs, *outputs, *cur;
    int ret, i, simple = filtergraph_is_simple(fg);
    const char *graph_desc = fgp->graph_desc;

    cleanup_filtergraph(fg);
    if (!(fg->graph = avfilter_graph_alloc()))
        return AVERROR(ENOMEM);

    if (simple) {
        OutputStream *ost = fg->outputs[0]->ost;

        if (filter_nbthreads) {
            ret = av_opt_set(fg->graph, "threads", filter_nbthreads, 0);
            if (ret < 0)
                goto fail;
        } else {
            const AVDictionaryEntry *e = NULL;
            e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
            if (e)
                av_opt_set(fg->graph, "threads", e->value, 0);
        }

        if (av_dict_count(ost->sws_dict)) {
            ret = av_dict_get_string(ost->sws_dict,
                                     &fg->graph->scale_sws_opts,
                                     '=', ':');
            if (ret < 0)
                goto fail;
        }

        if (av_dict_count(ost->swr_opts)) {
            char *args;
            ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
            if (ret < 0)
                goto fail;
            av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
            av_free(args);
        }
    } else {
        fg->graph->nb_threads = filter_complex_nbthreads;
    }

    hw_device = hw_device_for_filter();

    if ((ret = graph_parse(fg->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
        goto fail;

    for (cur = inputs, i = 0; cur; cur = cur->next, i++)
        if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
            avfilter_inout_free(&inputs);
            avfilter_inout_free(&outputs);
            goto fail;
        }
    avfilter_inout_free(&inputs);

    for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
        ret = configure_output_filter(fg, fg->outputs[i], cur);
        if (ret < 0) {
            avfilter_inout_free(&outputs);
            goto fail;
        }
    }
    avfilter_inout_free(&outputs);

    if (fgp->disable_conversions)
        avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE);
    if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
        goto fail;

    fgp->is_meta = graph_is_meta(fg->graph);

    /* limit the lists of allowed formats to the ones selected, to
     * make sure they stay the same if the filtergraph is reconfigured later */
    for (i = 0; i < fg->nb_outputs; i++) {
        OutputFilter *ofilter = fg->outputs[i];
        OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
        AVFilterContext *sink = ofp->filter;

        ofp->format = av_buffersink_get_format(sink);
        ofp->width  = av_buffersink_get_w(sink);
        ofp->height = av_buffersink_get_h(sink);

        // If the timing parameters are not locked yet, get the tentative values
        // here but don't lock them. They will only be used if no output frames
        // are ever produced.
        if (!ofp->tb_out_locked) {
            AVRational fr = av_buffersink_get_frame_rate(sink);
            if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
                fr.num > 0 && fr.den > 0)
                ofp->fps.framerate = fr;
            ofp->tb_out = av_buffersink_get_time_base(sink);
        }
        ofp->sample_aspect_ratio = av_buffersink_get_sample_aspect_ratio(sink);

        ofp->sample_rate = av_buffersink_get_sample_rate(sink);
        av_channel_layout_uninit(&ofp->ch_layout);
        ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
        if (ret < 0)
            goto fail;
    }

    for (i = 0; i < fg->nb_inputs; i++) {
        InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
        AVFrame *tmp;
        while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
            if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
                sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)tmp->buf[0]->data);
            } else {
                ret = av_buffersrc_add_frame(ifp->filter, tmp);
            }
            av_frame_free(&tmp);
            if (ret < 0)
                goto fail;
        }
    }

    /* send the EOFs for the finished inputs */
    for (i = 0; i < fg->nb_inputs; i++) {
        InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
        if (ifp->eof) {
            ret = av_buffersrc_add_frame(ifp->filter, NULL);
            if (ret < 0)
                goto fail;
        }
    }

    return 0;

fail:
    cleanup_filtergraph(fg);
    return ret;
}
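
/* Record fallback input parameters from the decoder context; they are only
 * used if the filtergraph has to be configured before any frame was decoded
 * (see ifilter_send_eof()). For subtitle (sub2video) inputs the actual canvas
 * size and pixel format are set instead. */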
int ifilter_parameters_from_dec(InputFilter *ifilter, const AVCodecContext *dec)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

    if (dec->codec_type == AVMEDIA_TYPE_VIDEO) {
        ifp->fallback.format              = dec->pix_fmt;
        ifp->fallback.width               = dec->width;
        ifp->fallback.height              = dec->height;
        ifp->fallback.sample_aspect_ratio = dec->sample_aspect_ratio;
    } else if (dec->codec_type == AVMEDIA_TYPE_AUDIO) {
        int ret;

        ifp->fallback.format      = dec->sample_fmt;
        ifp->fallback.sample_rate = dec->sample_rate;

        ret = av_channel_layout_copy(&ifp->fallback.ch_layout, &dec->ch_layout);
        if (ret < 0)
            return ret;
    } else {
        // for subtitles (i.e. sub2video) we set the actual parameters,
        // rather than just fallback
        ifp->width  = ifp->ist->sub2video.w;
        ifp->height = ifp->ist->sub2video.h;

        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
           palettes for all rectangles are identical or compatible */
        ifp->format = AV_PIX_FMT_RGB32;

        av_log(NULL, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n", ifp->width, ifp->height);
    }

    return 0;
}
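
/* Take the input parameters (format, dimensions, sample rate, channel layout,
 * hw frames context, display matrix) from an actual decoded frame. */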
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    AVFrameSideData *sd;
    int ret;

    ret = av_buffer_replace(&ifp->hw_frames_ctx, frame->hw_frames_ctx);
    if (ret < 0)
        return ret;

    ifp->format              = frame->format;
    ifp->width               = frame->width;
    ifp->height              = frame->height;
    ifp->sample_aspect_ratio = frame->sample_aspect_ratio;
    ifp->sample_rate         = frame->sample_rate;
    ret = av_channel_layout_copy(&ifp->ch_layout, &frame->ch_layout);
    if (ret < 0)
        return ret;

    sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
    if (sd)
        memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
    ifp->displaymatrix_present = !!sd;

    return 0;
}
int filtergraph_is_simple(const FilterGraph *fg)
{
    const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
    return fgp->is_simple;
}
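
/* Forward a filter command to the graph: sent immediately when time < 0,
 * otherwise queued for execution at the given time (queuing is only supported
 * when targeting all filters). */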
void fg_send_command(FilterGraph *fg, double time, const char *target,
                     const char *command, const char *arg, int all_filters)
{
    int ret;

    if (!fg->graph)
        return;

    if (time < 0) {
        char response[4096];
        ret = avfilter_graph_send_command(fg->graph, target, command, arg,
                                          response, sizeof(response),
                                          all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
                fg->index, ret, response);
    } else if (!all_filters) {
        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
    } else {
        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
        if (ret < 0)
            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
    }
}
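
/* Pick the output (encoder) timebase for this output on the first frame, in
 * order of preference: an explicit -enc_time_base value (or the demuxer/filter
 * timebase it refers to), then 1/sample_rate for audio or the inverse of the
 * chosen output framerate for video, and finally the frame's own timebase as
 * a last resort. */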
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
{
    OutputFilter *ofilter = &ofp->ofilter;
    FPSConvContext *fps = &ofp->fps;
    AVRational tb = (AVRational){ 0, 0 };
    AVRational fr;
    FrameData *fd;

    fd = frame_data(frame);

    // apply -enc_time_base
    if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
        (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
        av_log(ofilter->ost, AV_LOG_ERROR,
               "Demuxing timebase not available - cannot use it for encoding\n");
        return AVERROR(EINVAL);
    }

    switch (ofp->enc_timebase.num) {
    case 0:                    break;
    case ENC_TIME_BASE_DEMUX:  tb = fd->dec.tb;        break;
    case ENC_TIME_BASE_FILTER: tb = frame->time_base;  break;
    default:                   tb = ofp->enc_timebase; break;
    }

    if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
        tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
        goto finish;
    }

    fr = fps->framerate;
    if (!fr.num) {
        AVRational fr_sink = av_buffersink_get_frame_rate(ofp->filter);
        if (fr_sink.num > 0 && fr_sink.den > 0)
            fr = fr_sink;
    }

    if (ofilter->ost->is_cfr) {
        if (!fr.num && !fps->framerate_max.num) {
            fr = (AVRational){25, 1};
            av_log(ofilter->ost, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps. Use the -r option "
                   "if you want a different framerate.\n");
        }

        if (fps->framerate_max.num &&
            (av_q2d(fr) > av_q2d(fps->framerate_max) ||
             !fr.den))
            fr = fps->framerate_max;
    }

    if (fr.num > 0) {
        if (fps->framerate_supported) {
            int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
            fr = fps->framerate_supported[idx];
        }
        if (fps->framerate_clip) {
            av_reduce(&fr.num, &fr.den,
                      fr.num, fr.den, fps->framerate_clip);
        }
    }

    if (!(tb.num > 0 && tb.den > 0))
        tb = av_inv_q(fr);
    if (!(tb.num > 0 && tb.den > 0))
        tb = frame->time_base;

finish:
    ofp->tb_out        = tb;
    fps->framerate     = fr;
    ofp->tb_out_locked = 1;

    return 0;
}
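
/* Rescale frame->pts from the filter timebase to tb_dst (minus start_time) and
 * return the same timestamp as a double with extra fractional precision, for
 * use by the framerate conversion code. */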
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst,
                                             int64_t start_time)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    AVRational tb = tb_dst;
    AVRational filter_tb = frame->time_base;
    const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

    if (frame->pts == AV_NOPTS_VALUE)
        goto early_exit;

    tb.den <<= extra_bits;
    float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
                av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
    float_pts /= 1 << extra_bits;
    // when float_pts is not exactly an integer,
    // avoid exact midpoints to reduce the chance of rounding differences, this
    // can be removed in case the fps code is changed to work with integers
    if (float_pts != llrint(float_pts))
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

    frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
                 av_rescale_q(start_time, AV_TIME_BASE_Q, tb_dst);
    frame->time_base = tb_dst;

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               av_ts2timestr(frame->pts, &tb_dst),
               float_pts, tb_dst.num, tb_dst.den);
    }

    return float_pts;
}
/* Convert frame timestamps to the encoder timebase and decide how many times
 * this (and possibly the previous) frame should be repeated in order to
 * conform to the desired target framerate (if any).
 */
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame,
                               int64_t *nb_frames, int64_t *nb_frames_prev)
{
    OutputFilter *ofilter = &ofp->ofilter;
    OutputStream *ost = ofilter->ost;
    FPSConvContext *fps = &ofp->fps;
    double delta0, delta, sync_ipts, duration;

    if (!frame) {
        *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
                                                fps->frames_prev_hist[1],
                                                fps->frames_prev_hist[2]);

        if (!*nb_frames && fps->last_dropped) {
            ofilter->nb_frames_drop++;
            fps->last_dropped++;
        }

        goto finish;
    }

    duration = frame->duration * av_q2d(frame->time_base) / av_q2d(ofp->tb_out);

    sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
    /* delta0 is the "drift" between the input frame and
     * where it would fall in the output. */
    delta0 = sync_ipts - ofp->next_pts;
    delta  = delta0 + duration;

    // tracks the number of times the PREVIOUS frame should be duplicated,
    // mostly for variable framerate (VFR)
    *nb_frames_prev = 0;
    /* by default, we output a single frame */
    *nb_frames = 1;

    if (delta0 < 0 &&
        delta > 0 &&
        ost->vsync_method != VSYNC_PASSTHROUGH &&
        ost->vsync_method != VSYNC_DROP) {
        if (delta0 < -0.6) {
            av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
        } else
            av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
        sync_ipts = ofp->next_pts;
        duration += delta0;
        delta0 = 0;
    }

    switch (ost->vsync_method) {
    case VSYNC_VSCFR:
        if (fps->frame_number == 0 && delta0 >= 0.5) {
            av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
            delta = duration;
            delta0 = 0;
            ofp->next_pts = llrint(sync_ipts);
        }
    case VSYNC_CFR:
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
            *nb_frames = 0;
        } else if (delta < -1.1)
            *nb_frames = 0;
        else if (delta > 1.1) {
            *nb_frames = llrintf(delta);
            if (delta0 > 1.1)
                *nb_frames_prev = llrintf(delta0 - 0.6);
        }
        frame->duration = 1;
        break;
    case VSYNC_VFR:
        if (delta <= -0.6)
            *nb_frames = 0;
        else if (delta > 0.6)
            ofp->next_pts = llrint(sync_ipts);
        frame->duration = llrint(duration);
        break;
    case VSYNC_DROP:
    case VSYNC_PASSTHROUGH:
        ofp->next_pts = llrint(sync_ipts);
        frame->duration = llrint(duration);
        break;
    default:
        av_assert0(0);
    }

finish:
    memmove(fps->frames_prev_hist + 1,
            fps->frames_prev_hist,
            sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
    fps->frames_prev_hist[0] = *nb_frames_prev;

    if (*nb_frames_prev == 0 && fps->last_dropped) {
        ofilter->nb_frames_drop++;
        av_log(ost, AV_LOG_VERBOSE,
               "*** dropping frame %"PRId64" at ts %"PRId64"\n",
               fps->frame_number, fps->last_frame->pts);
    }
    if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
        if (*nb_frames > dts_error_threshold * 30) {
            av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
            ofilter->nb_frames_drop++;
            *nb_frames = 0;
            return;
        }
        ofilter->nb_frames_dup += *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev);
        av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
        if (ofilter->nb_frames_dup > fps->dup_warning) {
            av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
            fps->dup_warning *= 10;
        }
    }

    fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
    fps->dropped_keyframe |= fps->last_dropped && (frame->flags & AV_FRAME_FLAG_KEY);
}
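
/* Send one filtered frame to the encoder, duplicating or dropping it as
 * requested by video_sync_process() for video; audio frames are rescaled to
 * the output timebase and passed through unchanged. */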
static int fg_output_frame(OutputFilterPriv *ofp, AVFrame *frame)
{
    FilterGraphPriv *fgp = fgp_from_fg(ofp->ofilter.graph);
    OutputStream *ost = ofp->ofilter.ost;
    AVFrame *frame_prev = ofp->fps.last_frame;
    enum AVMediaType type = ofp->ofilter.type;

    int64_t nb_frames = 1, nb_frames_prev = 0;

    if (type == AVMEDIA_TYPE_VIDEO)
        video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);

    for (int64_t i = 0; i < nb_frames; i++) {
        AVFrame *frame_out;
        int ret;

        if (type == AVMEDIA_TYPE_VIDEO) {
            AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
                                frame_prev : frame;
            if (!frame_in)
                break;

            frame_out = fgp->frame_enc;
            ret = av_frame_ref(frame_out, frame_in);
            if (ret < 0)
                return ret;

            frame_out->pts = ofp->next_pts;

            if (ofp->fps.dropped_keyframe) {
                frame_out->flags |= AV_FRAME_FLAG_KEY;
                ofp->fps.dropped_keyframe = 0;
            }
        } else {
            frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
                av_rescale_q(frame->pts, frame->time_base, ofp->tb_out) -
                av_rescale_q(ofp->ts_offset, AV_TIME_BASE_Q, ofp->tb_out);
            frame->time_base = ofp->tb_out;
            frame->duration  = av_rescale_q(frame->nb_samples,
                                            (AVRational){ 1, frame->sample_rate },
                                            ofp->tb_out);

            ofp->next_pts = frame->pts + frame->duration;

            frame_out = frame;
        }

        ret = enc_frame(ost, frame_out);
        av_frame_unref(frame_out);
        if (ret < 0)
            return ret;

        if (type == AVMEDIA_TYPE_VIDEO) {
            ofp->fps.frame_number++;
            ofp->next_pts++;

            if (i == nb_frames_prev && frame)
                frame->flags &= ~AV_FRAME_FLAG_KEY;
        }

        ofp->got_frame = 1;
    }

    if (frame && frame_prev) {
        av_frame_unref(frame_prev);
        av_frame_move_ref(frame_prev, frame);
    }

    return 0;
}
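
/* Pull a single frame from the buffersink (without requesting more input from
 * the graph), lock the output timebase on the first frame, and hand the frame
 * to fg_output_frame(). Returns 1 when no more frames are currently
 * available. */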
static int fg_output_step(OutputFilterPriv *ofp, int flush)
{
    FilterGraphPriv *fgp = fgp_from_fg(ofp->ofilter.graph);
    OutputStream *ost = ofp->ofilter.ost;
    AVFrame *frame = fgp->frame;
    AVFilterContext *filter = ofp->filter;
    FrameData *fd;
    int ret;

    ret = av_buffersink_get_frame_flags(filter, frame,
                                        AV_BUFFERSINK_FLAG_NO_REQUEST);
    if (flush && ret == AVERROR_EOF && ofp->got_frame &&
        ost->type == AVMEDIA_TYPE_VIDEO) {
        ret = fg_output_frame(ofp, NULL);
        return (ret < 0) ? ret : 1;
    } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
        return 1;
    } else if (ret < 0) {
        av_log(fgp, AV_LOG_WARNING,
               "Error in retrieving a frame from the filtergraph: %s\n",
               av_err2str(ret));
        return ret;
    }
    if (ost->finished) {
        av_frame_unref(frame);
        return 0;
    }

    frame->time_base = av_buffersink_get_time_base(filter);

    if (frame->pts != AV_NOPTS_VALUE) {
        ost->filter->last_pts = av_rescale_q(frame->pts, frame->time_base,
                                             AV_TIME_BASE_Q);

        if (debug_ts)
            av_log(fgp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
                   av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
                   frame->time_base.num, frame->time_base.den);
    }

    // Choose the output timebase the first time we get a frame.
    if (!ofp->tb_out_locked) {
        ret = choose_out_timebase(ofp, frame);
        if (ret < 0) {
            av_log(ost, AV_LOG_ERROR, "Could not choose an output time base\n");
            av_frame_unref(frame);
            return ret;
        }
    }

    fd = frame_data(frame);
    if (!fd) {
        av_frame_unref(frame);
        return AVERROR(ENOMEM);
    }

    // only use bits_per_raw_sample passed through from the decoder
    // if the filtergraph did not touch the frame data
    if (!fgp->is_meta)
        fd->bits_per_raw_sample = 0;

    if (ost->type == AVMEDIA_TYPE_VIDEO) {
        if (!frame->duration) {
            AVRational fr = av_buffersink_get_frame_rate(filter);
            if (fr.num > 0 && fr.den > 0)
                frame->duration = av_rescale_q(1, av_inv_q(fr), frame->time_base);
        }

        fd->frame_rate_filter = ofp->fps.framerate;
    }

    ret = fg_output_frame(ofp, frame);
    av_frame_unref(frame);
    if (ret < 0)
        return ret;

    return 0;
}
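
/* Drain all frames currently buffered in this graph's sinks. */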
int reap_filters(FilterGraph *fg, int flush)
{
    if (!fg->graph)
        return 0;

    /* Reap all buffers present in the buffer sinks */
    for (int i = 0; i < fg->nb_outputs; i++) {
        OutputFilterPriv *ofp = ofp_from_ofilter(fg->outputs[i]);
        int ret = 0;

        while (!ret) {
            ret = fg_output_step(ofp, flush);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}
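
/* Keep a sub2video input in step with the other streams: when another stream
 * reaches timestamp pts, make sure the overlaid subpicture covers that time
 * and push a new reference if the buffer source has been asking for data. */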
void ifilter_sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int64_t pts2;

    if (!ifilter->graph->graph)
        return;

    /* subtitles seem to be usually muxed ahead of other streams;
       if not, subtracting a larger time here is necessary */
    pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;

    /* do not send the heartbeat frame if the subtitle is already ahead */
    if (pts2 <= ifp->sub2video.last_pts)
        return;

    if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
        /* if we have hit the end of the current displayed subpicture,
           or if we need to initialize the system, update the
           overlayed subpicture and its start/end times */
        sub2video_update(ifp, pts2 + 1, NULL);

    if (av_buffersrc_get_nb_failed_requests(ifp->filter))
        sub2video_push_ref(ifp, pts2);
}
int ifilter_sub2video(InputFilter *ifilter, const AVFrame *frame)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int ret;

    if (ifilter->graph->graph) {
        if (!frame) {
            if (ifp->sub2video.end_pts < INT64_MAX)
                sub2video_update(ifp, INT64_MAX, NULL);

            return av_buffersrc_add_frame(ifp->filter, NULL);
        }

        ifp->width  = frame->width  ? frame->width  : ifp->width;
        ifp->height = frame->height ? frame->height : ifp->height;

        sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
    } else if (frame) {
        AVFrame *tmp = av_frame_clone(frame);

        if (!tmp)
            return AVERROR(ENOMEM);

        ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
        if (ret < 0) {
            av_frame_free(&tmp);
            return ret;
        }
    }

    return 0;
}
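
/* Signal end of stream on this input. If the buffer source already exists it
 * is closed at the rescaled pts; otherwise the fallback parameters recorded
 * from the decoder are promoted so the graph can still be configured. */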
int ifilter_send_eof(InputFilter *ifilter, int64_t pts, AVRational tb)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    int ret;

    ifp->eof = 1;

    if (ifp->filter) {
        pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
                               AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

        ret = av_buffersrc_close(ifp->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
        if (ret < 0)
            return ret;
    } else {
        if (ifp->format < 0) {
            // the filtergraph was never configured, use the fallback parameters
            ifp->format              = ifp->fallback.format;
            ifp->sample_rate         = ifp->fallback.sample_rate;
            ifp->width               = ifp->fallback.width;
            ifp->height              = ifp->fallback.height;
            ifp->sample_aspect_ratio = ifp->fallback.sample_aspect_ratio;

            ret = av_channel_layout_copy(&ifp->ch_layout,
                                         &ifp->fallback.ch_layout);
            if (ret < 0)
                return ret;

            if (ifilter_has_all_input_formats(ifilter->graph)) {
                ret = configure_filtergraph(ifilter->graph);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
                    return ret;
                }
            }
        }

        if (ifp->format < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot determine format of input stream %d:%d after EOF\n",
                   ifp->ist->file_index, ifp->ist->index);
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}
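
/* Feed one decoded frame to the filtergraph. If the input parameters changed
 * (or the graph is not configured yet), the graph is reconfigured first, or
 * the frame is queued until all inputs have known formats. */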
int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    FilterGraph *fg = ifilter->graph;
    AVFrameSideData *sd;
    int need_reinit, ret;

    /* determine if the parameters for this input changed */
    need_reinit = ifp->format != frame->format;

    switch (ifp->type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifp->sample_rate != frame->sample_rate ||
                       av_channel_layout_compare(&ifp->ch_layout, &frame->ch_layout);
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifp->width  != frame->width ||
                       ifp->height != frame->height;
        break;
    }

    if (!ifp->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) {
        if (!ifp->displaymatrix_present ||
            memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
            need_reinit = 1;
    } else if (ifp->displaymatrix_present)
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        if (!ifilter_has_all_input_formats(fg)) {
            AVFrame *tmp = av_frame_clone(frame);
            if (!tmp)
                return AVERROR(ENOMEM);

            ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
            if (ret < 0)
                av_frame_free(&tmp);

            return ret;
        }

        ret = reap_filters(fg, 0);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    if (keep_reference) {
        ret = av_frame_ref(ifp->frame, frame);
        if (ret < 0)
            return ret;
    } else
        av_frame_move_ref(ifp->frame, frame);
    frame = ifp->frame;

    frame->pts       = av_rescale_q(frame->pts, frame->time_base, ifp->time_base);
    frame->duration  = av_rescale_q(frame->duration, frame->time_base, ifp->time_base);
    frame->time_base = ifp->time_base;
#if LIBAVUTIL_VERSION_MAJOR < 59
    AV_NOWARN_DEPRECATED(
    frame->pkt_duration = frame->duration;
    )
#endif

    ret = av_buffersrc_add_frame_flags(ifp->filter, frame,
                                       AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0) {
        av_frame_unref(frame);
        if (ret != AVERROR_EOF)
            av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
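
/* Run one transcoding step on this graph: request a frame from the oldest
 * sink, reap any output, and on EAGAIN report via *best_ist which input
 * stream should supply more data (the buffer source with the most failed
 * requests). On EOF, outputs that never produced a frame get their encoder
 * initialized with a dummy frame before being closed. */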
int fg_transcode_step(FilterGraph *graph, InputStream **best_ist)
{
    FilterGraphPriv *fgp = fgp_from_fg(graph);
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputStream *ist;

    if (!graph->graph) {
        for (int i = 0; i < graph->nb_inputs; i++) {
            InputFilter *ifilter = graph->inputs[i];
            InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
            if (ifp->format < 0 && !ifp->eof) {
                *best_ist = ifp->ist;
                return 0;
            }
        }

        // This state - graph is not configured, but all inputs are either
        // initialized or EOF - should be unreachable because sending EOF to a
        // filter without even a fallback format should fail
        av_assert0(0);
        return AVERROR_BUG;
    }

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        return reap_filters(graph, 0);

    if (ret == AVERROR_EOF) {
        reap_filters(graph, 1);
        for (int i = 0; i < graph->nb_outputs; i++) {
            OutputFilter *ofilter = graph->outputs[i];
            OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);

            // we are finished and no frames were ever seen at this output,
            // at least initialize the encoder with a dummy frame
            if (!ofp->got_frame) {
                AVFrame *frame = fgp->frame;
                FrameData *fd;

                frame->time_base = ofp->tb_out;
                frame->format    = ofp->format;

                frame->width               = ofp->width;
                frame->height              = ofp->height;
                frame->sample_aspect_ratio = ofp->sample_aspect_ratio;

                frame->sample_rate = ofp->sample_rate;
                if (ofp->ch_layout.nb_channels) {
                    ret = av_channel_layout_copy(&frame->ch_layout, &ofp->ch_layout);
                    if (ret < 0)
                        return ret;
                }

                fd = frame_data(frame);
                if (!fd)
                    return AVERROR(ENOMEM);

                fd->frame_rate_filter = ofp->fps.framerate;

                av_assert0(!frame->buf[0]);

                av_log(ofilter->ost, AV_LOG_WARNING,
                       "No filtered frames for output stream, trying to "
                       "initialize anyway.\n");

                enc_open(ofilter->ost, frame);
                av_frame_unref(frame);
            }

            close_output_stream(ofilter->ost);
        }
        return 0;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    for (i = 0; i < graph->nb_inputs; i++) {
        InputFilter *ifilter = graph->inputs[i];
        InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

        ist = ifp->ist;
        if (input_files[ist->file_index]->eagain || ifp->eof)
            continue;

        nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}