  1. /*
  2. * Copyright (c) 2000-2003 Fabrice Bellard
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * multimedia converter based on the FFmpeg libraries
  23. */
  24. #include "config.h"
  25. #include <ctype.h>
  26. #include <string.h>
  27. #include <math.h>
  28. #include <stdlib.h>
  29. #include <errno.h>
  30. #include <limits.h>
  31. #include <stdatomic.h>
  32. #include <stdint.h>
  33. #if HAVE_IO_H
  34. #include <io.h>
  35. #endif
  36. #if HAVE_UNISTD_H
  37. #include <unistd.h>
  38. #endif
  39. #include "libavformat/avformat.h"
  40. #include "libavdevice/avdevice.h"
  41. #include "libswresample/swresample.h"
  42. #include "libavutil/opt.h"
  43. #include "libavutil/channel_layout.h"
  44. #include "libavutil/parseutils.h"
  45. #include "libavutil/samplefmt.h"
  46. #include "libavutil/fifo.h"
  47. #include "libavutil/hwcontext.h"
  48. #include "libavutil/internal.h"
  49. #include "libavutil/intreadwrite.h"
  50. #include "libavutil/dict.h"
  51. #include "libavutil/display.h"
  52. #include "libavutil/mathematics.h"
  53. #include "libavutil/pixdesc.h"
  54. #include "libavutil/avstring.h"
  55. #include "libavutil/libm.h"
  56. #include "libavutil/imgutils.h"
  57. #include "libavutil/timestamp.h"
  58. #include "libavutil/bprint.h"
  59. #include "libavutil/time.h"
  60. #include "libavutil/thread.h"
  61. #include "libavutil/threadmessage.h"
  62. #include "libavcodec/mathops.h"
  63. #include "libavcodec/version.h"
  64. #include "libavformat/os_support.h"
  65. # include "libavfilter/avfilter.h"
  66. # include "libavfilter/buffersrc.h"
  67. # include "libavfilter/buffersink.h"
  68. #if HAVE_SYS_RESOURCE_H
  69. #include <sys/time.h>
  70. #include <sys/types.h>
  71. #include <sys/resource.h>
  72. #elif HAVE_GETPROCESSTIMES
  73. #include <windows.h>
  74. #endif
  75. #if HAVE_GETPROCESSMEMORYINFO
  76. #include <windows.h>
  77. #include <psapi.h>
  78. #endif
  79. #if HAVE_SETCONSOLECTRLHANDLER
  80. #include <windows.h>
  81. #endif
  82. #if HAVE_SYS_SELECT_H
  83. #include <sys/select.h>
  84. #endif
  85. #if HAVE_TERMIOS_H
  86. #include <fcntl.h>
  87. #include <sys/ioctl.h>
  88. #include <sys/time.h>
  89. #include <termios.h>
  90. #elif HAVE_KBHIT
  91. #include <conio.h>
  92. #endif
  93. #include <time.h>
  94. #include "ffmpeg.h"
  95. #include "cmdutils.h"
  96. #include "sync_queue.h"
  97. #include "libavutil/avassert.h"
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* file opened for -vstats output; closed in ffmpeg_cleanup() */
static FILE *vstats_file;

// optionally attached as opaque_ref to decoded AVFrames
typedef struct FrameData {
    uint64_t idx;  // index of the decoded frame
    int64_t pts;   // pts of the frame
    AVRational tb; // time base for pts
} FrameData;

/* wall-clock / user CPU / system CPU timestamps used for -benchmark */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;
    int64_t user_usec;
    int64_t sys_usec;
} BenchmarkTimeStamps;

/* forward declarations for helpers defined later in this file */
static int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt);
static BenchmarkTimeStamps get_benchmark_time_stamps(void);
static int64_t getmaxrss(void);
static int ifilter_has_all_input_formats(FilterGraph *fg);

/* global counters / state shared across the transcoding code */
static int64_t nb_frames_dup = 0;
static uint64_t dup_warning = 1000;
static int64_t nb_frames_drop = 0;
static int64_t decode_error_stat[2];
unsigned nb_output_dumped = 0;

static BenchmarkTimeStamps current_time;
AVIOContext *progress_avio = NULL;

/* all open input/output files and filtergraphs, owned by this file */
InputFile   **input_files   = NULL;
int        nb_input_files   = 0;

OutputFile **output_files = NULL;
int         nb_output_files = 0;

FilterGraph **filtergraphs;
int        nb_filtergraphs;

#if HAVE_TERMIOS_H
/* init terminal so that we can grab keys */
static struct termios oldtty;  /* terminal state saved by term_init() */
static int restore_tty;        /* nonzero once oldtty holds a state to restore */
#endif

/* sub2video hack:
   Convert subtitles to video with alpha to insert them in filter graphs.
   This is a temporary solution until libavfilter gets real subtitles support.
 */
  138. static int sub2video_get_blank_frame(InputStream *ist)
  139. {
  140. int ret;
  141. AVFrame *frame = ist->sub2video.frame;
  142. av_frame_unref(frame);
  143. ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
  144. ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
  145. ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
  146. if ((ret = av_frame_get_buffer(frame, 0)) < 0)
  147. return ret;
  148. memset(frame->data[0], 0, frame->height * frame->linesize[0]);
  149. return 0;
  150. }
  151. static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
  152. AVSubtitleRect *r)
  153. {
  154. uint32_t *pal, *dst2;
  155. uint8_t *src, *src2;
  156. int x, y;
  157. if (r->type != SUBTITLE_BITMAP) {
  158. av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
  159. return;
  160. }
  161. if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
  162. av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
  163. r->x, r->y, r->w, r->h, w, h
  164. );
  165. return;
  166. }
  167. dst += r->y * dst_linesize + r->x * 4;
  168. src = r->data[0];
  169. pal = (uint32_t *)r->data[1];
  170. for (y = 0; y < r->h; y++) {
  171. dst2 = (uint32_t *)dst;
  172. src2 = src;
  173. for (x = 0; x < r->w; x++)
  174. *(dst2++) = pal[*(src2++)];
  175. dst += dst_linesize;
  176. src += r->linesize[0];
  177. }
  178. }
  179. static void sub2video_push_ref(InputStream *ist, int64_t pts)
  180. {
  181. AVFrame *frame = ist->sub2video.frame;
  182. int i;
  183. int ret;
  184. av_assert1(frame->data[0]);
  185. ist->sub2video.last_pts = frame->pts = pts;
  186. for (i = 0; i < ist->nb_filters; i++) {
  187. ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
  188. AV_BUFFERSRC_FLAG_KEEP_REF |
  189. AV_BUFFERSRC_FLAG_PUSH);
  190. if (ret != AVERROR_EOF && ret < 0)
  191. av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
  192. av_err2str(ret));
  193. }
  194. }
  195. void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
  196. {
  197. AVFrame *frame = ist->sub2video.frame;
  198. int8_t *dst;
  199. int dst_linesize;
  200. int num_rects, i;
  201. int64_t pts, end_pts;
  202. if (!frame)
  203. return;
  204. if (sub) {
  205. pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
  206. AV_TIME_BASE_Q, ist->st->time_base);
  207. end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
  208. AV_TIME_BASE_Q, ist->st->time_base);
  209. num_rects = sub->num_rects;
  210. } else {
  211. /* If we are initializing the system, utilize current heartbeat
  212. PTS as the start time, and show until the following subpicture
  213. is received. Otherwise, utilize the previous subpicture's end time
  214. as the fall-back value. */
  215. pts = ist->sub2video.initialize ?
  216. heartbeat_pts : ist->sub2video.end_pts;
  217. end_pts = INT64_MAX;
  218. num_rects = 0;
  219. }
  220. if (sub2video_get_blank_frame(ist) < 0) {
  221. av_log(NULL, AV_LOG_ERROR,
  222. "Impossible to get a blank canvas.\n");
  223. return;
  224. }
  225. dst = frame->data [0];
  226. dst_linesize = frame->linesize[0];
  227. for (i = 0; i < num_rects; i++)
  228. sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
  229. sub2video_push_ref(ist, pts);
  230. ist->sub2video.end_pts = end_pts;
  231. ist->sub2video.initialize = 0;
  232. }
/* Keep sub2video streams "alive": called when a frame is read from any
 * stream of the same input file, so filters waiting on a subtitle input do
 * not starve. pts is in ist's stream time base. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = infile->streams[i];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* re-push the last frame only if some buffer source actually
           failed a frame request since the last push */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
  263. static void sub2video_flush(InputStream *ist)
  264. {
  265. int i;
  266. int ret;
  267. if (ist->sub2video.end_pts < INT64_MAX)
  268. sub2video_update(ist, INT64_MAX, NULL);
  269. for (i = 0; i < ist->nb_filters; i++) {
  270. ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
  271. if (ret != AVERROR_EOF && ret < 0)
  272. av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
  273. }
  274. }
  275. /* end of sub2video hack */
/* Restore the terminal state saved by term_init(). Called from the signal
 * handler, so it must stay async-signal-safe (tcsetattr qualifies). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
/* Restore the terminal on normal (non-signal) exit paths. */
void term_exit(void)
{
    /* NOTE(review): empty AV_LOG_QUIET message appears intended to finish
       the in-place status line before restoring the tty — confirm */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    term_exit_sigsafe();
}
/* State shared between the signal handlers and the main loop. */
static volatile int received_sigterm = 0;    /* last termination signal, 0 if none */
static volatile int received_nb_signals = 0; /* number of termination signals received */
/* set once transcode initialization is done; read by decode_interrupt_cb() */
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
static volatile int ffmpeg_exited = 0;       /* set at end of ffmpeg_cleanup(); CtrlHandler waits on it */
int main_return_code = 0;
static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
  294. static void
  295. sigterm_handler(int sig)
  296. {
  297. int ret;
  298. received_sigterm = sig;
  299. received_nb_signals++;
  300. term_exit_sigsafe();
  301. if(received_nb_signals > 3) {
  302. ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
  303. strlen("Received > 3 system signals, hard exiting\n"));
  304. if (ret < 0) { /* Do nothing */ };
  305. exit(123);
  306. }
  307. }
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the same
 * sigterm_handler() path used for POSIX signals. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);  /* yield until ffmpeg_cleanup() sets ffmpeg_exited */
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
#ifdef __linux__
/* On Linux, install handlers via sigaction() using the `action` struct set
 * up in term_init() (SA_RESTART + full signal mask); elsewhere fall back to
 * plain signal(). */
#define SIGNAL(sig, func)               \
    do {                                \
        action.sa_handler = func;       \
        sigaction(sig, &action, NULL);  \
    } while (0)
#else
#define SIGNAL(sig, func) \
    signal(sig, func)
#endif
/* Install termination-signal handlers and, when stdin interaction is
 * enabled, switch the terminal into a raw-ish mode (no echo, no canonical
 * buffering) so read_key() can grab single keypresses. */
void term_init(void)
{
#if defined __linux__
    struct sigaction action = {0};
    action.sa_handler = sigterm_handler;
    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);
    /* restart interruptible functions (i.e. don't fail with EINTR) */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    if (stdin_interaction) {
        struct termios tty;
        /* save the current state so term_exit() can restore it */
        if (tcgetattr (0, &tty) == 0) {
            oldtty = tty;
            restore_tty = 1;

            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                          |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            /* blocking single-byte reads, no inter-byte timer */
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    SIGNAL(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
/* read a key without blocking */
/* Returns the key's character code, -1 when no key is available (or the
 * input pipe is closed), or the short read(2) result on termios builds. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* zero timeout: poll stdin, never block */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    /* classify stdin once: console handle vs. anonymous pipe */
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
  435. static int decode_interrupt_cb(void *ctx)
  436. {
  437. return received_nb_signals > atomic_load(&transcode_init_done);
  438. }
  439. const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Global teardown: free every filtergraph (including frames/subtitles still
 * queued on its inputs), close all input and output files, release
 * option-related allocations, and report how the run ended. ret is the exit
 * code being propagated by the caller. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* drain frames still queued for this filter input */
            if (ifilter->frame_queue) {
                AVFrame *frame;
                while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0)
                    av_frame_free(&frame);
                av_fifo_freep2(&ifilter->frame_queue);
            }
            av_freep(&ifilter->displaymatrix);
            /* drain subtitles queued for the sub2video hack */
            if (ist->sub2video.sub_queue) {
                AVSubtitle sub;
                while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0)
                    avsubtitle_free(&sub);
                av_fifo_freep2(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_channel_layout_uninit(&ofilter->ch_layout);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);
        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* close files */
    for (i = 0; i < nb_output_files; i++)
        of_close(&output_files[i]);

    for (i = 0; i < nb_input_files; i++)
        ifile_close(&input_files[i]);

    if (vstats_file) {
        /* fclose() flushes; a failure here may mean lost stats data */
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);
    of_enc_stats_close();

    av_freep(&filter_nbthreads);

    av_freep(&input_files);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    /* lets CtrlHandler (Windows) know the process may now terminate */
    ffmpeg_exited = 1;
}
  510. /* iterate over all output streams in all output files;
  511. * pass NULL to start iteration */
  512. static OutputStream *ost_iter(OutputStream *prev)
  513. {
  514. int of_idx = prev ? prev->file_index : 0;
  515. int ost_idx = prev ? prev->index + 1 : 0;
  516. for (; of_idx < nb_output_files; of_idx++) {
  517. OutputFile *of = output_files[of_idx];
  518. if (ost_idx < of->nb_streams)
  519. return of->streams[ost_idx];
  520. ost_idx = 0;
  521. }
  522. return NULL;
  523. }
  524. InputStream *ist_iter(InputStream *prev)
  525. {
  526. int if_idx = prev ? prev->file_index : 0;
  527. int ist_idx = prev ? prev->st->index + 1 : 0;
  528. for (; if_idx < nb_input_files; if_idx++) {
  529. InputFile *f = input_files[if_idx];
  530. if (ist_idx < f->nb_streams)
  531. return f->streams[ist_idx];
  532. ist_idx = 0;
  533. }
  534. return NULL;
  535. }
  536. void remove_avoptions(AVDictionary **a, AVDictionary *b)
  537. {
  538. const AVDictionaryEntry *t = NULL;
  539. while ((t = av_dict_iterate(b, t))) {
  540. av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
  541. }
  542. }
  543. void assert_avoptions(AVDictionary *m)
  544. {
  545. const AVDictionaryEntry *t;
  546. if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
  547. av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
  548. exit_program(1);
  549. }
  550. }
/* Terminate the program after an experimental codec was selected without
 * the required opt-in; both parameters are currently unused. */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
  555. static void update_benchmark(const char *fmt, ...)
  556. {
  557. if (do_benchmark_all) {
  558. BenchmarkTimeStamps t = get_benchmark_time_stamps();
  559. va_list va;
  560. char buf[1024];
  561. if (fmt) {
  562. va_start(va, fmt);
  563. vsnprintf(buf, sizeof(buf), fmt, va);
  564. va_end(va);
  565. av_log(NULL, AV_LOG_INFO,
  566. "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
  567. t.user_usec - current_time.user_usec,
  568. t.sys_usec - current_time.sys_usec,
  569. t.real_usec - current_time.real_usec, buf);
  570. }
  571. current_time = t;
  572. }
  573. }
  574. static void close_output_stream(OutputStream *ost)
  575. {
  576. OutputFile *of = output_files[ost->file_index];
  577. ost->finished |= ENCODER_FINISHED;
  578. if (ost->sq_idx_encode >= 0)
  579. sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
  580. }
  581. static int check_recording_time(OutputStream *ost, int64_t ts, AVRational tb)
  582. {
  583. OutputFile *of = output_files[ost->file_index];
  584. if (of->recording_time != INT64_MAX &&
  585. av_compare_ts(ts, tb, of->recording_time, AV_TIME_BASE_Q) >= 0) {
  586. close_output_stream(ost);
  587. return 0;
  588. }
  589. return 1;
  590. }
/* Rescale frame->pts from the filter timebase to the encoder timebase
 * (shifted by the output -ss start time) and return the same pts as a
 * double with extra fractional precision for the framerate-conversion
 * code.  Returns AV_NOPTS_VALUE (as a double) if the frame has no pts. */
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
                                             AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    const int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ?
                               0 : of->start_time;

    AVCodecContext *const enc = ost->enc_ctx;

    AVRational tb = enc->time_base;
    AVRational filter_tb = frame->time_base;
    // extra fractional bits appended to the encoder timebase so that
    // float_pts keeps sub-tick precision (clipped to avoid overflow)
    const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

    if (frame->pts == AV_NOPTS_VALUE)
        goto early_exit;

    tb.den <<= extra_bits;
    float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
                av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
    float_pts /= 1 << extra_bits;
    // avoid exact midoints to reduce the chance of rounding differences, this
    // can be removed in case the fps code is changed to work with integers
    float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

    // the integer pts is rescaled without the extra precision bits
    frame->pts = av_rescale_q(frame->pts, filter_tb, enc->time_base) -
                 av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    frame->time_base = enc->time_base;

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               (enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
  624. static int init_output_stream(OutputStream *ost, AVFrame *frame,
  625. char *error, int error_len);
  626. static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
  627. unsigned int fatal)
  628. {
  629. int ret = AVERROR_BUG;
  630. char error[1024] = {0};
  631. if (ost->initialized)
  632. return 0;
  633. ret = init_output_stream(ost, frame, error, sizeof(error));
  634. if (ret < 0) {
  635. av_log(ost, AV_LOG_ERROR, "Error initializing output stream: %s\n",
  636. error);
  637. if (fatal)
  638. exit_program(1);
  639. }
  640. return ret;
  641. }
  642. static double psnr(double d)
  643. {
  644. return -10.0 * log10(d);
  645. }
/* Extract the encoder quality/PSNR side data from an encoded video packet
 * into the OutputStream and, when write_vstats is set, append one line to
 * the -vstats file (opened lazily on first use). */
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
{
    const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                                NULL);
    AVCodecContext *enc = ost->enc_ctx;
    int64_t frame_number;
    double ti1, bitrate, avg_bitrate;

    /* side data layout: u32 quality, u8 picture type, u8 error count,
     * then 8-byte error values — see AV_PKT_DATA_QUALITY_STATS */
    ost->quality = sd ? AV_RL32(sd) : -1;
    ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

    for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
        if (sd && i < sd[5])
            ost->error[i] = AV_RL64(sd + 8 + 8*i);
        else
            ost->error[i] = -1; // not provided by the encoder
    }

    if (!write_vstats)
        return;

    /* this is executed just the first time update_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    frame_number = ost->packets_encoded;
    /* version 2 of the format additionally prefixes file/stream indices */
    if (vstats_version <= 1) {
        fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    } else {
        fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    }

    if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
        fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

    fprintf(vstats_file,"f_size= %6d ", pkt->size);
    /* compute pts value */
    ti1 = pkt->dts * av_q2d(pkt->time_base);
    if (ti1 < 0.01)
        ti1 = 0.01; // clamp to avoid division blow-up for tiny/negative dts

    bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
    avg_bitrate = (double)(ost->data_size_enc * 8) / ti1 / 1000.0;
    fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
            (double)ost->data_size_enc / 1024, ti1, bitrate, avg_bitrate);
    fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
}
  692. void enc_stats_write(OutputStream *ost, EncStats *es,
  693. const AVFrame *frame, const AVPacket *pkt,
  694. uint64_t frame_num)
  695. {
  696. AVIOContext *io = es->io;
  697. AVRational tb = frame ? frame->time_base : pkt->time_base;
  698. int64_t pts = frame ? frame->pts : pkt->pts;
  699. AVRational tbi = (AVRational){ 0, 1};
  700. int64_t ptsi = INT64_MAX;
  701. const FrameData *fd;
  702. if ((frame && frame->opaque_ref) || (pkt && pkt->opaque_ref)) {
  703. fd = (const FrameData*)(frame ? frame->opaque_ref->data : pkt->opaque_ref->data);
  704. tbi = fd->tb;
  705. ptsi = fd->pts;
  706. }
  707. for (size_t i = 0; i < es->nb_components; i++) {
  708. const EncStatsComponent *c = &es->components[i];
  709. switch (c->type) {
  710. case ENC_STATS_LITERAL: avio_write (io, c->str, c->str_len); continue;
  711. case ENC_STATS_FILE_IDX: avio_printf(io, "%d", ost->file_index); continue;
  712. case ENC_STATS_STREAM_IDX: avio_printf(io, "%d", ost->index); continue;
  713. case ENC_STATS_TIMEBASE: avio_printf(io, "%d/%d", tb.num, tb.den); continue;
  714. case ENC_STATS_TIMEBASE_IN: avio_printf(io, "%d/%d", tbi.num, tbi.den); continue;
  715. case ENC_STATS_PTS: avio_printf(io, "%"PRId64, pts); continue;
  716. case ENC_STATS_PTS_IN: avio_printf(io, "%"PRId64, ptsi); continue;
  717. case ENC_STATS_PTS_TIME: avio_printf(io, "%g", pts * av_q2d(tb)); continue;
  718. case ENC_STATS_PTS_TIME_IN: avio_printf(io, "%g", ptsi == INT64_MAX ?
  719. INFINITY : ptsi * av_q2d(tbi)); continue;
  720. case ENC_STATS_FRAME_NUM: avio_printf(io, "%"PRIu64, frame_num); continue;
  721. case ENC_STATS_FRAME_NUM_IN: avio_printf(io, "%"PRIu64, fd ? fd->idx : -1); continue;
  722. }
  723. if (frame) {
  724. switch (c->type) {
  725. case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, ost->samples_encoded); continue;
  726. case ENC_STATS_NB_SAMPLES: avio_printf(io, "%d", frame->nb_samples); continue;
  727. default: av_assert0(0);
  728. }
  729. } else {
  730. switch (c->type) {
  731. case ENC_STATS_DTS: avio_printf(io, "%"PRId64, pkt->dts); continue;
  732. case ENC_STATS_DTS_TIME: avio_printf(io, "%g", pkt->dts * av_q2d(tb)); continue;
  733. case ENC_STATS_PKT_SIZE: avio_printf(io, "%d", pkt->size); continue;
  734. case ENC_STATS_BITRATE: {
  735. double duration = FFMAX(pkt->duration, 1) * av_q2d(tb);
  736. avio_printf(io, "%g", 8.0 * pkt->size / duration);
  737. continue;
  738. }
  739. case ENC_STATS_AVG_BITRATE: {
  740. double duration = pkt->dts * av_q2d(tb);
  741. avio_printf(io, "%g", duration > 0 ? 8.0 * ost->data_size_enc / duration : -1.);
  742. continue;
  743. }
  744. default: av_assert0(0);
  745. }
  746. }
  747. }
  748. avio_w8(io, '\n');
  749. avio_flush(io);
  750. }
/* Send one frame (or NULL to flush) to the encoder and drain all packets
 * it produces, muxing each one.  Returns 0 when the encoder wants more
 * input (EAGAIN), AVERROR_EOF when flushing completed, or a negative
 * error code on failure. */
static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket *pkt = ost->pkt;
    const char *type_desc = av_get_media_type_string(enc->codec_type);
    const char *action = frame ? "encode" : "flush";
    int ret;

    if (frame) {
        /* pre-encoding stats are keyed by the frame counter */
        if (ost->enc_stats_pre.io)
            enc_stats_write(ost, &ost->enc_stats_pre, frame, NULL,
                            ost->frames_encoded);

        ost->frames_encoded++;
        ost->samples_encoded += frame->nb_samples;

        if (debug_ts) {
            av_log(ost, AV_LOG_INFO, "encoder <- type:%s "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   type_desc,
                   av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }
    }

    update_benchmark(NULL);

    ret = avcodec_send_frame(enc, frame);
    /* EOF while flushing (frame == NULL) is expected, not an error */
    if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
        av_log(ost, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
               type_desc);
        return ret;
    }

    while (1) {
        ret = avcodec_receive_packet(enc, pkt);
        update_benchmark("%s_%s %d.%d", action, type_desc,
                         ost->file_index, ost->index);

        pkt->time_base = enc->time_base;

        /* if two pass, output log on success and EOF */
        if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out)
            fprintf(ost->logfile, "%s", enc->stats_out);

        if (ret == AVERROR(EAGAIN)) {
            av_assert0(frame); // should never happen during flushing
            return 0;
        } else if (ret == AVERROR_EOF) {
            /* signal EOF downstream to the muxer */
            of_output_packet(of, pkt, ost, 1);
            return ret;
        } else if (ret < 0) {
            av_log(ost, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
            return ret;
        }

        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            update_video_stats(ost, pkt, !!vstats_filename);
        if (ost->enc_stats_post.io)
            enc_stats_write(ost, &ost->enc_stats_post, NULL, pkt,
                            ost->packets_encoded);

        /* log timestamps in the encoder timebase, before rescaling */
        if (debug_ts) {
            av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                   av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base),
                   av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
        }

        av_packet_rescale_ts(pkt, pkt->time_base, ost->mux_timebase);
        pkt->time_base = ost->mux_timebase;

        /* and again after rescaling to the muxer timebase */
        if (debug_ts) {
            av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                   av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base),
                   av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
        }

        if ((ret = trigger_fix_sub_duration_heartbeat(ost, pkt)) < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Subtitle heartbeat logic failed in %s! (%s)\n",
                   __func__, av_err2str(ret));
            exit_program(1);
        }

        ost->data_size_enc += pkt->size;

        ost->packets_encoded++;

        of_output_packet(of, pkt, ost, 0);
    }

    av_assert0(0); // unreachable: the loop only exits via return
}
/* Hand a frame (or NULL for EOF) to the encoder, going through the
 * sync queue when this stream uses one.  Without a sync queue this is a
 * plain call to encode_frame(); with one, the frame is sent into the
 * queue and every frame the queue releases is encoded in turn. */
static int submit_encode_frame(OutputFile *of, OutputStream *ost,
                               AVFrame *frame)
{
    int ret;

    if (ost->sq_idx_encode < 0)
        return encode_frame(of, ost, frame);

    if (frame) {
        /* the sync queue takes ownership of a reference, so ref into the
         * per-stream spare frame rather than consuming the caller's */
        ret = av_frame_ref(ost->sq_frame, frame);
        if (ret < 0)
            return ret;
        frame = ost->sq_frame;
    }

    ret = sq_send(of->sq_encode, ost->sq_idx_encode,
                  SQFRAME(frame));
    if (ret < 0) {
        if (frame)
            av_frame_unref(frame);
        /* EOF from the queue is not fatal; fall through and drain it */
        if (ret != AVERROR_EOF)
            return ret;
    }

    while (1) {
        AVFrame *enc_frame = ost->sq_frame;

        ret = sq_receive(of->sq_encode, ost->sq_idx_encode,
                         SQFRAME(enc_frame));
        if (ret == AVERROR_EOF) {
            enc_frame = NULL; // flush the encoder
        } else if (ret < 0) {
            /* EAGAIN just means the queue has nothing to release yet */
            return (ret == AVERROR(EAGAIN)) ? 0 : ret;
        }

        ret = encode_frame(of, ost, enc_frame);
        if (enc_frame)
            av_frame_unref(enc_frame);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                close_output_stream(ost);
            return ret;
        }
    }
}
  873. static void do_audio_out(OutputFile *of, OutputStream *ost,
  874. AVFrame *frame)
  875. {
  876. AVCodecContext *enc = ost->enc_ctx;
  877. int ret;
  878. if (frame->pts == AV_NOPTS_VALUE)
  879. frame->pts = ost->next_pts;
  880. else {
  881. int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
  882. frame->pts =
  883. av_rescale_q(frame->pts, frame->time_base, enc->time_base) -
  884. av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
  885. }
  886. frame->time_base = enc->time_base;
  887. if (!check_recording_time(ost, frame->pts, frame->time_base))
  888. return;
  889. ost->next_pts = frame->pts + frame->nb_samples;
  890. ret = submit_encode_frame(of, ost, frame);
  891. if (ret < 0 && ret != AVERROR_EOF)
  892. exit_program(1);
  893. }
/* Encode one AVSubtitle and mux the resulting packet(s).  DVB subtitles
 * are encoded twice: once to draw the rectangles and once (with
 * num_rects forced to 0) to clear them. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i, ret;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(ost, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        /* num_rects is temporarily zeroed for the DVB "clear" pass;
         * remember it so it can be restored afterwards */
        unsigned save_num_rects = sub->num_rects;

        if (!check_recording_time(ost, pts, AV_TIME_BASE_Q))
            return;

        ret = av_new_packet(pkt, subtitle_out_max_size);
        if (ret < 0)
            report_and_exit(AVERROR(ENOMEM));

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(ost, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        /* trim the packet down to the bytes actually produced */
        av_shrink_packet(pkt, subtitle_out_size);
        pkt->time_base = ost->mux_timebase;
        pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, pkt->time_base);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, pkt->time_base);
        }
        pkt->dts = pkt->pts;

        of_output_packet(of, pkt, ost, 0);
    }
}
/* Convert frame timestamps to the encoder timebase and decide how many times
 * should this (and possibly previous) frame be repeated in order to conform to
 * desired target framerate (if any).
 */
static void video_sync_process(OutputFile *of, OutputStream *ost,
                               AVFrame *next_picture, double duration,
                               int64_t *nb_frames, int64_t *nb_frames_prev)
{
    double delta0, delta;

    double sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
    /* delta0 is the "drift" between the input frame (next_picture) and
     * where it would fall in the output. */
    delta0 = sync_ipts - ost->next_pts;
    delta = delta0 + duration;

    // tracks the number of times the PREVIOUS frame should be duplicated,
    // mostly for variable framerate (VFR)
    *nb_frames_prev = 0;
    /* by default, we output a single frame */
    *nb_frames = 1;

    /* the frame starts in the past but ends in the future: absorb the
     * drift into its duration instead of rewinding the output clock */
    if (delta0 < 0 &&
        delta > 0 &&
        ost->vsync_method != VSYNC_PASSTHROUGH &&
        ost->vsync_method != VSYNC_DROP) {
        if (delta0 < -0.6) {
            av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
        } else
            av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
        sync_ipts = ost->next_pts;
        duration += delta0;
        delta0 = 0;
    }

    switch (ost->vsync_method) {
    case VSYNC_VSCFR:
        /* VSCFR only differs from CFR in how the very first frame is
         * handled: do not pad the start with duplicates */
        if (ost->vsync_frame_number == 0 && delta0 >= 0.5) {
            av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
            delta = duration;
            delta0 = 0;
            ost->next_pts = llrint(sync_ipts);
        }
        /* fallthrough */
    case VSYNC_CFR:
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (frame_drop_threshold && delta < frame_drop_threshold && ost->vsync_frame_number) {
            *nb_frames = 0;
        } else if (delta < -1.1)
            *nb_frames = 0;
        else if (delta > 1.1) {
            *nb_frames = llrintf(delta);
            if (delta0 > 1.1)
                *nb_frames_prev = llrintf(delta0 - 0.6);
        }
        next_picture->duration = 1;
        break;
    case VSYNC_VFR:
        if (delta <= -0.6)
            *nb_frames = 0;
        else if (delta > 0.6)
            ost->next_pts = llrint(sync_ipts);
        next_picture->duration = duration;
        break;
    case VSYNC_DROP:
    case VSYNC_PASSTHROUGH:
        /* timestamps are passed through unchanged */
        next_picture->duration = duration;
        ost->next_pts = llrint(sync_ipts);
        break;
    default:
        av_assert0(0);
    }
}
/* Decide whether the frame about to be encoded must be forced to a
 * keyframe, based on the -force_key_frames configuration: an explicit
 * timestamp list, an expression, or source-keyframe copying.  Returns
 * AV_PICTURE_TYPE_I to force a keyframe, AV_PICTURE_TYPE_NONE otherwise.
 * dup_idx > 0 marks duplicated frames, which are never forced. */
static enum AVPictureType forced_kf_apply(void *logctx, KeyframeForceCtx *kf,
                                          AVRational tb, const AVFrame *in_picture,
                                          int dup_idx)
{
    double pts_time;

    /* timestamps in the expression/log are relative to the first frame */
    if (kf->ref_pts == AV_NOPTS_VALUE)
        kf->ref_pts = in_picture->pts;

    pts_time = (in_picture->pts - kf->ref_pts) * av_q2d(tb);
    if (kf->index < kf->nb_pts &&
        av_compare_ts(in_picture->pts, tb, kf->pts[kf->index], AV_TIME_BASE_Q) >= 0) {
        /* reached the next entry of the user-supplied timestamp list */
        kf->index++;
        goto force_keyframe;
    } else if (kf->pexpr) {
        double res;
        kf->expr_const_values[FKF_T] = pts_time;
        res = av_expr_eval(kf->pexpr,
                           kf->expr_const_values, NULL);
        ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                kf->expr_const_values[FKF_N],
                kf->expr_const_values[FKF_N_FORCED],
                kf->expr_const_values[FKF_PREV_FORCED_N],
                kf->expr_const_values[FKF_T],
                kf->expr_const_values[FKF_PREV_FORCED_T],
                res);

        kf->expr_const_values[FKF_N] += 1;

        if (res) {
            /* record this frame so the expression can reference it next time */
            kf->expr_const_values[FKF_PREV_FORCED_N] = kf->expr_const_values[FKF_N] - 1;
            kf->expr_const_values[FKF_PREV_FORCED_T] = kf->expr_const_values[FKF_T];
            kf->expr_const_values[FKF_N_FORCED] += 1;
            goto force_keyframe;
        }
    } else if (kf->type == KF_FORCE_SOURCE &&
               in_picture->key_frame == 1 && !dup_idx) {
        goto force_keyframe;
    } else if (kf->type == KF_FORCE_SOURCE_NO_DROP && !dup_idx) {
        /* also force when a source keyframe was dropped since the last frame */
        kf->dropped_keyframe = 0;
        if ((in_picture->key_frame == 1) || kf->dropped_keyframe)
            goto force_keyframe;
    }

    return AV_PICTURE_TYPE_NONE;

force_keyframe:
    av_log(logctx, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
    return AV_PICTURE_TYPE_I;
}
/* May modify/reset next_picture */
/* Run framerate conversion on one filtered video frame (or NULL when
 * flushing): decide how often this and/or the previous frame must be
 * emitted, apply forced keyframes and submit each copy to the encoder. */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture)
{
    int ret;
    AVCodecContext *enc = ost->enc_ctx;
    AVRational frame_rate;
    int64_t nb_frames, nb_frames_prev, i;
    double duration = 0;
    InputStream *ist = ost->ist;
    AVFilterContext *filter = ost->filter->filter;

    /* the encoder must be open before its time_base can be used below */
    init_output_stream_wrapper(ost, next_picture, 1);

    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* with no filtering configured, trust the input frame's own duration */
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        nb_frames_prev = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                              ost->last_nb0_frames[1],
                                              ost->last_nb0_frames[2]);
    } else {
        video_sync_process(of, ost, next_picture, duration,
                           &nb_frames, &nb_frames_prev);
    }

    /* shift the 3-entry history of previous-frame duplication counts */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb_frames_prev;

    if (nb_frames_prev == 0 && ost->last_dropped) {
        nb_frames_drop++;
        av_log(ost, AV_LOG_VERBOSE,
               "*** dropping frame %"PRId64" at ts %"PRId64"\n",
               ost->vsync_frame_number, ost->last_frame->pts);
    }
    if (nb_frames > (nb_frames_prev && ost->last_dropped) + (nb_frames > nb_frames_prev)) {
        /* refuse runaway duplication caused by broken timestamps */
        if (nb_frames > dts_error_threshold * 30) {
            av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
        nb_frames_dup += nb_frames - (nb_frames_prev && ost->last_dropped) - (nb_frames > nb_frames_prev);
        av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb_frames_prev && next_picture;
    ost->kf.dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;

        /* the first nb_frames_prev iterations re-emit the previous frame */
        if (i < nb_frames_prev && ost->last_frame->buf[0]) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->next_pts;

        if (!check_recording_time(ost, in_picture->pts, ost->enc_ctx->time_base))
            return;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = forced_kf_apply(ost, &ost->kf, enc->time_base, in_picture, i);

        if (ost->top_field_first >= 0)
            in_picture->top_field_first = !!ost->top_field_first;

        ret = submit_encode_frame(of, ost, in_picture);
        if (ret == AVERROR_EOF)
            break;
        else if (ret < 0)
            exit_program(1);

        ost->next_pts++;
        ost->vsync_frame_number++;
    }

    /* keep the last frame around so it can be duplicated next time */
    av_frame_unref(ost->last_frame);
    if (next_picture)
        av_frame_move_ref(ost->last_frame, next_picture);
}
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity.
 *
 * @param flush when non-zero, an EOF from a video buffersink also flushes
 *              the corresponding encoder
 * @return 0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;

    /* Reap all buffers present in the buffer sinks */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        OutputFile *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams whose filtergraph is not configured yet */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /*
         * Unlike video, with audio the audio frame size matters.
         * Currently we are fully reliant on the lavfi filter chain to
         * do the buffering deed for us, and thus the frame size parameter
         * needs to be set accordingly. Where does one get the required
         * frame size? From the initialized AVCodecContext of an audio
         * encoder. Thus, if we have gotten to an audio stream, initialize
         * the encoder earlier than receiving the first AVFrame.
         */
        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        filtered_frame = ost->filtered_frame;

        while (1) {
            /* NO_REQUEST: only take frames the graph already produced */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL);
                }
                break;
            }
            if (ost->finished) {
                /* drop frames for already-closed streams */
                av_frame_unref(filtered_frame);
                continue;
            }

            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                AVRational tb = av_buffersink_get_time_base(filter);
                ost->last_filter_pts = av_rescale_q(filtered_frame->pts, tb,
                                                    AV_TIME_BASE_Q);
                filtered_frame->time_base = tb;

                if (debug_ts)
                    av_log(NULL, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
                           av_ts2str(filtered_frame->pts),
                           av_ts2timestr(filtered_frame->pts, &tb),
                           tb.num, tb.den);
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                do_video_out(of, ost, filtered_frame);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
/* Print the final size summary (bytes muxed per media type, global
 * headers, muxing overhead) at INFO level, then per-stream packet/frame
 * counters for every input and output file at VERBOSE level.
 * total_size: muxer-reported size of the first output file, negative if
 * unknown. */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;  /* muxing overhead in %; -1 means "unknown" */
    int i, j;
    int pass1_used = 1;    /* cleared if any encoder was not in pass-1-only mode */

    /* accumulate the bytes actually muxed, per media type */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        AVCodecParameters *par = ost->st->codecpar;
        const uint64_t s = ost->data_size_mux;
        switch (par->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += s; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += s; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += s; break;
        default: other_size += s; break;
        }
        extra_size += par->extradata_size;  /* global headers */
        data_size += s;
        /* pass1_used stays set only if every encoder had exactly PASS1 set */
        if (ost->enc_ctx &&
            (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }
    /* overhead is only computable when the container size is known and at
     * least as large as the payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;
    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");
    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;  /* NOTE: shadows the parameter */
        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);
        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = f->streams[j];
            enum AVMediaType type = ist->par->codec_type;
            total_size += ist->data_size;
            total_packets += ist->nb_packets;
            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, av_get_media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);
            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }
            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }
        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;  /* NOTE: shadows the parameter */
        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->url);
        for (j = 0; j < of->nb_streams; j++) {
            OutputStream *ost = of->streams[j];
            enum AVMediaType type = ost->st->codecpar->codec_type;
            total_size += ost->data_size_mux;
            total_packets += atomic_load(&ost->packets_written);
            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, av_get_media_type_string(type));
            if (ost->enc_ctx) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   atomic_load(&ost->packets_written), ost->data_size_mux);
            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }
        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        /* pass 1 of a two-pass encode legitimately produces an empty file,
         * so only suggest parameter checks otherwise */
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
/* Print the periodic status line ("frame= fps= q= size= time= bitrate=
 * speed=") and, if -progress was requested, write machine-readable
 * key=value pairs to progress_avio. On the last report, also prints the
 * final statistics summary.
 * timer_start/cur_time: microsecond timestamps used to derive elapsed
 * wall-clock time (and therefore fps and speed). */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    int64_t total_size = of_filesize(output_files[0]);  /* < 0 if unknown */
    int vid;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;  /* latest mux dts seen across all streams */
    static int64_t last_time = -1;
    static int first_report = 1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;
    if (!print_stats && !is_last_report && !progress_avio)
        return;
    /* rate-limit intermediate reports to one per stats_period; the first
     * report additionally waits until all output headers were written */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        }
        if (((cur_time - last_time) < stats_period && !first_report) ||
            (first_report && nb_output_dumped < nb_output_files))
            return;
        last_time = cur_time;
    }
    t = (cur_time-timer_start) / 1000000.0;  /* elapsed wall-clock seconds */
    vid = 0;  /* set once the first video stream has been reported */
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        const AVCodecContext * const enc = ost->enc_ctx;
        const float q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1;
        /* additional video streams: report only their quantizer */
        if (vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        /* first video stream: frame count, fps, q, QP histogram, PSNR */
        if (!vid && ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;
            uint64_t frame_number = atomic_load(&ost->packets_written);
            fps = t > 1 ? frame_number / t : 0;
            /* one decimal place for fps below 9.95, none above */
            av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
                       frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* print log2 of counts for QPs 0..31 as single hex digits */
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }
            if (enc && (enc->flags & AV_CODEC_FLAG_PSNR) &&
                (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];  /* accumulated over all frames */
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];  /* last frame only */
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes cover a quarter of the luma area */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* track the furthest point muxed so far (FFMAX over mux dts),
         * shifted by the first pts when -copyts is in effect */
        if (ost->last_mux_dts != AV_NOPTS_VALUE) {
            pts = FFMAX(pts, ost->last_mux_dts);
            if (copy_ts) {
                if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
                    copy_ts_first_pts = pts;
                if (copy_ts_first_pts != AV_NOPTS_VALUE)
                    pts -= copy_ts_first_pts;
            }
        }
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }
    /* split the microsecond pts into sign, h:m:s and fractional part */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";
    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }
    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }
    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        /* same microsecond value as out_time_us (kept for compatibility) */
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }
    if (nb_frames_dup || nb_frames_drop)
        av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop);
    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }
    if (print_stats || is_last_report) {
        /* carriage return overwrites the line in place until the last report */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);
    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }
    first_report = 0;
    if (is_last_report)
        print_final_stats(total_size);
}
  1516. static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
  1517. {
  1518. int ret;
  1519. // We never got any input. Set a fake format, which will
  1520. // come from libavformat.
  1521. ifilter->format = par->format;
  1522. ifilter->sample_rate = par->sample_rate;
  1523. ifilter->width = par->width;
  1524. ifilter->height = par->height;
  1525. ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
  1526. ret = av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout);
  1527. if (ret < 0)
  1528. return ret;
  1529. return 0;
  1530. }
  1531. static void flush_encoders(void)
  1532. {
  1533. int ret;
  1534. for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
  1535. OutputFile *of = output_files[ost->file_index];
  1536. if (ost->sq_idx_encode >= 0)
  1537. sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL));
  1538. }
  1539. for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
  1540. AVCodecContext *enc = ost->enc_ctx;
  1541. OutputFile *of = output_files[ost->file_index];
  1542. if (!enc)
  1543. continue;
  1544. // Try to enable encoding with no input frames.
  1545. // Maybe we should just let encoding fail instead.
  1546. if (!ost->initialized) {
  1547. FilterGraph *fg = ost->filter->graph;
  1548. av_log(ost, AV_LOG_WARNING,
  1549. "Finishing stream without any data written to it.\n");
  1550. if (ost->filter && !fg->graph) {
  1551. int x;
  1552. for (x = 0; x < fg->nb_inputs; x++) {
  1553. InputFilter *ifilter = fg->inputs[x];
  1554. if (ifilter->format < 0 &&
  1555. ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par) < 0) {
  1556. av_log(ost, AV_LOG_ERROR, "Error copying paramerets from input stream\n");
  1557. exit_program(1);
  1558. }
  1559. }
  1560. if (!ifilter_has_all_input_formats(fg))
  1561. continue;
  1562. ret = configure_filtergraph(fg);
  1563. if (ret < 0) {
  1564. av_log(ost, AV_LOG_ERROR, "Error configuring filter graph\n");
  1565. exit_program(1);
  1566. }
  1567. of_output_packet(of, ost->pkt, ost, 1);
  1568. }
  1569. init_output_stream_wrapper(ost, NULL, 1);
  1570. }
  1571. if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
  1572. continue;
  1573. ret = submit_encode_frame(of, ost, NULL);
  1574. if (ret != AVERROR_EOF)
  1575. exit_program(1);
  1576. }
  1577. }
  1578. /*
  1579. * Check whether a packet from ist should be written into ost at this time
  1580. */
  1581. static int check_output_constraints(InputStream *ist, OutputStream *ost)
  1582. {
  1583. OutputFile *of = output_files[ost->file_index];
  1584. if (ost->ist != ist)
  1585. return 0;
  1586. if (ost->finished & MUXER_FINISHED)
  1587. return 0;
  1588. if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
  1589. return 0;
  1590. return 1;
  1591. }
/* Remux one input packet into ost without re-encoding.
 * pkt == NULL signals EOF and flushes the output bitstream filters.
 * Handles start-time/recording-time trimming, dropping of initial
 * non-keyframes, and rescaling of pts/dts/duration from the input stream
 * timebase to the muxer timebase. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket *opkt = ost->pkt;
    av_packet_unref(opkt);
    // EOF: flush output bitstream filters.
    if (!pkt) {
        of_output_packet(of, opkt, ost, 1);
        return;
    }
    /* drop everything before the first keyframe unless explicitly allowed */
    if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) &&
        !ost->copy_initial_nonkeyframes)
        return;
    /* drop packets before the copy start point; fall back to the
     * demuxer-estimated position when the packet has no pts */
    if (!ost->streamcopy_started && !ost->copy_prior_start) {
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < ost->ts_copy_start :
            pkt->pts < av_rescale_q(ost->ts_copy_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }
    /* output file reached its recording-time (-t) limit */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }
    /* input file reached its own recording-time limit */
    if (f->recording_time != INT64_MAX) {
        start_time = 0;
        if (copy_ts) {
            start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
            start_time += start_at_zero ? 0 : f->start_time_effective;
        }
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }
    if (av_packet_ref(opkt, pkt) < 0)
        exit_program(1);
    opkt->time_base = ost->mux_timebase;
    if (pkt->pts != AV_NOPTS_VALUE)
        opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, opkt->time_base) - ost_tb_start_time;
    if (pkt->dts == AV_NOPTS_VALUE) {
        /* no dts in the packet: use the demuxer-estimated position */
        opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, opkt->time_base);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* audio: rescale via sample counts to avoid cumulative rounding */
        int duration = av_get_audio_frame_duration2(ist->par, pkt->size);
        if(!duration)
            duration = ist->par->frame_size;
        opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                     (AVRational){1, ist->par->sample_rate}, duration,
                                     &ist->filter_in_rescale_delta_last, opkt->time_base);
        /* dts will be set immediately afterwards to what pts is now */
        opkt->pts = opkt->dts - ost_tb_start_time;
    } else
        opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, opkt->time_base);
    opkt->dts -= ost_tb_start_time;
    opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, opkt->time_base);
    {
        int ret = trigger_fix_sub_duration_heartbeat(ost, pkt);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Subtitle heartbeat logic failed in %s! (%s)\n",
                   __func__, av_err2str(ret));
            exit_program(1);
        }
    }
    of_output_packet(of, opkt, ost, 0);
    ost->streamcopy_started = 1;
}
  1662. static void check_decode_result(InputStream *ist, int *got_output, int ret)
  1663. {
  1664. if (*got_output || ret<0)
  1665. decode_error_stat[ret<0] ++;
  1666. if (ret < 0 && exit_on_error)
  1667. exit_program(1);
  1668. if (*got_output && ist) {
  1669. if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
  1670. av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
  1671. "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
  1672. if (exit_on_error)
  1673. exit_program(1);
  1674. }
  1675. }
  1676. }
  1677. // Filters can be configured only if the formats of all inputs are known.
  1678. static int ifilter_has_all_input_formats(FilterGraph *fg)
  1679. {
  1680. int i;
  1681. for (i = 0; i < fg->nb_inputs; i++) {
  1682. if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
  1683. fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
  1684. return 0;
  1685. }
  1686. return 1;
  1687. }
/* Submit a decoded frame to one filtergraph input, (re)configuring the
 * graph first if the frame's parameters changed. If other inputs of the
 * graph still have unknown formats, the frame is buffered instead.
 * keep_reference: keep a reference for the caller (frame is also sent to
 * other filtergraphs). Returns 0 or a negative AVERROR. */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
{
    FilterGraph *fg = ifilter->graph;
    AVFrameSideData *sd;
    int need_reinit, ret;
    int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
    if (keep_reference)
        buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;
    switch (ifilter->ist->par->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }
    /* once configured, only reinit when the user allowed it (-reinit_filter) */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;
    /* a hw frames context appearing, disappearing or changing forces reinit */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;
    /* a changed (or newly appeared/removed) display matrix forces reinit;
     * note: assignment inside the condition is intentional */
    if (sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX)) {
        if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
            need_reinit = 1;
    } else if (ifilter->displaymatrix)
        need_reinit = 1;
    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }
    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        if (!ifilter_has_all_input_formats(fg)) {
            /* cannot configure yet: park a copy of the frame in the queue */
            AVFrame *tmp = av_frame_clone(frame);
            if (!tmp)
                return AVERROR(ENOMEM);
            ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
            if (ret < 0)
                av_frame_free(&tmp);
            return ret;
        }
        /* drain the existing graph before replacing it */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }
        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }
    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }
    return 0;
}
  1753. static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
  1754. {
  1755. int ret;
  1756. ifilter->eof = 1;
  1757. if (ifilter->filter) {
  1758. ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
  1759. if (ret < 0)
  1760. return ret;
  1761. } else {
  1762. // the filtergraph was never configured
  1763. if (ifilter->format < 0) {
  1764. ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->par);
  1765. if (ret < 0)
  1766. return ret;
  1767. }
  1768. if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
  1769. av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
  1770. return AVERROR_INVALIDDATA;
  1771. }
  1772. }
  1773. return 0;
  1774. }
  1775. // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
  1776. // There is the following difference: if you got a frame, you must call
  1777. // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
  1778. // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
  1779. static int decode(InputStream *ist, AVCodecContext *avctx,
  1780. AVFrame *frame, int *got_frame, AVPacket *pkt)
  1781. {
  1782. int ret;
  1783. *got_frame = 0;
  1784. if (pkt) {
  1785. ret = avcodec_send_packet(avctx, pkt);
  1786. // In particular, we don't expect AVERROR(EAGAIN), because we read all
  1787. // decoded frames with avcodec_receive_frame() until done.
  1788. if (ret < 0 && ret != AVERROR_EOF)
  1789. return ret;
  1790. }
  1791. ret = avcodec_receive_frame(avctx, frame);
  1792. if (ret < 0 && ret != AVERROR(EAGAIN))
  1793. return ret;
  1794. if (ret >= 0) {
  1795. if (ist->want_frame_data) {
  1796. FrameData *fd;
  1797. av_assert0(!frame->opaque_ref);
  1798. frame->opaque_ref = av_buffer_allocz(sizeof(*fd));
  1799. if (!frame->opaque_ref) {
  1800. av_frame_unref(frame);
  1801. return AVERROR(ENOMEM);
  1802. }
  1803. fd = (FrameData*)frame->opaque_ref->data;
  1804. fd->pts = frame->pts;
  1805. fd->tb = avctx->pkt_timebase;
  1806. fd->idx = avctx->frame_num - 1;
  1807. }
  1808. *got_frame = 1;
  1809. }
  1810. return 0;
  1811. }
  1812. static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
  1813. {
  1814. int i, ret;
  1815. av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
  1816. for (i = 0; i < ist->nb_filters; i++) {
  1817. ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
  1818. if (ret == AVERROR_EOF)
  1819. ret = 0; /* ignore */
  1820. if (ret < 0) {
  1821. av_log(NULL, AV_LOG_ERROR,
  1822. "Failed to inject frame into filter network: %s\n", av_err2str(ret));
  1823. break;
  1824. }
  1825. }
  1826. return ret;
  1827. }
/* Decode one audio packet (or drain with pkt==NULL) and send the frame to
 * the filtergraphs feeding this stream.
 * *got_output is set when a frame was produced; *decode_failed is set on
 * decoder errors so the caller can tell them apart from filter errors.
 * Returns the decoder return code, or the filter error if sending failed. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame = ist->decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;
    update_benchmark(NULL);
    ret = decode(ist, avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;
    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);
    if (!*got_output || ret < 0)
        return ret;
    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     decoded_frame->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     decoded_frame->sample_rate;
    /* choose a pts and its timebase: frame pts first, then packet pts
     * (both in stream timebase), finally the demuxer-estimated dts */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    /* a pts gap larger than one packet duration looks like a discontinuity:
     * reset the sample-accurate rescaling state */
    if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
        pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
        ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
    if (pkt)
        ist->prev_pkt_pts = pkt->pts;
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, decoded_frame->sample_rate},
                                              decoded_frame->nb_samples,
                                              &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, decoded_frame->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
/* Decode one video packet (or drain at EOF) and forward the frame to the
 * filtergraphs feeding this stream.
 * *duration_pts receives the frame duration (stream timebase units);
 * *decode_failed distinguishes decoder errors from filter errors; eof
 * selects drain mode. Returns the decoder/filter error code. */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame = ist->decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;
    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        pkt->dts = dts; // ffmpeg.c probably shouldn't do this
    }
    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* queue the dts so frames drained after EOF can still be stamped */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }
    update_benchmark(NULL);
    ret = decode(ist, ist->dec_ctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;
    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->par->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->par->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to https://streams.videolan.org/upload/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->par->video_delay);
    }
    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);
    /* purely informational: decoder context vs. frame parameter mismatch */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }
    if (!*got_output || ret < 0)
        return ret;
    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;
    ist->frames_decoded++;
    /* retrieve the frame from the hardware device when required */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->duration;
    /* forced input frame rate: generate sequential CFR timestamps instead */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;
    /* draining: fall back to the dts values queued at EOF (FIFO order) */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];
        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }
    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }
    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
    err = send_frame_to_filters(ist, decoded_frame);
fail:
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
/* Handle one decoded subtitle: optionally clamp the previous subtitle's
 * display duration (-fix_sub_duration, which delays output by one
 * subtitle), feed it to sub2video filtergraphs, and encode it to every
 * matching subtitle output stream.
 * The subtitle is freed on return unless ownership moved into the
 * sub2video queue. */
static int process_subtitle(InputStream *ist, AVSubtitle *subtitle, int *got_output)
{
    int ret = 0;
    int free_sub = 1;
    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* end of the previous subtitle, in ms, relative to its start:
             * clamp it so it ends when the current subtitle begins */
            end = av_rescale(subtitle->pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(NULL, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* swap current and previous: from here on we process the PREVIOUS
         * subtitle, whose duration is now final */
        FFSWAP(int, *got_output, ist->prev_sub.got_output);
        FFSWAP(int, ret, ist->prev_sub.ret);
        FFSWAP(AVSubtitle, *subtitle, ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }
    if (!*got_output)
        return ret;
    if (ist->sub2video.frame) {
        sub2video_update(ist, INT64_MIN, subtitle);
    } else if (ist->nb_filters) {
        /* sub2video not started yet: queue the subtitle for later */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc2(8, sizeof(AVSubtitle), AV_FIFO_FLAG_AUTO_GROW);
        if (!ist->sub2video.sub_queue)
            report_and_exit(AVERROR(ENOMEM));
        ret = av_fifo_write(ist->sub2video.sub_queue, subtitle, 1);
        if (ret < 0)
            exit_program(1);
        free_sub = 0;  /* ownership moved into the queue */
    }
    if (!subtitle->num_rects)
        goto out;
    /* encode to every subtitle output stream fed by this input */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        if (!check_output_constraints(ist, ost) || !ost->enc_ctx
            || ost->enc_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)
            continue;
        do_subtitle_out(output_files[ost->file_index], ost, subtitle);
    }
out:
    if (free_sub)
        avsubtitle_free(subtitle);
    return ret;
}
/* Deep-copy an AVSubtitle, including all rects with their bitmap planes
 * and text/ass strings. On success *dst owns the copy; on failure the
 * partial copy is freed and a negative AVERROR is returned (nothing
 * leaks, *dst is untouched). */
static int copy_av_subtitle(AVSubtitle *dst, AVSubtitle *src)
{
    int ret = AVERROR_BUG;
    /* build the copy in a temporary so *dst is only written on success */
    AVSubtitle tmp = {
        .format = src->format,
        .start_display_time = src->start_display_time,
        .end_display_time = src->end_display_time,
        .num_rects = 0,
        .rects = NULL,
        .pts = src->pts
    };
    if (!src->num_rects)
        goto success;
    if (!(tmp.rects = av_calloc(src->num_rects, sizeof(*tmp.rects))))
        return AVERROR(ENOMEM);
    for (int i = 0; i < src->num_rects; i++) {
        AVSubtitleRect *src_rect = src->rects[i];
        AVSubtitleRect *dst_rect;
        if (!(dst_rect = tmp.rects[i] = av_mallocz(sizeof(*tmp.rects[0])))) {
            ret = AVERROR(ENOMEM);
            goto cleanup;
        }
        /* count as we go so cleanup frees only what was allocated */
        tmp.num_rects++;
        dst_rect->type = src_rect->type;
        dst_rect->flags = src_rect->flags;
        dst_rect->x = src_rect->x;
        dst_rect->y = src_rect->y;
        dst_rect->w = src_rect->w;
        dst_rect->h = src_rect->h;
        dst_rect->nb_colors = src_rect->nb_colors;
        if (src_rect->text)
            if (!(dst_rect->text = av_strdup(src_rect->text))) {
                ret = AVERROR(ENOMEM);
                goto cleanup;
            }
        if (src_rect->ass)
            if (!(dst_rect->ass = av_strdup(src_rect->ass))) {
                ret = AVERROR(ENOMEM);
                goto cleanup;
            }
        for (int j = 0; j < 4; j++) {
            // SUBTITLE_BITMAP images are special in the sense that they
            // are like PAL8 images. first pointer to data, second to
            // palette. This makes the size calculation match this.
            size_t buf_size = src_rect->type == SUBTITLE_BITMAP && j == 1 ?
                              AVPALETTE_SIZE :
                              src_rect->h * src_rect->linesize[j];
            if (!src_rect->data[j])
                continue;
            if (!(dst_rect->data[j] = av_memdup(src_rect->data[j], buf_size))) {
                ret = AVERROR(ENOMEM);
                goto cleanup;
            }
            dst_rect->linesize[j] = src_rect->linesize[j];
        }
    }
success:
    *dst = tmp;
    return 0;
cleanup:
    avsubtitle_free(&tmp);
    return ret;
}
  2092. static int fix_sub_duration_heartbeat(InputStream *ist, int64_t signal_pts)
  2093. {
  2094. int ret = AVERROR_BUG;
  2095. int got_output = 1;
  2096. AVSubtitle *prev_subtitle = &ist->prev_sub.subtitle;
  2097. AVSubtitle subtitle;
  2098. if (!ist->fix_sub_duration || !prev_subtitle->num_rects ||
  2099. signal_pts <= prev_subtitle->pts)
  2100. return 0;
  2101. if ((ret = copy_av_subtitle(&subtitle, prev_subtitle)) < 0)
  2102. return ret;
  2103. subtitle.pts = signal_pts;
  2104. return process_subtitle(ist, &subtitle, &got_output);
  2105. }
  2106. static int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt)
  2107. {
  2108. OutputFile *of = output_files[ost->file_index];
  2109. int64_t signal_pts = av_rescale_q(pkt->pts, pkt->time_base,
  2110. AV_TIME_BASE_Q);
  2111. if (!ost->fix_sub_duration_heartbeat || !(pkt->flags & AV_PKT_FLAG_KEY))
  2112. // we are only interested in heartbeats on streams configured, and
  2113. // only on random access points.
  2114. return 0;
  2115. for (int i = 0; i < of->nb_streams; i++) {
  2116. OutputStream *iter_ost = of->streams[i];
  2117. InputStream *ist = iter_ost->ist;
  2118. int ret = AVERROR_BUG;
  2119. if (iter_ost == ost || !ist || !ist->decoding_needed ||
  2120. ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE)
  2121. // We wish to skip the stream that causes the heartbeat,
  2122. // output streams without an input stream, streams not decoded
  2123. // (as fix_sub_duration is only done for decoded subtitles) as
  2124. // well as non-subtitle streams.
  2125. continue;
  2126. if ((ret = fix_sub_duration_heartbeat(ist, signal_pts)) < 0)
  2127. return ret;
  2128. }
  2129. return 0;
  2130. }
  2131. static int transcode_subtitles(InputStream *ist, const AVPacket *pkt,
  2132. int *got_output, int *decode_failed)
  2133. {
  2134. AVSubtitle subtitle;
  2135. int ret = avcodec_decode_subtitle2(ist->dec_ctx,
  2136. &subtitle, got_output, pkt);
  2137. check_decode_result(NULL, got_output, ret);
  2138. if (ret < 0 || !*got_output) {
  2139. *decode_failed = 1;
  2140. if (!pkt->size)
  2141. sub2video_flush(ist);
  2142. return ret;
  2143. }
  2144. ist->frames_decoded++;
  2145. return process_subtitle(ist, &subtitle, got_output);
  2146. }
  2147. static int send_filter_eof(InputStream *ist)
  2148. {
  2149. int i, ret;
  2150. /* TODO keep pts also in stream time base to avoid converting back */
  2151. int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
  2152. AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
  2153. for (i = 0; i < ist->nb_filters; i++) {
  2154. ret = ifilter_send_eof(ist->filters[i], pts);
  2155. if (ret < 0)
  2156. return ret;
  2157. }
  2158. return 0;
  2159. }
/* Process one input packet for a stream: decode it (audio/video/subtitle) or
 * advance timestamps for stream copy, then feed matching streamcopy outputs.
 * pkt = NULL means EOF (needed to flush decoder buffers); no_eof suppresses
 * the filtergraph EOF signal (used when looping the input).
 * Returns 0 once EOF has been fully reached, non-zero otherwise. */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    const AVCodecParameters *par = ist->par;
    int ret = 0;
    int repeating = 0;      // set after the first decode iteration for this packet
    int eof_reached = 0;

    AVPacket *avpkt = ist->pkt;

    /* First packet for this stream: establish initial dts/pts. The initial
     * dts is shifted back by the decoder delay (has_b_frames frames) when an
     * average frame rate is known. */
    if (!ist->saw_first_ts) {
        ist->first_dts =
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->first_dts =
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; // unused but better to set it to a value that's not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    /* Keep our own reference; avpkt (ist->pkt) is reused across calls. */
    if (pkt) {
        av_packet_unref(avpkt);
        ret = av_packet_ref(avpkt, pkt);
        if (ret < 0)
            return ret;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (par->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, repeating ? NULL : avpkt, &got_output,
                                   &decode_failed);
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    /* No container duration: estimate one frame's duration
                     * from the codec frame rate, accounting for repeated
                     * fields/frames via last_pkt_repeat_pict. */
                    int ticks = ist->last_pkt_repeat_pict >= 0 ?
                                ist->last_pkt_repeat_pict + 1  :
                                ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                /* Prefer the decoder-reported frame duration; fall back to
                 * the dts-based estimate. */
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            av_packet_unref(avpkt);
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            /* Predict the next dts from frame_size/sample_rate when known,
             * otherwise from the packet duration. */
            if (par->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) /
                                  par->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                /* Forced input frame rate (-r before -i): snap next_dts to
                 * the forced frame grid. */
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks = ist->last_pkt_repeat_pict >= 0 ?
                            ist->last_pkt_repeat_pict + 1  :
                            ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    } else if (!ist->decoding_needed)
        eof_reached = 1;

    /* Forward the packet (or EOF) to all matching streamcopy outputs. */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        if (!check_output_constraints(ist, ost) || ost->enc_ctx ||
            (!pkt && no_eof))
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
/* AVCodecContext.get_format callback: pick a pixel format from the decoder's
 * candidate list. Hwaccel formats are tried first (the list ends at the
 * first software format); a candidate is accepted when a matching hw config
 * exists for the requested device type and hwaccel init succeeds. */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor  *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig *config = NULL;
        int i;

        /* First non-hwaccel entry: stop and use it as the sw fallback. */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* Find the codec's hw config that uses a device context and
             * advertises this pixel format; config stays NULL if none. */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config && config->device_type == ist->hwaccel_device_type) {
            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                /* An explicitly requested hwaccel that fails to init is
                 * fatal; with -hwaccel auto we just try the next format. */
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }

            ist->hwaccel_pix_fmt = *p;
            break;
        }
    }

    return *p;
}
  2374. static int init_input_stream(InputStream *ist, char *error, int error_len)
  2375. {
  2376. int ret;
  2377. if (ist->decoding_needed) {
  2378. const AVCodec *codec = ist->dec;
  2379. if (!codec) {
  2380. snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
  2381. avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
  2382. return AVERROR(EINVAL);
  2383. }
  2384. ist->dec_ctx->opaque = ist;
  2385. ist->dec_ctx->get_format = get_format;
  2386. if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
  2387. (ist->decoding_needed & DECODING_FOR_OST)) {
  2388. av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
  2389. if (ist->decoding_needed & DECODING_FOR_FILTER)
  2390. av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
  2391. }
  2392. /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
  2393. * audio, and video decoders such as cuvid or mediacodec */
  2394. ist->dec_ctx->pkt_timebase = ist->st->time_base;
  2395. if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
  2396. av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
  2397. /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
  2398. if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
  2399. av_dict_set(&ist->decoder_opts, "threads", "1", 0);
  2400. ret = hw_device_setup_for_decode(ist);
  2401. if (ret < 0) {
  2402. snprintf(error, error_len, "Device setup failed for "
  2403. "decoder on input stream #%d:%d : %s",
  2404. ist->file_index, ist->st->index, av_err2str(ret));
  2405. return ret;
  2406. }
  2407. if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
  2408. if (ret == AVERROR_EXPERIMENTAL)
  2409. abort_codec_experimental(codec, 0);
  2410. snprintf(error, error_len,
  2411. "Error while opening decoder for input stream "
  2412. "#%d:%d : %s",
  2413. ist->file_index, ist->st->index, av_err2str(ret));
  2414. return ret;
  2415. }
  2416. assert_avoptions(ist->decoder_opts);
  2417. }
  2418. ist->next_pts = AV_NOPTS_VALUE;
  2419. ist->next_dts = AV_NOPTS_VALUE;
  2420. return 0;
  2421. }
/* Initialize an output stream that is stream-copied (no re-encoding):
 * duplicate the input codec parameters, choose a codec tag valid for the
 * output container, and carry over timing information and side data. */
static int init_output_stream_streamcopy(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = ost->ist;
    InputFile *ifile = input_files[ist->file_index];
    AVCodecParameters *par = ost->st->codecpar;
    AVCodecContext *codec_ctx;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par->codec_tag;

    av_assert0(ist && !ost->filter);

    /* Round-trip the input parameters through a temporary codec context so
     * user-supplied options in ost->encoder_opts are applied on top. */
    codec_ctx = avcodec_alloc_context3(NULL);
    if (!codec_ctx)
        return AVERROR(ENOMEM);

    ret = avcodec_parameters_to_context(codec_ctx, ist->par);
    if (ret >= 0)
        ret = av_opt_set_dict(codec_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(ost, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        avcodec_free_context(&codec_ctx);
        return ret;
    }

    ret = avcodec_parameters_from_context(par, codec_ctx);
    avcodec_free_context(&codec_ctx);
    if (ret < 0) {
        av_log(ost, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }

    /* Keep the input tag only when the output container has no tag table,
     * maps this tag to the same codec id, or has no tag of its own for this
     * codec id; otherwise the tag is dropped (left 0). */
    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        if (!of->format->codec_tag ||
            av_codec_get_id (of->format->codec_tag, par->codec_tag) == par->codec_id ||
            !av_codec_get_tag2(of->format->codec_tag, par->codec_id, &codec_tag_tmp))
            codec_tag = par->codec_tag;
    }

    par->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;

    if (ost->frame_rate.num)
        ost->st->avg_frame_rate = ost->frame_rate;
    else
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->format, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
        if (ost->frame_rate.num)
            ost->st->time_base = av_inv_q(ost->frame_rate);
        else
            ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
    }

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    if (!ost->copy_prior_start) {
        /* Earliest timestamp from which packets are copied: the output
         * start time, pushed later under -copyts by the input start. */
        ost->ts_copy_start = (of->start_time == AV_NOPTS_VALUE) ?
                             0 : of->start_time;
        if (copy_ts && ifile->start_time != AV_NOPTS_VALUE) {
            ost->ts_copy_start = FFMAX(ost->ts_copy_start,
                                       ifile->start_time + ifile->ts_offset);
        }
    }

    /* Copy all stream-level side data verbatim. */
    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

#if FFMPEG_ROTATION_METADATA
    if (ost->rotate_overridden) {
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }
#endif

    switch (par->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* NOTE(review): presumably these block_align values are unreliable
         * for MP3/AC3 and resetting lets the muxer recompute — confirm. */
        if ((par->block_align == 1 || par->block_align == 1152 || par->block_align == 576) &&
            par->codec_id == AV_CODEC_ID_MP3)
            par->block_align = 0;
        if (par->codec_id == AV_CODEC_ID_AC3)
            par->block_align = 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par->height, par->width });
            av_log(ost, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
            }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
  2533. static void set_encoder_id(OutputFile *of, OutputStream *ost)
  2534. {
  2535. const char *cname = ost->enc_ctx->codec->name;
  2536. uint8_t *encoder_string;
  2537. int encoder_string_len;
  2538. if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
  2539. return;
  2540. encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
  2541. encoder_string = av_mallocz(encoder_string_len);
  2542. if (!encoder_string)
  2543. report_and_exit(AVERROR(ENOMEM));
  2544. if (!of->bitexact && !ost->bitexact)
  2545. av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
  2546. else
  2547. av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
  2548. av_strlcat(encoder_string, cname, encoder_string_len);
  2549. av_dict_set(&ost->st->metadata, "encoder", encoder_string,
  2550. AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
  2551. }
  2552. static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
  2553. {
  2554. InputStream *ist = ost->ist;
  2555. AVCodecContext *enc_ctx = ost->enc_ctx;
  2556. if (ost->enc_timebase.num > 0) {
  2557. enc_ctx->time_base = ost->enc_timebase;
  2558. return;
  2559. }
  2560. if (ost->enc_timebase.num < 0) {
  2561. if (ist) {
  2562. enc_ctx->time_base = ist->st->time_base;
  2563. return;
  2564. }
  2565. av_log(ost, AV_LOG_WARNING,
  2566. "Input stream data not available, using default time base\n");
  2567. }
  2568. enc_ctx->time_base = default_time_base;
  2569. }
/* Configure the encoder context for an output stream: resolve the frame
 * rate, take format/geometry/layout from the filtergraph sink, set the
 * encoder time base, and fill codec-type specific fields. frame (may be
 * NULL) supplies color/interlacing properties for video. */
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
{
    InputStream *ist = ost->ist;
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    OutputFile      *of = output_files[ost->file_index];
    int ret;

    set_encoder_id(output_files[ost->file_index], ost);

    if (ist) {
        dec_ctx = ist->dec_ctx;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame rate: user option, then filtergraph, then a 25 fps default;
         * clamped by -fpsmax and snapped to the encoder's supported list. */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (!ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(ost, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps. Use the -r option "
                   "if you want a different framerate.\n");
        }

        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
             !ost->frame_rate.den))
            ost->frame_rate = ost->max_frame_rate;

        if (enc_ctx->codec->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, enc_ctx->codec->supported_framerates);
            ost->frame_rate = enc_ctx->codec->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* Audio parameters come from the buffersink feeding the encoder. */
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout);
        if (ret < 0)
            return ret;

        if (ost->bits_per_raw_sample)
            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
        else if (dec_ctx && ost->filter->graph->is_meta)
            /* Metadata-only filter chain: propagate the decoder's raw sample
             * depth, capped by the sample format's size. */
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if (   av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
           && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
               (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
            av_log(ost, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                        "Please consider specifying a lower framerate, a different muxer or "
                                        "setting vsync/fps_mode to vfr\n");
        }

        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);

        if (ost->bits_per_raw_sample)
            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
        else if (dec_ctx && ost->filter->graph->is_meta)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        /* Carry color properties over from the first frame when available. */
        if (frame) {
            enc_ctx->color_range            = frame->color_range;
            enc_ctx->color_primaries        = frame->color_primaries;
            enc_ctx->color_trc              = frame->color_trc;
            enc_ctx->colorspace             = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        // Field order: autodetection
        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        // Field order: override
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            enc_ctx->width     = ost->ist->par->width;
            enc_ctx->height    = ost->ist->par->height;
        }
        if (dec_ctx && dec_ctx->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec_ctx->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec_ctx->subtitle_header,
                   dec_ctx->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec_ctx->subtitle_header_size;
        }
        /* Reject text->bitmap or bitmap->text subtitle transcoding. */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE &&
            enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;

            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(ist->dec->id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);

            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                av_log(ost, AV_LOG_ERROR,
                       "Subtitle encoding currently only possible from text to text "
                       "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    if (ost->bitexact)
        enc_ctx->flags |= AV_CODEC_FLAG_BITEXACT;

    if (ost->sq_idx_encode >= 0)
        sq_set_tb(of->sq_encode, ost->sq_idx_encode, enc_ctx->time_base);

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
/* Fully initialize one output stream: for encoded streams configure and open
 * the encoder, propagate codec parameters and side data to the AVStream; for
 * stream copy delegate to init_output_stream_streamcopy(). Finally register
 * the stream with the muxer. On failure writes a message to error/error_len
 * (for encoder/device errors) and returns a negative AVERROR. */
static int init_output_stream(OutputStream *ost, AVFrame *frame,
                              char *error, int error_len)
{
    int ret = 0;

    if (ost->enc_ctx) {
        const AVCodec *codec = ost->enc_ctx->codec;
        InputStream *ist = ost->ist;

        ret = init_output_stream_encode(ost, frame);
        if (ret < 0)
            return ret;

        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);

        if (codec->capabilities & AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE) {
            ret = av_dict_set(&ost->encoder_opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
            if (ret < 0)
                return ret;
        }

        ret = hw_device_setup_for_encode(ost);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }

        /* Fixed-frame-size audio encoders need the buffersink to deliver
         * exactly frame_size samples per frame. */
        if (codec->type == AVMEDIA_TYPE_AUDIO &&
            !(codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);

        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(ost, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                        " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(ost, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* Copy side data produced by the encoder onto the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* Autorotation was applied by the filter chain, so any
                     * copied display matrix is reset to identity. */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((int32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->ist) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }

    ret = of_stream_init(output_files[ost->file_index], ost);
    if (ret < 0)
        return ret;

    return ret;
}
/* Initialize transcoding: start framerate emulation clocks, open input
 * streams, initialize stream-copy and subtitle/data outputs, mark unused
 * programs as discarded, and dump the stream mapping to the log.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int transcode_init(void)
{
    int ret = 0;
    char error[1024] = {0};

    /* init framerate emulation: record a per-stream wallclock start for
     * inputs that are rate-limited (-readrate) or emulated (-re) */
    for (int i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->readrate || ifile->rate_emu)
            for (int j = 0; j < ifile->nb_streams; j++)
                ifile->streams[j]->start = av_gettime_relative();
    }

    /* init input streams */
    for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
        if ((ret = init_input_stream(ist, error, sizeof(error))) < 0)
            goto dump_format;

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        /* encoded audio/video streams are initialized lazily, see above */
        if (ost->enc_ctx &&
            (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
             ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))
            continue;

        ret = init_output_stream_wrapper(ost, NULL, 0);
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs: keep a program only if at least one of its
     * streams is not discarded */
    for (int i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (int j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            for (int k = 0; k < p->nb_stream_indexes; k++)
                if (!ifile->streams[p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

 dump_format:
    /* dump the stream mapping; reached on both success and failure so the
     * user can see how far initialization got */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");

    /* inputs feeding complex (non-simple) filtergraphs */
    for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
        for (int j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc_ctx->codec->name);
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               ost->ist->file_index,
               ost->ist->st->index,
               ost->file_index,
               ost->index);
        if (ost->enc_ctx) {
            /* show both the codec name and the actual coder implementation;
             * print "native" when they coincide */
            const AVCodec *in_codec    = ost->ist->dec;
            const AVCodec *out_codec   = ost->enc_ctx->codec;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        } else
            av_log(NULL, AV_LOG_INFO, " (copy)");
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
  2936. /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
  2937. static int need_output(void)
  2938. {
  2939. for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
  2940. if (ost->finished)
  2941. continue;
  2942. return 1;
  2943. }
  2944. return 0;
  2945. }
/**
 * Select the output stream to process.
 *
 * Streams are ranked by how far they have progressed (filter pts when
 * available, otherwise last muxed dts); the least-advanced stream is
 * chosen so outputs stay roughly interleaved.
 *
 * @return selected output stream, or NULL if none available
 */
static OutputStream *choose_output(void)
{
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        int64_t opts;
        if (ost->filter && ost->last_filter_pts != AV_NOPTS_VALUE) {
            /* prefer the most recent pts seen at the filter output */
            opts = ost->last_filter_pts;
        } else {
            /* fall back to the last muxed dts; INT64_MIN makes a stream
             * with no output yet the most urgent candidate */
            opts = ost->last_mux_dts == AV_NOPTS_VALUE ?
                   INT64_MIN : ost->last_mux_dts;
            if (ost->last_mux_dts == AV_NOPTS_VALUE)
                av_log(ost, AV_LOG_DEBUG,
                       "cur_dts is invalid [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                       ost->initialized, ost->inputs_done, ost->finished);
        }

        /* a not-yet-initialized stream that still has input pending is
         * picked outright (or aborts the selection if it is unavailable) */
        if (!ost->initialized && !ost->inputs_done)
            return ost->unavailable ? NULL : ost;

        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min  = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
  2976. static void set_tty_echo(int on)
  2977. {
  2978. #if HAVE_TERMIOS_H
  2979. struct termios tty;
  2980. if (tcgetattr(0, &tty) == 0) {
  2981. if (on) tty.c_lflag |= ECHO;
  2982. else tty.c_lflag &= ~ECHO;
  2983. tcsetattr(0, TCSANOW, &tty);
  2984. }
  2985. #endif
  2986. }
/* Poll stdin (at most once every 100ms) for interactive commands:
 *   q      quit
 *   + / -  raise / lower log verbosity
 *   s      toggle QP histogram
 *   c / C  send / queue a command to filtergraphs
 *   d / D  set / cycle codec debug flags
 *   ?      print help
 * Returns AVERROR_EXIT to request termination, 0 otherwise. */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if (cur_time - last_time >= 100000) {
        key = read_key();
        last_time = cur_time;
    } else
        key = -1;
    if (key == 'q') {
        av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
        return AVERROR_EXIT;
    }
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        /* read one command line of the form:
         * <target>|all <time>|-1 <command>[ <argument>] */
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* negative time: send immediately; 'c' stops at the
                         * first filter that accepts the command */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* queue the command for execution at the given time */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D': derive the next debug mode from the first input stream */
            InputStream *ist = ist_iter(NULL);
            if (ist)
                debug = ist->dec_ctx->debug << 1;
            if(!debug) debug = 1;
            while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd': read a numeric debug value from the terminal */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* propagate the debug flags to all decoders and encoders */
        for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist))
            ist->dec_ctx->debug = debug;
        for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
            if (ost->enc_ctx)
                ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                "? show this help\n"
                "+ increase verbosity\n"
                "- decrease verbosity\n"
                "c Send command to first matching filter supporting it\n"
                "C Send/Queue command to all matching filters\n"
                "D cycle through available debug modes\n"
                "h dump packets/hex press to cycle through the 3 states\n"
                "q quit\n"
                "s Show QP histogram\n"
        );
    }
    return 0;
}
  3093. static int got_eagain(void)
  3094. {
  3095. for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
  3096. if (ost->unavailable)
  3097. return 1;
  3098. return 0;
  3099. }
  3100. static void reset_eagain(void)
  3101. {
  3102. int i;
  3103. for (i = 0; i < nb_input_files; i++)
  3104. input_files[i]->eagain = 0;
  3105. for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost))
  3106. ost->unavailable = 0;
  3107. }
/* Drain and flush the decoders of all processed streams in ifile, used
 * when the input loops so no buffered frames leak across iterations. */
static void decode_flush(InputFile *ifile)
{
    for (int i = 0; i < ifile->nb_streams; i++) {
        InputStream *ist = ifile->streams[i];
        int ret;

        if (!ist->processing_needed)
            continue;

        /* feed NULL packets until the decoder has nothing left to emit */
        do {
            ret = process_input_packet(ist, NULL, 1);
        } while (ret > 0);

        if (ist->decoding_needed) {
            /* report last frame duration to the demuxer thread */
            if (ist->par->codec_type == AVMEDIA_TYPE_AUDIO) {
                LastFrameDuration dur;

                dur.stream_idx = i;
                /* convert the sample count into stream time base units */
                dur.duration = av_rescale_q(ist->nb_samples,
                                            (AVRational){ 1, ist->dec_ctx->sample_rate},
                                            ist->st->time_base);

                av_thread_message_queue_send(ifile->audio_duration_queue, &dur, 0);
            }

            avcodec_flush_buffers(ist->dec_ctx);
        }
    }
}
/* Detect timestamp discontinuities on an audio/video packet and update
 * ifile->ts_offset_discont so later packets can be corrected; may also
 * adjust or invalidate this packet's own pts/dts.
 * The caller guarantees pkt->dts is valid. */
static void ts_discontinuity_detect(InputFile *ifile, InputStream *ist,
                                    AVPacket *pkt)
{
    const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT;
    int disable_discontinuity_correction = copy_ts;
    /* this packet's dts in AV_TIME_BASE units */
    int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q,
                                       AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

    /* even with copy_ts, still correct when this looks like a timestamp
     * wraparound: the dts shifted by one wrap period is much closer to the
     * predicted next dts than the raw value */
    if (copy_ts && ist->next_dts != AV_NOPTS_VALUE &&
        fmt_is_discont && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    if (ist->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (fmt_is_discont) {
            /* formats with expected discontinuities (AVFMT_TS_DISCONT):
             * fold large jumps into the per-file offset and pull this
             * packet back in line */
            if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset_discont -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->par->codec_type),
                       delta, ifile->ts_offset_discont);
                pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* continuous formats: drop out-of-range timestamps instead of
             * correcting them */
            if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
                pkt->dts = AV_NOPTS_VALUE;
            }
            if (pkt->pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
                    pkt->pts = AV_NOPTS_VALUE;
                }
            }
        }
    } else if (ist->next_dts == AV_NOPTS_VALUE && !copy_ts &&
               fmt_is_discont && ifile->last_ts != AV_NOPTS_VALUE) {
        /* no per-stream prediction yet: compare against the last timestamp
         * seen anywhere in this file */
        int64_t delta = pkt_dts - ifile->last_ts;
        if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
            ifile->ts_offset_discont -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset_discont);
            pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt->pts != AV_NOPTS_VALUE)
                pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
}
  3192. static void ts_discontinuity_process(InputFile *ifile, InputStream *ist,
  3193. AVPacket *pkt)
  3194. {
  3195. int64_t offset = av_rescale_q(ifile->ts_offset_discont, AV_TIME_BASE_Q,
  3196. ist->st->time_base);
  3197. // apply previously-detected timestamp-discontinuity offset
  3198. // (to all streams, not just audio/video)
  3199. if (pkt->dts != AV_NOPTS_VALUE)
  3200. pkt->dts += offset;
  3201. if (pkt->pts != AV_NOPTS_VALUE)
  3202. pkt->pts += offset;
  3203. // detect timestamp discontinuities for audio/video
  3204. if ((ist->par->codec_type == AVMEDIA_TYPE_VIDEO ||
  3205. ist->par->codec_type == AVMEDIA_TYPE_AUDIO) &&
  3206. pkt->dts != AV_NOPTS_VALUE)
  3207. ts_discontinuity_detect(ifile, ist, pkt);
  3208. }
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket *pkt;
    int ret, i;

    is = ifile->ctx;
    ret = ifile_get_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    if (ret == 1) {
        /* the input file is looped: flush the decoders */
        decode_flush(ifile);
        return AVERROR(EAGAIN);
    }
    if (ret < 0) {
        /* input ended (or errored): drain decoders and finish the outputs
         * fed by this file */
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = ifile->streams[i];
            if (ist->processing_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
                if (ost->ist == ist &&
                    (!ost->enc_ctx || ost->enc_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE)) {
                    OutputFile *of = output_files[ost->file_index];
                    of_output_packet(of, ost->pkt, ost, 1);
                }
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    ist = ifile->streams[pkt->stream_index];

    ist->data_size += pkt->size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            /* display matrix is handled separately (see autorotate) */
            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* don't override side data already present on the packet */
            if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                report_and_exit(AVERROR(ENOMEM));

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    // detect and try to correct for timestamp discontinuities
    ts_discontinuity_process(ifile, ist, pkt);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
               ifile->index, pkt->stream_index,
               av_get_media_type_string(ist->par->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt->pts);

    process_input_packet(ist, pkt, 0);

discard_packet:
    av_packet_free(&pkt);

    return 0;
}
/**
 * Perform a step of transcoding for the specified filter graph.
 *
 * @param[in]  graph     filter graph to consider
 * @param[out] best_ist  input stream where a frame would allow to continue
 * @return  0 for success, <0 for error
 */
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    InputStream *ist;

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        /* the graph produced output: drain it */
        return reap_filters(0);

    if (ret == AVERROR_EOF) {
        /* graph fully drained: flush and close all its outputs */
        ret = reap_filters(1);
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    /* EAGAIN: pick the input whose buffersrc has failed the most requests,
     * i.e. the one the graph is most starved for */
    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
            input_files[ist->file_index]->eof_reached)
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    /* no feedable input: mark this graph's outputs unavailable for now */
    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}
/**
 * Run a single step of transcoding.
 *
 * Chooses the least-advanced output stream, determines which input it
 * needs, reads and processes one packet from that input, then reaps any
 * frames the filtergraphs produced.
 *
 * @return  0 for success, <0 for error
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream  *ist = NULL;
    int ret;

    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            /* everything temporarily unavailable: clear flags and back off */
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    /* (re)configure the filtergraph once all its input formats are known */
    if (ost->filter && !ost->filter->graph->graph) {
        if (ifilter_has_all_input_formats(ost->filter->graph)) {
            ret = configure_filtergraph(ost->filter->graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        /*
         * Similar case to the early audio initialization in reap_filters.
         * Audio is special in ffmpeg.c currently as we depend on lavfi's
         * audio frame buffering/creation to get the output audio frame size
         * in samples correct. The audio frame size for the filter chain is
         * configured during the output stream initialization.
         *
         * Apparently avfilter_graph_request_oldest (called in
         * transcode_from_filter just down the line) peeks. Peeking already
         * puts one frame "ready to be given out", which means that any
         * update in filter buffer sink configuration afterwards will not
         * help us. And yes, even if it would be utilized,
         * av_buffersink_get_samples is affected, as it internally utilizes
         * the same early exit for peeked frames.
         *
         * In other words, if avfilter_graph_request_oldest would not make
         * further filter chain configuration or usage of
         * av_buffersink_get_samples useless (by just causing the return
         * of the peeked AVFrame as-is), we could get rid of this additional
         * early encoder initialization.
         */
        if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* graph not configured yet: feed any input that has produced no
         * output and is not at EOF */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* streamcopy: the source input stream is attached directly */
        ist = ost->ist;
        av_assert0(ist);
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
/*
 * The following code is the main loop of the file converter
 */
static int transcode(void)
{
    int ret, i;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

    /* main loop: one transcode_step() per iteration until all outputs are
     * finished, an error occurs or the user aborts */
    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }

    /* at the end of stream, we must flush the decoder buffers */
    for (ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
        if (!input_files[ist->file_index]->eof_reached) {
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed */
    for (i = 0; i < nb_output_files; i++) {
        ret = of_write_trailer(output_files[i]);
        if (ret < 0 && exit_on_error)
            exit_program(1);
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder; optionally abort on streams that produced no
     * packets at all (-abort_on empty_output_stream) */
    for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
        uint64_t packets_written;
        packets_written = atomic_load(&ost->packets_written);
        total_packets_written += packets_written;
        if (!packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
            av_log(ost, AV_LOG_FATAL, "Empty output\n");
            exit_program(1);
        }
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    hw_device_free_all();

    /* finished ! */
    ret = 0;

 fail:
    return ret;
}
  3495. static BenchmarkTimeStamps get_benchmark_time_stamps(void)
  3496. {
  3497. BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
  3498. #if HAVE_GETRUSAGE
  3499. struct rusage rusage;
  3500. getrusage(RUSAGE_SELF, &rusage);
  3501. time_stamps.user_usec =
  3502. (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
  3503. time_stamps.sys_usec =
  3504. (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
  3505. #elif HAVE_GETPROCESSTIMES
  3506. HANDLE proc;
  3507. FILETIME c, e, k, u;
  3508. proc = GetCurrentProcess();
  3509. GetProcessTimes(proc, &c, &e, &k, &u);
  3510. time_stamps.user_usec =
  3511. ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
  3512. time_stamps.sys_usec =
  3513. ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
  3514. #else
  3515. time_stamps.user_usec = time_stamps.sys_usec = 0;
  3516. #endif
  3517. return time_stamps;
  3518. }
  3519. static int64_t getmaxrss(void)
  3520. {
  3521. #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
  3522. struct rusage rusage;
  3523. getrusage(RUSAGE_SELF, &rusage);
  3524. return (int64_t)rusage.ru_maxrss * 1024;
  3525. #elif HAVE_GETPROCESSMEMORYINFO
  3526. HANDLE proc;
  3527. PROCESS_MEMORY_COUNTERS memcounters;
  3528. proc = GetCurrentProcess();
  3529. memcounters.cb = sizeof(memcounters);
  3530. GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
  3531. return memcounters.PeakPagefileUsage;
  3532. #else
  3533. return 0;
  3534. #endif
  3535. }
/* Program entry point: set up logging and global state, parse options,
 * open all inputs/outputs, run transcode(), print benchmark/decode-error
 * statistics, and exit with the appropriate status code. */
int main(int argc, char **argv)
{
    int ret;
    BenchmarkTimeStamps ti;

    init_dynload();

    /* ffmpeg_cleanup runs on every exit_program() path */
    register_exit(ffmpeg_cleanup);

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avformat_network_init();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

    current_time = ti = get_benchmark_time_stamps();
    if (transcode() < 0)
        exit_program(1);
    if (do_benchmark) {
        /* report user/system/real time consumed by the whole run */
        int64_t utime, stime, rtime;
        current_time = get_benchmark_time_stamps();
        utime = current_time.user_usec - ti.user_usec;
        stime = current_time.sys_usec - ti.sys_usec;
        rtime = current_time.real_usec - ti.real_usec;
        av_log(NULL, AV_LOG_INFO,
               "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
               utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    /* fail (exit code 69) when the decode error rate exceeds -max_error_rate */
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    /* exit_program() does not return; 255 signals an interrupted run */
    exit_program(received_nb_signals ? 255 : main_return_code);
    return main_return_code;
}