/* ffplay.c — note: a code-viewer artifact (file-size header and concatenated
   line-number gutter) was removed from the top of this extraction. */
  1. /*
  2. * ffplay : Simple Media Player based on the FFmpeg libraries
  3. * Copyright (c) 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "config.h"
  22. #include <inttypes.h>
  23. #include <math.h>
  24. #include <limits.h>
  25. #include "libavutil/avstring.h"
  26. #include "libavutil/colorspace.h"
  27. #include "libavutil/mathematics.h"
  28. #include "libavutil/pixdesc.h"
  29. #include "libavutil/imgutils.h"
  30. #include "libavutil/dict.h"
  31. #include "libavutil/parseutils.h"
  32. #include "libavutil/samplefmt.h"
  33. #include "libavutil/avassert.h"
  34. #include "libavformat/avformat.h"
  35. #include "libavdevice/avdevice.h"
  36. #include "libswscale/swscale.h"
  37. #include "libavcodec/audioconvert.h"
  38. #include "libavutil/opt.h"
  39. #include "libavcodec/avfft.h"
  40. #if CONFIG_AVFILTER
  41. # include "libavfilter/avcodec.h"
  42. # include "libavfilter/avfilter.h"
  43. # include "libavfilter/avfiltergraph.h"
  44. # include "libavfilter/buffersink.h"
  45. #endif
  46. #include <SDL.h>
  47. #include <SDL_thread.h>
  48. #include "cmdutils.h"
  49. #include <unistd.h>
  50. #include <assert.h>
const char program_name[] = "ffplay";
const int program_birth_year = 2003;

/* upper bound on the total bytes buffered across the packet queues */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB 20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler flags used for the software-conversion path */
static int sws_flags = SWS_BICUBIC;
/* Thread-safe FIFO of demuxed packets, shared between the read thread
 * (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;    /* number of packets currently queued */
    int size;          /* payload bytes plus per-node overhead */
    int abort_request; /* when set, blocked readers return -1 */
    SDL_mutex *mutex;  /* protects every field above */
    SDL_cond *cond;    /* signalled on put and on abort */
} PacketQueue;
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts; ///<presentation time stamp for this picture
    double target_clock; ///<av_gettime() time at which this should be displayed ideally
    double duration; ///<expected duration of the frame
    int64_t pos; ///<byte position in file
    SDL_Overlay *bmp;    /* SDL YUV overlay holding the pixel data */
    int width, height; /* source height & width */
    int allocated;       /* nonzero once bmp has been (re)allocated */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref; /* filtered-frame reference owning the data */
#endif
} VideoPicture;
/* One decoded subtitle queued for blending onto video frames. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
/* selects which clock drives A/V synchronisation */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
/* Aggregate player state — one instance per opened media file.  Owns the
 * worker threads, the per-stream packet queues, the clocks and the
 * display bookkeeping. */
typedef struct VideoState {
    SDL_Thread *read_tid;    /* demux / read thread */
    SDL_Thread *video_tid;   /* video decoding thread */
    SDL_Thread *refresh_tid; /* display refresh event thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;       /* set to shut the whole player down */
    int paused;
    int last_paused;         /* previous pause state, to detect toggles */
    int seek_req;            /* a seek has been requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;

    int audio_stream;

    int av_sync_type;        /* one of the AV_SYNC_* values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    int audio_write_buf_size;
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    double audio_current_pts;
    double audio_current_pts_drift;

    /* what to draw in the video area: frames, waveform, or spectrum */
    enum ShowMode {
        SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
    } show_mode;
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for visualisation */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;   /* FFT context used by SHOW_MODE_RDFT */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos; ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    char filename[1024];
    int width, height, xleft, ytop; /* display area geometry */

    int step; /* single-frame step mode */

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter; ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
static int opt_help(const char *opt, const char *arg);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
/* -1 = pick the stream automatically for each media type */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1 = decide from the container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=-1;
static enum ShowMode show_mode = SHOW_MODE_NONE;
static int rdftspeed=20;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static int64_t audio_callback_time;

/* sentinel packet queued to tell a decoder thread to flush its codec */
static AVPacket flush_pkt;

/* custom SDL events used to drive the main event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;
/* Exit hook required by cmdutils; no extra cleanup is done here. */
void exit_program(int ret)
{
    exit(ret);
}
  245. static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
  246. {
  247. AVPacketList *pkt1;
  248. /* duplicate the packet */
  249. if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
  250. return -1;
  251. pkt1 = av_malloc(sizeof(AVPacketList));
  252. if (!pkt1)
  253. return -1;
  254. pkt1->pkt = *pkt;
  255. pkt1->next = NULL;
  256. SDL_LockMutex(q->mutex);
  257. if (!q->last_pkt)
  258. q->first_pkt = pkt1;
  259. else
  260. q->last_pkt->next = pkt1;
  261. q->last_pkt = pkt1;
  262. q->nb_packets++;
  263. q->size += pkt1->pkt.size + sizeof(*pkt1);
  264. /* XXX: should duplicate packet data in DV case */
  265. SDL_CondSignal(q->cond);
  266. SDL_UnlockMutex(q->mutex);
  267. return 0;
  268. }
/* packet queue handling */

/* Initialize an empty queue and prime it with the flush sentinel so
 * the consuming decoder starts from a clean codec state. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
  277. static void packet_queue_flush(PacketQueue *q)
  278. {
  279. AVPacketList *pkt, *pkt1;
  280. SDL_LockMutex(q->mutex);
  281. for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
  282. pkt1 = pkt->next;
  283. av_free_packet(&pkt->pkt);
  284. av_freep(&pkt);
  285. }
  286. q->last_pkt = NULL;
  287. q->first_pkt = NULL;
  288. q->nb_packets = 0;
  289. q->size = 0;
  290. SDL_UnlockMutex(q->mutex);
  291. }
/* Destroy the queue: free all packets, then the mutex and condition. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
/* Wake any thread blocked in packet_queue_get() and make it return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
  305. /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
  306. static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
  307. {
  308. AVPacketList *pkt1;
  309. int ret;
  310. SDL_LockMutex(q->mutex);
  311. for(;;) {
  312. if (q->abort_request) {
  313. ret = -1;
  314. break;
  315. }
  316. pkt1 = q->first_pkt;
  317. if (pkt1) {
  318. q->first_pkt = pkt1->next;
  319. if (!q->first_pkt)
  320. q->last_pkt = NULL;
  321. q->nb_packets--;
  322. q->size -= pkt1->pkt.size + sizeof(*pkt1);
  323. *pkt = pkt1->pkt;
  324. av_free(pkt1);
  325. ret = 1;
  326. break;
  327. } else if (!block) {
  328. ret = 0;
  329. break;
  330. } else {
  331. SDL_CondWait(q->cond, q->mutex);
  332. }
  333. }
  334. SDL_UnlockMutex(q->mutex);
  335. return ret;
  336. }
  337. static inline void fill_rectangle(SDL_Surface *screen,
  338. int x, int y, int w, int h, int color)
  339. {
  340. SDL_Rect rect;
  341. rect.x = x;
  342. rect.y = y;
  343. rect.w = w;
  344. rect.h = h;
  345. SDL_FillRect(screen, &rect, color);
  346. }
/* Blend newp over oldp with alpha a (0..255); s is a precision shift
 * (the result carries s extra fractional bits from the accumulation). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a packed 32-bit ARGB word at s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up palette entry pal[*s] and unpack it as A, Y, U, V. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack A, Y, U, V into a 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per pixel in the palettized subtitle bitmap */
#define BPP 1
  370. static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
  371. {
  372. int wrap, wrap3, width2, skip2;
  373. int y, u, v, a, u1, v1, a1, w, h;
  374. uint8_t *lum, *cb, *cr;
  375. const uint8_t *p;
  376. const uint32_t *pal;
  377. int dstx, dsty, dstw, dsth;
  378. dstw = av_clip(rect->w, 0, imgw);
  379. dsth = av_clip(rect->h, 0, imgh);
  380. dstx = av_clip(rect->x, 0, imgw - dstw);
  381. dsty = av_clip(rect->y, 0, imgh - dsth);
  382. lum = dst->data[0] + dsty * dst->linesize[0];
  383. cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
  384. cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
  385. width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
  386. skip2 = dstx >> 1;
  387. wrap = dst->linesize[0];
  388. wrap3 = rect->pict.linesize[0];
  389. p = rect->pict.data[0];
  390. pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
  391. if (dsty & 1) {
  392. lum += dstx;
  393. cb += skip2;
  394. cr += skip2;
  395. if (dstx & 1) {
  396. YUVA_IN(y, u, v, a, p, pal);
  397. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  398. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  399. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  400. cb++;
  401. cr++;
  402. lum++;
  403. p += BPP;
  404. }
  405. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  406. YUVA_IN(y, u, v, a, p, pal);
  407. u1 = u;
  408. v1 = v;
  409. a1 = a;
  410. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  411. YUVA_IN(y, u, v, a, p + BPP, pal);
  412. u1 += u;
  413. v1 += v;
  414. a1 += a;
  415. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  416. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  417. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  418. cb++;
  419. cr++;
  420. p += 2 * BPP;
  421. lum += 2;
  422. }
  423. if (w) {
  424. YUVA_IN(y, u, v, a, p, pal);
  425. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  426. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  427. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  428. p++;
  429. lum++;
  430. }
  431. p += wrap3 - dstw * BPP;
  432. lum += wrap - dstw - dstx;
  433. cb += dst->linesize[1] - width2 - skip2;
  434. cr += dst->linesize[2] - width2 - skip2;
  435. }
  436. for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
  437. lum += dstx;
  438. cb += skip2;
  439. cr += skip2;
  440. if (dstx & 1) {
  441. YUVA_IN(y, u, v, a, p, pal);
  442. u1 = u;
  443. v1 = v;
  444. a1 = a;
  445. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  446. p += wrap3;
  447. lum += wrap;
  448. YUVA_IN(y, u, v, a, p, pal);
  449. u1 += u;
  450. v1 += v;
  451. a1 += a;
  452. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  453. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  454. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  455. cb++;
  456. cr++;
  457. p += -wrap3 + BPP;
  458. lum += -wrap + 1;
  459. }
  460. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  461. YUVA_IN(y, u, v, a, p, pal);
  462. u1 = u;
  463. v1 = v;
  464. a1 = a;
  465. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  466. YUVA_IN(y, u, v, a, p + BPP, pal);
  467. u1 += u;
  468. v1 += v;
  469. a1 += a;
  470. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  471. p += wrap3;
  472. lum += wrap;
  473. YUVA_IN(y, u, v, a, p, pal);
  474. u1 += u;
  475. v1 += v;
  476. a1 += a;
  477. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  478. YUVA_IN(y, u, v, a, p + BPP, pal);
  479. u1 += u;
  480. v1 += v;
  481. a1 += a;
  482. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  483. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
  484. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
  485. cb++;
  486. cr++;
  487. p += -wrap3 + 2 * BPP;
  488. lum += -wrap + 2;
  489. }
  490. if (w) {
  491. YUVA_IN(y, u, v, a, p, pal);
  492. u1 = u;
  493. v1 = v;
  494. a1 = a;
  495. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  496. p += wrap3;
  497. lum += wrap;
  498. YUVA_IN(y, u, v, a, p, pal);
  499. u1 += u;
  500. v1 += v;
  501. a1 += a;
  502. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  503. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  504. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  505. cb++;
  506. cr++;
  507. p += -wrap3 + BPP;
  508. lum += -wrap + 1;
  509. }
  510. p += wrap3 + (wrap3 - dstw * BPP);
  511. lum += wrap + (wrap - dstw - dstx);
  512. cb += dst->linesize[1] - width2 - skip2;
  513. cr += dst->linesize[2] - width2 - skip2;
  514. }
  515. /* handle odd height */
  516. if (h) {
  517. lum += dstx;
  518. cb += skip2;
  519. cr += skip2;
  520. if (dstx & 1) {
  521. YUVA_IN(y, u, v, a, p, pal);
  522. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  523. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  524. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  525. cb++;
  526. cr++;
  527. lum++;
  528. p += BPP;
  529. }
  530. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  531. YUVA_IN(y, u, v, a, p, pal);
  532. u1 = u;
  533. v1 = v;
  534. a1 = a;
  535. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  536. YUVA_IN(y, u, v, a, p + BPP, pal);
  537. u1 += u;
  538. v1 += v;
  539. a1 += a;
  540. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  541. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
  542. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
  543. cb++;
  544. cr++;
  545. p += 2 * BPP;
  546. lum += 2;
  547. }
  548. if (w) {
  549. YUVA_IN(y, u, v, a, p, pal);
  550. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  551. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  552. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  553. }
  554. }
  555. }
/* Free the decoded subtitle payload held by a SubPicture. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
/* Display the current video picture — with any due subtitle blended in —
 * letterboxed (aspect-preserving, centered) into the SDL window. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
        if (vp->picref->video->sample_aspect_ratio.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
#else
        /* XXX: use variable in the frame */
        /* prefer the container SAR, fall back to the codec SAR */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st) {
            if (is->subpq_size > 0) {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's display time has arrived */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    /* planes 1/2 are swapped to match the overlay's
                       chroma plane order */
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }

        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture in the window, keeping even dimensions */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        is->no_background = 0;
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = FFMAX(width, 1);
        rect.h = FFMAX(height, 1);
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    }
}
  623. static inline int compute_mod(int a, int b)
  624. {
  625. return a < 0 ? a%b + b : a%b;
  626. }
  627. static void video_audio_display(VideoState *s)
  628. {
  629. int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
  630. int ch, channels, h, h2, bgcolor, fgcolor;
  631. int16_t time_diff;
  632. int rdft_bits, nb_freq;
  633. for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
  634. ;
  635. nb_freq= 1<<(rdft_bits-1);
  636. /* compute display index : center on currently output samples */
  637. channels = s->audio_st->codec->channels;
  638. nb_display_channels = channels;
  639. if (!s->paused) {
  640. int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
  641. n = 2 * channels;
  642. delay = s->audio_write_buf_size;
  643. delay /= n;
  644. /* to be more precise, we take into account the time spent since
  645. the last buffer computation */
  646. if (audio_callback_time) {
  647. time_diff = av_gettime() - audio_callback_time;
  648. delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
  649. }
  650. delay += 2*data_used;
  651. if (delay < data_used)
  652. delay = data_used;
  653. i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
  654. if (s->show_mode == SHOW_MODE_WAVES) {
  655. h= INT_MIN;
  656. for(i=0; i<1000; i+=channels){
  657. int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
  658. int a= s->sample_array[idx];
  659. int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
  660. int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
  661. int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
  662. int score= a-d;
  663. if(h<score && (b^c)<0){
  664. h= score;
  665. i_start= idx;
  666. }
  667. }
  668. }
  669. s->last_i_start = i_start;
  670. } else {
  671. i_start = s->last_i_start;
  672. }
  673. bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
  674. if (s->show_mode == SHOW_MODE_WAVES) {
  675. fill_rectangle(screen,
  676. s->xleft, s->ytop, s->width, s->height,
  677. bgcolor);
  678. fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
  679. /* total height for one channel */
  680. h = s->height / nb_display_channels;
  681. /* graph height / 2 */
  682. h2 = (h * 9) / 20;
  683. for(ch = 0;ch < nb_display_channels; ch++) {
  684. i = i_start + ch;
  685. y1 = s->ytop + ch * h + (h / 2); /* position of center line */
  686. for(x = 0; x < s->width; x++) {
  687. y = (s->sample_array[i] * h2) >> 15;
  688. if (y < 0) {
  689. y = -y;
  690. ys = y1 - y;
  691. } else {
  692. ys = y1;
  693. }
  694. fill_rectangle(screen,
  695. s->xleft + x, ys, 1, y,
  696. fgcolor);
  697. i += channels;
  698. if (i >= SAMPLE_ARRAY_SIZE)
  699. i -= SAMPLE_ARRAY_SIZE;
  700. }
  701. }
  702. fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
  703. for(ch = 1;ch < nb_display_channels; ch++) {
  704. y = s->ytop + ch * h;
  705. fill_rectangle(screen,
  706. s->xleft, y, s->width, 1,
  707. fgcolor);
  708. }
  709. SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
  710. }else{
  711. nb_display_channels= FFMIN(nb_display_channels, 2);
  712. if(rdft_bits != s->rdft_bits){
  713. av_rdft_end(s->rdft);
  714. av_free(s->rdft_data);
  715. s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
  716. s->rdft_bits= rdft_bits;
  717. s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
  718. }
  719. {
  720. FFTSample *data[2];
  721. for(ch = 0;ch < nb_display_channels; ch++) {
  722. data[ch] = s->rdft_data + 2*nb_freq*ch;
  723. i = i_start + ch;
  724. for(x = 0; x < 2*nb_freq; x++) {
  725. double w= (x-nb_freq)*(1.0/nb_freq);
  726. data[ch][x]= s->sample_array[i]*(1.0-w*w);
  727. i += channels;
  728. if (i >= SAMPLE_ARRAY_SIZE)
  729. i -= SAMPLE_ARRAY_SIZE;
  730. }
  731. av_rdft_calc(s->rdft, data[ch]);
  732. }
  733. //least efficient way to do this, we should of course directly access it but its more than fast enough
  734. for(y=0; y<s->height; y++){
  735. double w= 1/sqrt(nb_freq);
  736. int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
  737. int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
  738. + data[1][2*y+1]*data[1][2*y+1])) : a;
  739. a= FFMIN(a,255);
  740. b= FFMIN(b,255);
  741. fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
  742. fill_rectangle(screen,
  743. s->xpos, s->height-y, 1, 1,
  744. fgcolor);
  745. }
  746. }
  747. SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
  748. s->xpos++;
  749. if(s->xpos >= s->width)
  750. s->xpos= s->xleft;
  751. }
  752. }
  753. static void stream_close(VideoState *is)
  754. {
  755. VideoPicture *vp;
  756. int i;
  757. /* XXX: use a special url_shutdown call to abort parse cleanly */
  758. is->abort_request = 1;
  759. SDL_WaitThread(is->read_tid, NULL);
  760. SDL_WaitThread(is->refresh_tid, NULL);
  761. /* free all pictures */
  762. for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
  763. vp = &is->pictq[i];
  764. #if CONFIG_AVFILTER
  765. if (vp->picref) {
  766. avfilter_unref_buffer(vp->picref);
  767. vp->picref = NULL;
  768. }
  769. #endif
  770. if (vp->bmp) {
  771. SDL_FreeYUVOverlay(vp->bmp);
  772. vp->bmp = NULL;
  773. }
  774. }
  775. SDL_DestroyMutex(is->pictq_mutex);
  776. SDL_DestroyCond(is->pictq_cond);
  777. SDL_DestroyMutex(is->subpq_mutex);
  778. SDL_DestroyCond(is->subpq_cond);
  779. #if !CONFIG_AVFILTER
  780. if (is->img_convert_ctx)
  781. sws_freeContext(is->img_convert_ctx);
  782. #endif
  783. av_free(is);
  784. }
/* Global shutdown path: close the stream (if any), undo global library
 * state, quit SDL and terminate the process. Never returns. */
static void do_exit(VideoState *is)
{
    if (is) {
        stream_close(is);
    }
    av_lockmgr_register(NULL);   /* unregister the avcodec lock manager */
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");            /* terminate the \r-based status line */
    SDL_Quit();
    /* log an empty message at QUIET level before exiting */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    exit(0);
}
/* (Re)create the SDL output surface sized to the current display mode.
 * Returns 0; exits the program via do_exit() if SDL cannot set the mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    /* choose the window size: explicit user request first, then the
       filter-graph output (or the codec dimensions), else 640x480 */
    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }

    /* nothing to do if the surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        do_exit(is);
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width  = screen->w;
    is->height = screen->h;

    return 0;
}
  845. /* display the current picture, if any */
  846. static void video_display(VideoState *is)
  847. {
  848. if(!screen)
  849. video_open(is);
  850. if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
  851. video_audio_display(is);
  852. else if (is->video_st)
  853. video_image_display(is);
  854. }
  855. static int refresh_thread(void *opaque)
  856. {
  857. VideoState *is= opaque;
  858. while(!is->abort_request){
  859. SDL_Event event;
  860. event.type = FF_REFRESH_EVENT;
  861. event.user.data1 = opaque;
  862. if(!is->refresh){
  863. is->refresh=1;
  864. SDL_PushEvent(&event);
  865. }
  866. //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
  867. usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
  868. }
  869. return 0;
  870. }
  871. /* get the current audio clock value */
  872. static double get_audio_clock(VideoState *is)
  873. {
  874. if (is->paused) {
  875. return is->audio_current_pts;
  876. } else {
  877. return is->audio_current_pts_drift + av_gettime() / 1000000.0;
  878. }
  879. }
  880. /* get the current video clock value */
  881. static double get_video_clock(VideoState *is)
  882. {
  883. if (is->paused) {
  884. return is->video_current_pts;
  885. } else {
  886. return is->video_current_pts_drift + av_gettime() / 1000000.0;
  887. }
  888. }
  889. /* get the current external clock value */
  890. static double get_external_clock(VideoState *is)
  891. {
  892. int64_t ti;
  893. ti = av_gettime();
  894. return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
  895. }
  896. /* get the current master clock value */
  897. static double get_master_clock(VideoState *is)
  898. {
  899. double val;
  900. if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
  901. if (is->video_st)
  902. val = get_video_clock(is);
  903. else
  904. val = get_audio_clock(is);
  905. } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
  906. if (is->audio_st)
  907. val = get_audio_clock(is);
  908. else
  909. val = get_video_clock(is);
  910. } else {
  911. val = get_external_clock(is);
  912. }
  913. return val;
  914. }
  915. /* seek in the stream */
  916. static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
  917. {
  918. if (!is->seek_req) {
  919. is->seek_pos = pos;
  920. is->seek_rel = rel;
  921. is->seek_flags &= ~AVSEEK_FLAG_BYTE;
  922. if (seek_by_bytes)
  923. is->seek_flags |= AVSEEK_FLAG_BYTE;
  924. is->seek_req = 1;
  925. }
  926. }
/* pause or resume the video */
static void stream_toggle_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time spent paused
           (drift was frozen at pause time, so this recovers the gap) */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* presumably read_pause_return holds the av_read_pause() result;
               when the demuxer supported pausing, re-derive the current pts
               from the drift -- TODO confirm against the read thread */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the new wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
  939. static double compute_target_time(double frame_current_pts, VideoState *is)
  940. {
  941. double delay, sync_threshold, diff;
  942. /* compute nominal delay */
  943. delay = frame_current_pts - is->frame_last_pts;
  944. if (delay <= 0 || delay >= 10.0) {
  945. /* if incorrect delay, use previous one */
  946. delay = is->frame_last_delay;
  947. } else {
  948. is->frame_last_delay = delay;
  949. }
  950. is->frame_last_pts = frame_current_pts;
  951. /* update delay to follow master synchronisation source */
  952. if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
  953. is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
  954. /* if video is slave, we try to correct big delays by
  955. duplicating or deleting a frame */
  956. diff = get_video_clock(is) - get_master_clock(is);
  957. /* skip or repeat frame. We take into account the
  958. delay to compute the threshold. I still don't know
  959. if it is the best guess */
  960. sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
  961. if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
  962. if (diff <= -sync_threshold)
  963. delay = 0;
  964. else if (diff >= sync_threshold)
  965. delay = 2 * delay;
  966. }
  967. }
  968. is->frame_timer += delay;
  969. av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
  970. delay, frame_current_pts, -diff);
  971. return is->frame_timer;
  972. }
/* called to display each frame */
static void video_refresh(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time = av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep showing the previous frame */
            if (time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if (is->pictq_size > 1) {
                VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target = nextvp->target_clock;
            } else {
                /* only one frame queued: estimate the next deadline from its duration */
                next_target = vp->target_clock + vp->duration;
            }
            /* frame dropping: if we are already past the next frame's
               deadline, raise the skip ratio and drop this picture */
            if ((framedrop > 0 || (framedrop && is->audio_st)) && time > next_target) {
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if (is->pictq_size > 1 || time > next_target + 0.5) {
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if (is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: drop everything queued for the old one */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the current subtitle once it has expired or
                           the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the console status line at most every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
                   get_master_clock(is),
                   av_diff,
                   FFMAX(is->skip_frames-1, 0),
                   aqsize / 1024,
                   vqsize / 1024,
                   sqsize,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
/* allocate a picture (needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions and format come from the filter graph output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit(is);
    }

    /* wake queue_picture(), which is blocked on pictq_cond waiting for
       this buffer to become available */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
/* Queue 'src_frame' into the picture ring buffer, converting it into the
 * SDL YV12 overlay. pts1 is the frame pts (0 when absent, in which case
 * the running video clock is used); pos is the byte position in the file.
 * Blocks while the queue is full. Returns 0 on success, -1 on abort. */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    VideoPicture *vp;
    double frame_delay, pts = pts1;

    /* compute the exact PTS for the picture if it is omitted in the stream
     * pts1 is the dts of the pkt / pts of the frame */
    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_picture_type_char(src_frame->pict_type), pts, pts1);
#endif

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    /* queue full while the display side is idle: raise the skip ratio */
    if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    vp->duration = frame_delay;

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
        if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
            while (!vp->allocated) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if (vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        memset(&pict, 0, sizeof(AVPicture));
        /* YV12 stores the V plane before U, hence indices 1 and 2 swapped */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        //FIXME use direct rendering
        av_picture_copy(&pict, (AVPicture *)src_frame,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock = compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
/* Pull one packet off the video queue and decode it.
 * Returns 1 when a frame is ready in 'frame' (*pts filled in),
 * 0 when no frame is produced (flush packet, skipped frame, or the
 * decoder needs more data), -1 when the queue was aborted. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        /* seek flush: reset the decoder and the timing state */
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait until the display side has drained the picture queue */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    /* NOTE(review): the return value of avcodec_decode_video2() is ignored;
       decode errors are silently treated as "no frame produced" */
    avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);

    if (got_picture) {
        /* pick the timestamp source according to decoder_reorder_pts */
        if (decoder_reorder_pts == -1) {
            *pts = frame->best_effort_timestamp;
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame-skipping accounting: only return 1 for frames we keep */
        is->skip_frames_index += 1;
        if (is->skip_frames_index >= is->skip_frames) {
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }
    }
    return 0;
}
  1295. #if CONFIG_AVFILTER
/* Private context of the "ffplay_input" source filter: bridges the
 * player's video decoder into the libavfilter graph. */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* scratch frame reused for each decode */
    int use_dr1;      /* nonzero when DR1 get/release_buffer callbacks are installed */
} FilterPriv;
/* get_buffer() callback: hand the decoder a buffer allocated by the filter
 * graph so decoded data lands directly in a filter buffer (DR1). Returns
 * 0 on success, -1 on invalid size or allocation failure. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;

    if(av_image_check_size(w, h, 0, codec))
        return -1;

    avcodec_align_dimensions2(codec, &w, &h, stride);
    /* edge is 0 here because of the EMU_EDGE assertion above */
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* offset past the edge area so the visible region starts at data[i] */
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;  /* stashed for input_release_buffer() to unref */
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
/* release_buffer() callback: drop the filter buffer reference attached by
 * input_get_buffer() and clear the frame's plane pointers. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
/* reget_buffer() callback: reuse the frame's existing buffer when the
 * stream properties are unchanged; allocate a fresh one when the frame is
 * empty; fail (-1) if the picture geometry/format changed under us. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer yet: get a readable one through the normal path */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
/* init callback of the ffplay_input source filter: bind the filter to the
 * player's video decoder (passed via 'opaque') and install the DR1
 * callbacks when the codec supports direct rendering. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;   /* lets input_get_buffer() find this filter */
    if((codec->codec->capabilities & CODEC_CAP_DR1)
        ) {
        av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
        codec->thread_safe_callbacks = 1;
    }

    /* scratch frame, freed in input_uninit() */
    priv->frame = avcodec_alloc_frame();

    return 0;
}
/* uninit callback: free the scratch frame allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
/* request_frame callback: decode packets until one video frame is
 * available, wrap it in a buffer ref and push it down the output link.
 * Returns 0 on success, -1 on abort/decode failure. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop until get_video_frame() reports a decoded frame (ret == 1) */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1 && priv->frame->opaque) {
        /* DR1: decoded data already lives in a filter buffer, just re-ref it */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* NOTE(review): avfilter_get_video_buffer() result is not NULL-checked */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    avfilter_copy_frame_props(picref, priv->frame);
    picref->pts = pts;

    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
/* query_formats callback: offer exactly the decoder's pixel format, so no
 * conversion happens at the graph input. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
  1431. static int input_config_props(AVFilterLink *link)
  1432. {
  1433. FilterPriv *priv = link->src->priv;
  1434. AVStream *s = priv->is->video_st;
  1435. link->w = s->codec->width;
  1436. link->h = s->codec->height;
  1437. link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
  1438. s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
  1439. link->time_base = s->time_base;
  1440. return 0;
  1441. }
/* Source filter feeding decoded frames from the video stream into the
 * filter graph: no inputs, one AVMEDIA_TYPE_VIDEO output pad. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
/* Build the video filter graph: ffplay_input -> [user vfilters] ->
 * buffersink (output forced to YUV420P). On success stores the sink in
 * is->out_video_filter and returns >= 0; negative AVERROR otherwise. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
    AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
    AVFilterContext *filt_src = NULL, *filt_out = NULL;

    /* propagate the global sws flags to auto-inserted scale filters */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        return ret;
#if FF_API_OLD_VSINK_API
    ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
                                       NULL, pix_fmts, graph);
#else
    buffersink_params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
                                       NULL, buffersink_params, graph);
#endif
    av_freep(&buffersink_params);
    if (ret < 0)
        return ret;

    if(vfilters) {
        /* user-supplied chain: splice it between source ("in") and sink ("out") */
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs  = avfilter_inout_alloc();

        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
            return ret;
    } else {
        /* no user chain: connect the source straight to the sink */
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            return ret;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        return ret;

    is->out_video_filter = filt_out;

    return ret;
}
  1501. #endif /* CONFIG_AVFILTER */
/* Video decoding thread: pulls decoded frames (either straight from the
 * decoder or through the avfilter graph), rescales their PTS to the
 * stream time base and hands them to queue_picture() for display.
 * Runs until the video packet queue is aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    /* remember the coded size so a mid-stream resolution change can be
       detected and the filter graph rebuilt */
    int last_w = is->video_st->codec->width;
    int last_h = is->video_st->codec->height;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb = filt_out->inputs[0]->time_base;
#endif
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        if (   last_w != is->video_st->codec->width
            || last_h != is->video_st->codec->height) {
            /* resolution changed: tear down and rebuild the graph */
            av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
                   last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
                goto the_end;
            filt_out = is->out_video_filter;
            last_w = is->video_st->codec->width;
            last_h = is->video_st->codec->height;
        }

        ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
        if (picref) {
            avfilter_fill_frame_from_video_buffer_ref(frame, picref);
            pts_int = picref->pts;
            pos     = picref->pos;
            /* picref ownership travels with the frame; the display side
               releases it via frame->opaque */
            frame->opaque = picref;
        }

        /* rescale the PTS from the filter output time base to the
           stream time base when they differ */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
        pos = pkt.pos;
        av_free_packet(&pkt);
#endif

        if (ret < 0) goto the_end;

#if CONFIG_AVFILTER
        if (!picref)
            continue;
#endif

        /* convert the integer PTS into seconds for the display queue */
        pts = pts_int*av_q2d(is->video_st->time_base);

        ret = queue_picture(is, frame, pts, pos);

        if (ret < 0)
            goto the_end;

        /* in frame-step mode, pause again after emitting one frame */
        if (is->step)
            stream_toggle_pause(is);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
/* Subtitle decoding thread: drains the subtitle packet queue, decodes
 * bitmap subtitles, converts their palettes from RGBA to YUVA (for SDL
 * overlay blending) and appends them to the subpicture ring buffer. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* block until a slot in the subpicture queue becomes free */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            return 0;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
                                 &got_subtitle, pkt);

        /* format == 0 means bitmap (paletted) subtitles */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette in place from RGBA to YUVA */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
    }
    return 0;
}
  1638. /* copy samples for viewing in editor window */
  1639. static void update_sample_display(VideoState *is, short *samples, int samples_size)
  1640. {
  1641. int size, len;
  1642. size = samples_size / sizeof(short);
  1643. while (size > 0) {
  1644. len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
  1645. if (len > size)
  1646. len = size;
  1647. memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
  1648. samples += len;
  1649. is->sample_array_index += len;
  1650. if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
  1651. is->sample_array_index = 0;
  1652. size -= len;
  1653. }
  1654. }
  1655. /* return the new audio buffer size (samples can be added or deleted
  1656. to get better sync if video or external master clock) */
  1657. static int synchronize_audio(VideoState *is, short *samples,
  1658. int samples_size1, double pts)
  1659. {
  1660. int n, samples_size;
  1661. double ref_clock;
  1662. n = 2 * is->audio_st->codec->channels;
  1663. samples_size = samples_size1;
  1664. /* if not master, then we try to remove or add samples to correct the clock */
  1665. if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
  1666. is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
  1667. double diff, avg_diff;
  1668. int wanted_size, min_size, max_size, nb_samples;
  1669. ref_clock = get_master_clock(is);
  1670. diff = get_audio_clock(is) - ref_clock;
  1671. if (diff < AV_NOSYNC_THRESHOLD) {
  1672. is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
  1673. if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
  1674. /* not enough measures to have a correct estimate */
  1675. is->audio_diff_avg_count++;
  1676. } else {
  1677. /* estimate the A-V difference */
  1678. avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
  1679. if (fabs(avg_diff) >= is->audio_diff_threshold) {
  1680. wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
  1681. nb_samples = samples_size / n;
  1682. min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
  1683. max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
  1684. if (wanted_size < min_size)
  1685. wanted_size = min_size;
  1686. else if (wanted_size > max_size)
  1687. wanted_size = max_size;
  1688. /* add or remove samples to correction the synchro */
  1689. if (wanted_size < samples_size) {
  1690. /* remove samples */
  1691. samples_size = wanted_size;
  1692. } else if (wanted_size > samples_size) {
  1693. uint8_t *samples_end, *q;
  1694. int nb;
  1695. /* add samples */
  1696. nb = (samples_size - wanted_size);
  1697. samples_end = (uint8_t *)samples + samples_size - n;
  1698. q = samples_end + n;
  1699. while (nb > 0) {
  1700. memcpy(q, samples_end, n);
  1701. q += n;
  1702. nb -= n;
  1703. }
  1704. samples_size = wanted_size;
  1705. }
  1706. }
  1707. av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
  1708. diff, avg_diff, samples_size - samples_size1,
  1709. is->audio_clock, is->video_clock, is->audio_diff_threshold);
  1710. }
  1711. } else {
  1712. /* too big difference : may be initial PTS errors, so
  1713. reset A-V filter */
  1714. is->audio_diff_avg_count = 0;
  1715. is->audio_diff_cum = 0;
  1716. }
  1717. }
  1718. return samples_size;
  1719. }
/* decode one audio frame and returns its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* sliding window into pkt */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the source format changes */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            /* convert audio_buf1 -> audio_buf2 (S16) when needed */
            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;

        /* a flush packet (queued after a seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
/* prepare a new audio buffer */
/* SDL audio callback: fills 'stream' with 'len' bytes of decoded,
 * synchronized audio.  On decode failure it outputs silence so the
 * device keeps running.  Also updates the audio clock used for A/V
 * synchronization. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    int bytes_per_sec;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill the intermediate buffer once it has been consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               /* feed the waveform/spectrum display when video is off */
               if (is->show_mode != SHOW_MODE_VIDEO)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
    bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
    /* Let's assume the audio driver that is used by SDL has two periods. */
    is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
    is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
}
  1852. /* open a given stream. Return 0 if OK */
  1853. static int stream_component_open(VideoState *is, int stream_index)
  1854. {
  1855. AVFormatContext *ic = is->ic;
  1856. AVCodecContext *avctx;
  1857. AVCodec *codec;
  1858. SDL_AudioSpec wanted_spec, spec;
  1859. AVDictionary *opts;
  1860. AVDictionaryEntry *t = NULL;
  1861. if (stream_index < 0 || stream_index >= ic->nb_streams)
  1862. return -1;
  1863. avctx = ic->streams[stream_index]->codec;
  1864. opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
  1865. /* prepare audio output */
  1866. if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
  1867. if (avctx->channels > 0) {
  1868. avctx->request_channels = FFMIN(2, avctx->channels);
  1869. } else {
  1870. avctx->request_channels = 2;
  1871. }
  1872. }
  1873. codec = avcodec_find_decoder(avctx->codec_id);
  1874. if (!codec)
  1875. return -1;
  1876. avctx->workaround_bugs = workaround_bugs;
  1877. avctx->lowres = lowres;
  1878. if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
  1879. avctx->idct_algo= idct;
  1880. if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
  1881. avctx->skip_frame= skip_frame;
  1882. avctx->skip_idct= skip_idct;
  1883. avctx->skip_loop_filter= skip_loop_filter;
  1884. avctx->error_recognition= error_recognition;
  1885. avctx->error_concealment= error_concealment;
  1886. if(codec->capabilities & CODEC_CAP_DR1)
  1887. avctx->flags |= CODEC_FLAG_EMU_EDGE;
  1888. if (!codec ||
  1889. avcodec_open2(avctx, codec, &opts) < 0)
  1890. return -1;
  1891. if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
  1892. av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
  1893. return AVERROR_OPTION_NOT_FOUND;
  1894. }
  1895. /* prepare audio output */
  1896. if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
  1897. if(avctx->sample_rate <= 0 || avctx->channels <= 0){
  1898. fprintf(stderr, "Invalid sample rate or channel count\n");
  1899. return -1;
  1900. }
  1901. wanted_spec.freq = avctx->sample_rate;
  1902. wanted_spec.format = AUDIO_S16SYS;
  1903. wanted_spec.channels = avctx->channels;
  1904. wanted_spec.silence = 0;
  1905. wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  1906. wanted_spec.callback = sdl_audio_callback;
  1907. wanted_spec.userdata = is;
  1908. if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
  1909. fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
  1910. return -1;
  1911. }
  1912. is->audio_hw_buf_size = spec.size;
  1913. is->audio_src_fmt= AV_SAMPLE_FMT_S16;
  1914. }
  1915. ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
  1916. switch(avctx->codec_type) {
  1917. case AVMEDIA_TYPE_AUDIO:
  1918. is->audio_stream = stream_index;
  1919. is->audio_st = ic->streams[stream_index];
  1920. is->audio_buf_size = 0;
  1921. is->audio_buf_index = 0;
  1922. /* init averaging filter */
  1923. is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
  1924. is->audio_diff_avg_count = 0;
  1925. /* since we do not have a precise anough audio fifo fullness,
  1926. we correct audio sync only if larger than this threshold */
  1927. is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
  1928. memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
  1929. packet_queue_init(&is->audioq);
  1930. SDL_PauseAudio(0);
  1931. break;
  1932. case AVMEDIA_TYPE_VIDEO:
  1933. is->video_stream = stream_index;
  1934. is->video_st = ic->streams[stream_index];
  1935. packet_queue_init(&is->videoq);
  1936. is->video_tid = SDL_CreateThread(video_thread, is);
  1937. break;
  1938. case AVMEDIA_TYPE_SUBTITLE:
  1939. is->subtitle_stream = stream_index;
  1940. is->subtitle_st = ic->streams[stream_index];
  1941. packet_queue_init(&is->subtitleq);
  1942. is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
  1943. break;
  1944. default:
  1945. break;
  1946. }
  1947. return 0;
  1948. }
/* Close one stream component: abort its packet queue, join its decode
 * thread (video/subtitle) or close the SDL audio device (audio), then
 * close the codec and mark the stream as discarded. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    /* finally invalidate the VideoState references to this stream */
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
  2008. /* since we have only one decoding thread, we can use a global
  2009. variable instead of a thread local variable */
  2010. static VideoState *global_video_state;
  2011. static int decode_interrupt_cb(void)
  2012. {
  2013. return (global_video_state && global_video_state->abort_request);
  2014. }
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects and opens the best
 * audio/video/subtitle streams, then loops reading packets into the
 * per-stream queues, servicing pause, seek and EOF/looping requests.
 * On exit it closes everything and, on error, posts FF_QUIT_EVENT. */
static int read_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int eof=0;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    AVDictionary **opts;
    int orig_nb_streams;

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking I/O to be interrupted by abort_request */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* any entry left in format_opts was not recognized by the demuxer */
    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* per-stream codec options for probing; freed right after */
    opts = setup_find_stream_info_opts(ic, codec_opts);
    orig_nb_streams = ic->nb_streams;

    err = avformat_find_stream_info(ic, opts);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default: seek by bytes only for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything; stream_component_open() re-enables what we use */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    is->show_mode = show_mode;

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* without video, fall back to the RDFT (spectrum) display */
    if (is->show_mode == SHOW_MODE_NONE)
        is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the demuxer (network streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop all queued packets and tell each decoder to flush */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* queue a NULL packet so the video decoder drains its delay */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* all queues drained: loop, exit, or idle */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            eof=0;
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
  2245. static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
  2246. {
  2247. VideoState *is;
  2248. is = av_mallocz(sizeof(VideoState));
  2249. if (!is)
  2250. return NULL;
  2251. av_strlcpy(is->filename, filename, sizeof(is->filename));
  2252. is->iformat = iformat;
  2253. is->ytop = 0;
  2254. is->xleft = 0;
  2255. /* start video display */
  2256. is->pictq_mutex = SDL_CreateMutex();
  2257. is->pictq_cond = SDL_CreateCond();
  2258. is->subpq_mutex = SDL_CreateMutex();
  2259. is->subpq_cond = SDL_CreateCond();
  2260. is->av_sync_type = av_sync_type;
  2261. is->read_tid = SDL_CreateThread(read_thread, is);
  2262. if (!is->read_tid) {
  2263. av_free(is);
  2264. return NULL;
  2265. }
  2266. return is;
  2267. }
/* Switch to the next usable stream of the given type, wrapping around.
 * For subtitles the cycle includes "no stream" (index -1); for other
 * types it stops after one full pass with no candidate found. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrap to the "subtitles off" state */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* full loop without finding another stream: keep the current one */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
/* Toggle the global fullscreen flag and reopen the video surface so
 * the new mode takes effect. */
static void toggle_full_screen(VideoState *is)
{
    is_full_screen = !is_full_screen;
    video_open(is);
}
/* Toggle pause and leave single-frame-step mode. */
static void toggle_pause(VideoState *is)
{
    stream_toggle_pause(is);
    is->step = 0;
}
/* Advance exactly one video frame: unpause if needed and set the step
 * flag; video_thread pauses again after queueing the next frame. */
static void step_to_next_frame(VideoState *is)
{
    /* if the stream is paused unpause it, then step */
    if (is->paused)
        stream_toggle_pause(is);
    is->step = 1;
}
/* Cycle to the next display mode (video / waveform / spectrum) and
 * clear the window so the previous mode's pixels do not linger. */
static void toggle_audio_display(VideoState *is)
{
    int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
    fill_rectangle(screen,
                   is->xleft, is->ytop, is->width, is->height,
                   bgcolor);
    SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
}
/* handle an event sent by the GUI */
/* Main interactive loop: blocks on SDL events and dispatches keyboard,
 * mouse, resize, quit and the player's custom FF_* user events.
 * Never returns normally; do_exit() terminates the process. */
static void event_loop(VideoState *cur_stream)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit(cur_stream);
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit(cur_stream);
                break;
            case SDLK_f:
                toggle_full_screen(cur_stream);
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause(cur_stream);
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame(cur_stream);
                break;
            case SDLK_a:
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display(cur_stream);
                break;
            /* arrow keys: relative seeks; incr is in seconds here and is
             * converted to bytes below when seeking by byte offset */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (seek_by_bytes) {
                    /* pick the best-known current byte position: video
                     * first, then the audio packet, then the raw I/O pos */
                    if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                        pos= cur_stream->video_current_pos;
                    }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                        pos= cur_stream->audio_pkt.pos;
                    }else
                        pos = avio_tell(cur_stream->ic->pb);
                    /* seconds -> bytes via the stream bitrate, with a
                     * fallback rate when the bitrate is unknown */
                    if (cur_stream->ic->bit_rate)
                        incr *= cur_stream->ic->bit_rate / 8.0;
                    else
                        incr *= 180000.0;
                    pos += incr;
                    stream_seek(cur_stream, pos, incr, 1);
                } else {
                    /* time-based seek relative to the master clock */
                    pos = get_master_clock(cur_stream);
                    pos += incr;
                    stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit(cur_stream);
                break;
            }
            /* fall through: a click seeks like a motion-drag does */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only seek while a button is held during motion */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            /* map the horizontal click position to a fraction of the file */
            if(seek_by_bytes || cur_stream->ic->duration<=0){
                uint64_t size=  avio_size(cur_stream->ic->pb);
                stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
            }else{
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = x/cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                ts = frac*cur_stream->ic->duration;
                /* ic->duration is relative to start_time when one is set */
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0, 0);
            }
            break;
        case SDL_VIDEORESIZE:
            /* recreate the SDL surface at the new window size */
            screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                      SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
            screen_width  = cur_stream->width  = event.resize.w;
            screen_height = cur_stream->height = event.resize.h;
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit(cur_stream);
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)open the window and allocate
             * the picture buffer on the main (SDL) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
/* Deprecated -s option: warn and forward the value to the generic
 * "video_size" AVOption handler. */
static int opt_frame_size(const char *opt, const char *arg)
{
    av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
    return opt_default("video_size", arg);
}
  2482. static int opt_width(const char *opt, const char *arg)
  2483. {
  2484. screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
  2485. return 0;
  2486. }
  2487. static int opt_height(const char *opt, const char *arg)
  2488. {
  2489. screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
  2490. return 0;
  2491. }
  2492. static int opt_format(const char *opt, const char *arg)
  2493. {
  2494. file_iformat = av_find_input_format(arg);
  2495. if (!file_iformat) {
  2496. fprintf(stderr, "Unknown input format: %s\n", arg);
  2497. return AVERROR(EINVAL);
  2498. }
  2499. return 0;
  2500. }
/* Deprecated -pix_fmt option: warn and forward the value to the generic
 * "pixel_format" AVOption handler. */
static int opt_frame_pix_fmt(const char *opt, const char *arg)
{
    av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
    return opt_default("pixel_format", arg);
}
  2506. static int opt_sync(const char *opt, const char *arg)
  2507. {
  2508. if (!strcmp(arg, "audio"))
  2509. av_sync_type = AV_SYNC_AUDIO_MASTER;
  2510. else if (!strcmp(arg, "video"))
  2511. av_sync_type = AV_SYNC_VIDEO_MASTER;
  2512. else if (!strcmp(arg, "ext"))
  2513. av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
  2514. else {
  2515. fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
  2516. exit(1);
  2517. }
  2518. return 0;
  2519. }
  2520. static int opt_seek(const char *opt, const char *arg)
  2521. {
  2522. start_time = parse_time_or_die(opt, arg, 1);
  2523. return 0;
  2524. }
  2525. static int opt_duration(const char *opt, const char *arg)
  2526. {
  2527. duration = parse_time_or_die(opt, arg, 1);
  2528. return 0;
  2529. }
  2530. static int opt_show_mode(const char *opt, const char *arg)
  2531. {
  2532. show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
  2533. !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
  2534. !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
  2535. parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
  2536. return 0;
  2537. }
  2538. static void opt_input_file(void *optctx, const char *filename)
  2539. {
  2540. if (input_filename) {
  2541. fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
  2542. filename, input_filename);
  2543. exit_program(1);
  2544. }
  2545. if (!strcmp(filename, "-"))
  2546. filename = "pipe:";
  2547. input_filename = filename;
  2548. }
/* Dummy target for the no-op "-i" compatibility option below. */
static int dummy;

/* Command-line option table consumed by parse_options() / opt_help(). */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry and mode */
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* decoder tuning (expert) */
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
    /* playback behaviour */
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    /* "-i" is accepted for ffmpeg command-line compatibility but ignored */
    { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
    { NULL, },
};
/* Print the one-line usage synopsis to stdout. */
static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: %s [options] input_file\n", program_name);
    printf("\n");
}
/* -h option: print usage, the option table (main then advanced), the
 * AVOptions of the codec/format (and, without libavfilter, swscale)
 * layers, and the interactive key bindings. Always returns 0. */
static int opt_help(const char *opt, const char *arg)
{
    const AVClass *class;
    av_log_set_callback(log_callback_help);
    show_usage();
    /* non-expert options first, then the expert ones */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    class = avcodec_get_class();
    av_opt_show2(&class, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    class = avformat_get_class();
    av_opt_show2(&class, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* scaling is done by libswscale when libavfilter is not compiled in */
    printf("\n");
    class = sws_get_class();
    av_opt_show2(&class, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
    return 0;
}
  2639. static int lockmgr(void **mtx, enum AVLockOp op)
  2640. {
  2641. switch(op) {
  2642. case AV_LOCK_CREATE:
  2643. *mtx = SDL_CreateMutex();
  2644. if(!*mtx)
  2645. return 1;
  2646. return 0;
  2647. case AV_LOCK_OBTAIN:
  2648. return !!SDL_LockMutex(*mtx);
  2649. case AV_LOCK_RELEASE:
  2650. return !!SDL_UnlockMutex(*mtx);
  2651. case AV_LOCK_DESTROY:
  2652. SDL_DestroyMutex(*mtx);
  2653. return 0;
  2654. }
  2655. return 1;
  2656. }
/* Called from the main */
/* Program entry point: register FFmpeg components, parse the command
 * line, initialize SDL, open the input stream and hand control to
 * event_loop(), which only exits via do_exit(). */
int main(int argc, char **argv)
{
    int flags;
    VideoState *is;
    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();
    init_opts();
    show_banner();
    parse_options(NULL, argc, argv, options, opt_input_file);
    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit(1);
    }
    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
    if (audio_disable)
        flags &= ~SDL_INIT_AUDIO;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
        exit(1);
    }
    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop size for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }
    /* drop event classes the player never handles */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
    if (av_lockmgr_register(lockmgr)) {
        fprintf(stderr, "Could not initialize lock manager!\n");
        do_exit(NULL);
    }
    /* sentinel packet used to signal queue flushes on seek.
     * NOTE(review): assigns a string literal to packet data — presumably
     * only the pointer identity is compared and it is never freed or
     * written through; confirm against the queue code. */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";
    is = stream_open(input_filename, file_iformat);
    if (!is) {
        fprintf(stderr, "Failed to initialize VideoState!\n");
        do_exit(NULL);
    }
    event_loop(is);
    /* never returns */
    return 0;
}