/* ffplay.c — FFmpeg's simple SDL-based media player */
  1. /*
  2. * FFplay : Simple Media Player based on the ffmpeg libraries
  3. * Copyright (c) 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include <math.h>
  22. #include <limits.h>
  23. #include "libavutil/avstring.h"
  24. #include "libavformat/avformat.h"
  25. #include "libavformat/rtsp.h"
  26. #include "libavdevice/avdevice.h"
  27. #include "libswscale/swscale.h"
  28. #include "libavcodec/audioconvert.h"
  29. #include "libavcodec/opt.h"
  30. #include "cmdutils.h"
  31. #include <SDL.h>
  32. #include <SDL_thread.h>
  33. #ifdef __MINGW32__
  34. #undef main /* We don't want SDL to override our main() */
  35. #endif
  36. #undef exit
  37. const char program_name[] = "FFplay";
  38. const int program_birth_year = 2003;
  39. //#define DEBUG_SYNC
  40. #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
  41. #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
  42. #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
  43. /* SDL audio buffer size, in samples. Should be small to have precise
  44. A/V sync as SDL does not have hardware buffer fullness info. */
  45. #define SDL_AUDIO_BUFFER_SIZE 1024
  46. /* no AV sync correction is done if below the AV sync threshold */
  47. #define AV_SYNC_THRESHOLD 0.01
  48. /* no AV correction is done if too big error */
  49. #define AV_NOSYNC_THRESHOLD 10.0
  50. /* maximum audio speed change to get correct sync */
  51. #define SAMPLE_CORRECTION_PERCENT_MAX 10
  52. /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
  53. #define AUDIO_DIFF_AVG_NB 20
  54. /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
  55. #define SAMPLE_ARRAY_SIZE (2*65536)
  56. static int sws_flags = SWS_BICUBIC;
  57. typedef struct PacketQueue {
  58. AVPacketList *first_pkt, *last_pkt;
  59. int nb_packets;
  60. int size;
  61. int abort_request;
  62. SDL_mutex *mutex;
  63. SDL_cond *cond;
  64. } PacketQueue;
  65. #define VIDEO_PICTURE_QUEUE_SIZE 1
  66. #define SUBPICTURE_QUEUE_SIZE 4
  67. typedef struct VideoPicture {
  68. double pts; ///<presentation time stamp for this picture
  69. SDL_Overlay *bmp;
  70. int width, height; /* source height & width */
  71. int allocated;
  72. } VideoPicture;
  73. typedef struct SubPicture {
  74. double pts; /* presentation time stamp for this picture */
  75. AVSubtitle sub;
  76. } SubPicture;
  77. enum {
  78. AV_SYNC_AUDIO_MASTER, /* default choice */
  79. AV_SYNC_VIDEO_MASTER,
  80. AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
  81. };
  82. typedef struct VideoState {
  83. SDL_Thread *parse_tid;
  84. SDL_Thread *video_tid;
  85. AVInputFormat *iformat;
  86. int no_background;
  87. int abort_request;
  88. int paused;
  89. int last_paused;
  90. int seek_req;
  91. int seek_flags;
  92. int64_t seek_pos;
  93. AVFormatContext *ic;
  94. int dtg_active_format;
  95. int audio_stream;
  96. int av_sync_type;
  97. double external_clock; /* external clock base */
  98. int64_t external_clock_time;
  99. double audio_clock;
  100. double audio_diff_cum; /* used for AV difference average computation */
  101. double audio_diff_avg_coef;
  102. double audio_diff_threshold;
  103. int audio_diff_avg_count;
  104. AVStream *audio_st;
  105. PacketQueue audioq;
  106. int audio_hw_buf_size;
  107. /* samples output by the codec. we reserve more space for avsync
  108. compensation */
  109. DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
  110. DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
  111. uint8_t *audio_buf;
  112. unsigned int audio_buf_size; /* in bytes */
  113. int audio_buf_index; /* in bytes */
  114. AVPacket audio_pkt;
  115. uint8_t *audio_pkt_data;
  116. int audio_pkt_size;
  117. enum SampleFormat audio_src_fmt;
  118. AVAudioConvert *reformat_ctx;
  119. int show_audio; /* if true, display audio samples */
  120. int16_t sample_array[SAMPLE_ARRAY_SIZE];
  121. int sample_array_index;
  122. int last_i_start;
  123. SDL_Thread *subtitle_tid;
  124. int subtitle_stream;
  125. int subtitle_stream_changed;
  126. AVStream *subtitle_st;
  127. PacketQueue subtitleq;
  128. SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
  129. int subpq_size, subpq_rindex, subpq_windex;
  130. SDL_mutex *subpq_mutex;
  131. SDL_cond *subpq_cond;
  132. double frame_timer;
  133. double frame_last_pts;
  134. double frame_last_delay;
  135. double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
  136. int video_stream;
  137. AVStream *video_st;
  138. PacketQueue videoq;
  139. double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
  140. int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
  141. VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
  142. int pictq_size, pictq_rindex, pictq_windex;
  143. SDL_mutex *pictq_mutex;
  144. SDL_cond *pictq_cond;
  145. // QETimer *video_timer;
  146. char filename[1024];
  147. int width, height, xleft, ytop;
  148. } VideoState;
  149. static void show_help(void);
  150. static int audio_write_get_buf_size(VideoState *is);
  151. /* options specified by the user */
  152. static AVInputFormat *file_iformat;
  153. static const char *input_filename;
  154. static int fs_screen_width;
  155. static int fs_screen_height;
  156. static int screen_width = 0;
  157. static int screen_height = 0;
  158. static int frame_width = 0;
  159. static int frame_height = 0;
  160. static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
  161. static int audio_disable;
  162. static int video_disable;
  163. static int wanted_audio_stream= 0;
  164. static int wanted_video_stream= 0;
  165. static int wanted_subtitle_stream= -1;
  166. static int seek_by_bytes;
  167. static int display_disable;
  168. static int show_status;
  169. static int av_sync_type = AV_SYNC_AUDIO_MASTER;
  170. static int64_t start_time = AV_NOPTS_VALUE;
  171. static int debug = 0;
  172. static int debug_mv = 0;
  173. static int step = 0;
  174. static int thread_count = 1;
  175. static int workaround_bugs = 1;
  176. static int fast = 0;
  177. static int genpts = 0;
  178. static int lowres = 0;
  179. static int idct = FF_IDCT_AUTO;
  180. static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
  181. static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
  182. static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
  183. static int error_recognition = FF_ER_CAREFUL;
  184. static int error_concealment = 3;
  185. static int decoder_reorder_pts= 0;
  186. /* current context */
  187. static int is_full_screen;
  188. static VideoState *cur_stream;
  189. static int64_t audio_callback_time;
  190. static AVPacket flush_pkt;
  191. #define FF_ALLOC_EVENT (SDL_USEREVENT)
  192. #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
  193. #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
  194. static SDL_Surface *screen;
  195. /* packet queue handling */
  196. static void packet_queue_init(PacketQueue *q)
  197. {
  198. memset(q, 0, sizeof(PacketQueue));
  199. q->mutex = SDL_CreateMutex();
  200. q->cond = SDL_CreateCond();
  201. }
  202. static void packet_queue_flush(PacketQueue *q)
  203. {
  204. AVPacketList *pkt, *pkt1;
  205. SDL_LockMutex(q->mutex);
  206. for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
  207. pkt1 = pkt->next;
  208. av_free_packet(&pkt->pkt);
  209. av_freep(&pkt);
  210. }
  211. q->last_pkt = NULL;
  212. q->first_pkt = NULL;
  213. q->nb_packets = 0;
  214. q->size = 0;
  215. SDL_UnlockMutex(q->mutex);
  216. }
  217. static void packet_queue_end(PacketQueue *q)
  218. {
  219. packet_queue_flush(q);
  220. SDL_DestroyMutex(q->mutex);
  221. SDL_DestroyCond(q->cond);
  222. }
  223. static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
  224. {
  225. AVPacketList *pkt1;
  226. /* duplicate the packet */
  227. if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
  228. return -1;
  229. pkt1 = av_malloc(sizeof(AVPacketList));
  230. if (!pkt1)
  231. return -1;
  232. pkt1->pkt = *pkt;
  233. pkt1->next = NULL;
  234. SDL_LockMutex(q->mutex);
  235. if (!q->last_pkt)
  236. q->first_pkt = pkt1;
  237. else
  238. q->last_pkt->next = pkt1;
  239. q->last_pkt = pkt1;
  240. q->nb_packets++;
  241. q->size += pkt1->pkt.size + sizeof(*pkt1);
  242. /* XXX: should duplicate packet data in DV case */
  243. SDL_CondSignal(q->cond);
  244. SDL_UnlockMutex(q->mutex);
  245. return 0;
  246. }
  247. static void packet_queue_abort(PacketQueue *q)
  248. {
  249. SDL_LockMutex(q->mutex);
  250. q->abort_request = 1;
  251. SDL_CondSignal(q->cond);
  252. SDL_UnlockMutex(q->mutex);
  253. }
  254. /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
  255. static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
  256. {
  257. AVPacketList *pkt1;
  258. int ret;
  259. SDL_LockMutex(q->mutex);
  260. for(;;) {
  261. if (q->abort_request) {
  262. ret = -1;
  263. break;
  264. }
  265. pkt1 = q->first_pkt;
  266. if (pkt1) {
  267. q->first_pkt = pkt1->next;
  268. if (!q->first_pkt)
  269. q->last_pkt = NULL;
  270. q->nb_packets--;
  271. q->size -= pkt1->pkt.size + sizeof(*pkt1);
  272. *pkt = pkt1->pkt;
  273. av_free(pkt1);
  274. ret = 1;
  275. break;
  276. } else if (!block) {
  277. ret = 0;
  278. break;
  279. } else {
  280. SDL_CondWait(q->cond, q->mutex);
  281. }
  282. }
  283. SDL_UnlockMutex(q->mutex);
  284. return ret;
  285. }
  286. static inline void fill_rectangle(SDL_Surface *screen,
  287. int x, int y, int w, int h, int color)
  288. {
  289. SDL_Rect rect;
  290. rect.x = x;
  291. rect.y = y;
  292. rect.w = w;
  293. rect.h = h;
  294. SDL_FillRect(screen, &rect, color);
  295. }
  296. #if 0
  297. /* draw only the border of a rectangle */
  298. void fill_border(VideoState *s, int x, int y, int w, int h, int color)
  299. {
  300. int w1, w2, h1, h2;
  301. /* fill the background */
  302. w1 = x;
  303. if (w1 < 0)
  304. w1 = 0;
  305. w2 = s->width - (x + w);
  306. if (w2 < 0)
  307. w2 = 0;
  308. h1 = y;
  309. if (h1 < 0)
  310. h1 = 0;
  311. h2 = s->height - (y + h);
  312. if (h2 < 0)
  313. h2 = 0;
  314. fill_rectangle(screen,
  315. s->xleft, s->ytop,
  316. w1, s->height,
  317. color);
  318. fill_rectangle(screen,
  319. s->xleft + s->width - w2, s->ytop,
  320. w2, s->height,
  321. color);
  322. fill_rectangle(screen,
  323. s->xleft + w1, s->ytop,
  324. s->width - w1 - w2, h1,
  325. color);
  326. fill_rectangle(screen,
  327. s->xleft + w1, s->ytop + s->height - h2,
  328. s->width - w1 - w2, h2,
  329. color);
  330. }
  331. #endif
  332. #define SCALEBITS 10
  333. #define ONE_HALF (1 << (SCALEBITS - 1))
  334. #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
  335. #define RGB_TO_Y_CCIR(r, g, b) \
  336. ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  337. FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
  338. #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
  339. (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
  340. FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  341. #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
  342. (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
  343. FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
  344. #define ALPHA_BLEND(a, oldp, newp, s)\
  345. ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
  346. #define RGBA_IN(r, g, b, a, s)\
  347. {\
  348. unsigned int v = ((const uint32_t *)(s))[0];\
  349. a = (v >> 24) & 0xff;\
  350. r = (v >> 16) & 0xff;\
  351. g = (v >> 8) & 0xff;\
  352. b = v & 0xff;\
  353. }
  354. #define YUVA_IN(y, u, v, a, s, pal)\
  355. {\
  356. unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
  357. a = (val >> 24) & 0xff;\
  358. y = (val >> 16) & 0xff;\
  359. u = (val >> 8) & 0xff;\
  360. v = val & 0xff;\
  361. }
  362. #define YUVA_OUT(d, y, u, v, a)\
  363. {\
  364. ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
  365. }
  366. #define BPP 1
  367. static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
  368. {
  369. int wrap, wrap3, width2, skip2;
  370. int y, u, v, a, u1, v1, a1, w, h;
  371. uint8_t *lum, *cb, *cr;
  372. const uint8_t *p;
  373. const uint32_t *pal;
  374. int dstx, dsty, dstw, dsth;
  375. dstw = av_clip(rect->w, 0, imgw);
  376. dsth = av_clip(rect->h, 0, imgh);
  377. dstx = av_clip(rect->x, 0, imgw - dstw);
  378. dsty = av_clip(rect->y, 0, imgh - dsth);
  379. lum = dst->data[0] + dsty * dst->linesize[0];
  380. cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
  381. cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
  382. width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
  383. skip2 = dstx >> 1;
  384. wrap = dst->linesize[0];
  385. wrap3 = rect->pict.linesize[0];
  386. p = rect->pict.data[0];
  387. pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
  388. if (dsty & 1) {
  389. lum += dstx;
  390. cb += skip2;
  391. cr += skip2;
  392. if (dstx & 1) {
  393. YUVA_IN(y, u, v, a, p, pal);
  394. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  395. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  396. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  397. cb++;
  398. cr++;
  399. lum++;
  400. p += BPP;
  401. }
  402. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  403. YUVA_IN(y, u, v, a, p, pal);
  404. u1 = u;
  405. v1 = v;
  406. a1 = a;
  407. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  408. YUVA_IN(y, u, v, a, p + BPP, pal);
  409. u1 += u;
  410. v1 += v;
  411. a1 += a;
  412. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  413. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  414. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  415. cb++;
  416. cr++;
  417. p += 2 * BPP;
  418. lum += 2;
  419. }
  420. if (w) {
  421. YUVA_IN(y, u, v, a, p, pal);
  422. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  423. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  424. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  425. p++;
  426. lum++;
  427. }
  428. p += wrap3 - dstw * BPP;
  429. lum += wrap - dstw - dstx;
  430. cb += dst->linesize[1] - width2 - skip2;
  431. cr += dst->linesize[2] - width2 - skip2;
  432. }
  433. for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
  434. lum += dstx;
  435. cb += skip2;
  436. cr += skip2;
  437. if (dstx & 1) {
  438. YUVA_IN(y, u, v, a, p, pal);
  439. u1 = u;
  440. v1 = v;
  441. a1 = a;
  442. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  443. p += wrap3;
  444. lum += wrap;
  445. YUVA_IN(y, u, v, a, p, pal);
  446. u1 += u;
  447. v1 += v;
  448. a1 += a;
  449. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  450. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  451. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  452. cb++;
  453. cr++;
  454. p += -wrap3 + BPP;
  455. lum += -wrap + 1;
  456. }
  457. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  458. YUVA_IN(y, u, v, a, p, pal);
  459. u1 = u;
  460. v1 = v;
  461. a1 = a;
  462. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  463. YUVA_IN(y, u, v, a, p + BPP, pal);
  464. u1 += u;
  465. v1 += v;
  466. a1 += a;
  467. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  468. p += wrap3;
  469. lum += wrap;
  470. YUVA_IN(y, u, v, a, p, pal);
  471. u1 += u;
  472. v1 += v;
  473. a1 += a;
  474. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  475. YUVA_IN(y, u, v, a, p + BPP, pal);
  476. u1 += u;
  477. v1 += v;
  478. a1 += a;
  479. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  480. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
  481. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
  482. cb++;
  483. cr++;
  484. p += -wrap3 + 2 * BPP;
  485. lum += -wrap + 2;
  486. }
  487. if (w) {
  488. YUVA_IN(y, u, v, a, p, pal);
  489. u1 = u;
  490. v1 = v;
  491. a1 = a;
  492. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  493. p += wrap3;
  494. lum += wrap;
  495. YUVA_IN(y, u, v, a, p, pal);
  496. u1 += u;
  497. v1 += v;
  498. a1 += a;
  499. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  500. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  501. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  502. cb++;
  503. cr++;
  504. p += -wrap3 + BPP;
  505. lum += -wrap + 1;
  506. }
  507. p += wrap3 + (wrap3 - dstw * BPP);
  508. lum += wrap + (wrap - dstw - dstx);
  509. cb += dst->linesize[1] - width2 - skip2;
  510. cr += dst->linesize[2] - width2 - skip2;
  511. }
  512. /* handle odd height */
  513. if (h) {
  514. lum += dstx;
  515. cb += skip2;
  516. cr += skip2;
  517. if (dstx & 1) {
  518. YUVA_IN(y, u, v, a, p, pal);
  519. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  520. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  521. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  522. cb++;
  523. cr++;
  524. lum++;
  525. p += BPP;
  526. }
  527. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  528. YUVA_IN(y, u, v, a, p, pal);
  529. u1 = u;
  530. v1 = v;
  531. a1 = a;
  532. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  533. YUVA_IN(y, u, v, a, p + BPP, pal);
  534. u1 += u;
  535. v1 += v;
  536. a1 += a;
  537. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  538. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
  539. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
  540. cb++;
  541. cr++;
  542. p += 2 * BPP;
  543. lum += 2;
  544. }
  545. if (w) {
  546. YUVA_IN(y, u, v, a, p, pal);
  547. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  548. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  549. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  550. }
  551. }
  552. }
  553. static void free_subpicture(SubPicture *sp)
  554. {
  555. int i;
  556. for (i = 0; i < sp->sub.num_rects; i++)
  557. {
  558. av_freep(&sp->sub.rects[i]->pict.data[0]);
  559. av_freep(&sp->sub.rects[i]->pict.data[1]);
  560. av_freep(&sp->sub.rects[i]);
  561. }
  562. av_free(sp->sub.rects);
  563. memset(&sp->sub, 0, sizeof(AVSubtitle));
  564. }
  565. static void video_image_display(VideoState *is)
  566. {
  567. VideoPicture *vp;
  568. SubPicture *sp;
  569. AVPicture pict;
  570. float aspect_ratio;
  571. int width, height, x, y;
  572. SDL_Rect rect;
  573. int i;
  574. vp = &is->pictq[is->pictq_rindex];
  575. if (vp->bmp) {
  576. /* XXX: use variable in the frame */
  577. if (is->video_st->sample_aspect_ratio.num)
  578. aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
  579. else if (is->video_st->codec->sample_aspect_ratio.num)
  580. aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
  581. else
  582. aspect_ratio = 0;
  583. if (aspect_ratio <= 0.0)
  584. aspect_ratio = 1.0;
  585. aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
  586. /* if an active format is indicated, then it overrides the
  587. mpeg format */
  588. #if 0
  589. if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
  590. is->dtg_active_format = is->video_st->codec->dtg_active_format;
  591. printf("dtg_active_format=%d\n", is->dtg_active_format);
  592. }
  593. #endif
  594. #if 0
  595. switch(is->video_st->codec->dtg_active_format) {
  596. case FF_DTG_AFD_SAME:
  597. default:
  598. /* nothing to do */
  599. break;
  600. case FF_DTG_AFD_4_3:
  601. aspect_ratio = 4.0 / 3.0;
  602. break;
  603. case FF_DTG_AFD_16_9:
  604. aspect_ratio = 16.0 / 9.0;
  605. break;
  606. case FF_DTG_AFD_14_9:
  607. aspect_ratio = 14.0 / 9.0;
  608. break;
  609. case FF_DTG_AFD_4_3_SP_14_9:
  610. aspect_ratio = 14.0 / 9.0;
  611. break;
  612. case FF_DTG_AFD_16_9_SP_14_9:
  613. aspect_ratio = 14.0 / 9.0;
  614. break;
  615. case FF_DTG_AFD_SP_4_3:
  616. aspect_ratio = 4.0 / 3.0;
  617. break;
  618. }
  619. #endif
  620. if (is->subtitle_st)
  621. {
  622. if (is->subpq_size > 0)
  623. {
  624. sp = &is->subpq[is->subpq_rindex];
  625. if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
  626. {
  627. SDL_LockYUVOverlay (vp->bmp);
  628. pict.data[0] = vp->bmp->pixels[0];
  629. pict.data[1] = vp->bmp->pixels[2];
  630. pict.data[2] = vp->bmp->pixels[1];
  631. pict.linesize[0] = vp->bmp->pitches[0];
  632. pict.linesize[1] = vp->bmp->pitches[2];
  633. pict.linesize[2] = vp->bmp->pitches[1];
  634. for (i = 0; i < sp->sub.num_rects; i++)
  635. blend_subrect(&pict, sp->sub.rects[i],
  636. vp->bmp->w, vp->bmp->h);
  637. SDL_UnlockYUVOverlay (vp->bmp);
  638. }
  639. }
  640. }
  641. /* XXX: we suppose the screen has a 1.0 pixel ratio */
  642. height = is->height;
  643. width = ((int)rint(height * aspect_ratio)) & ~1;
  644. if (width > is->width) {
  645. width = is->width;
  646. height = ((int)rint(width / aspect_ratio)) & ~1;
  647. }
  648. x = (is->width - width) / 2;
  649. y = (is->height - height) / 2;
  650. if (!is->no_background) {
  651. /* fill the background */
  652. // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
  653. } else {
  654. is->no_background = 0;
  655. }
  656. rect.x = is->xleft + x;
  657. rect.y = is->ytop + y;
  658. rect.w = width;
  659. rect.h = height;
  660. SDL_DisplayYUVOverlay(vp->bmp, &rect);
  661. } else {
  662. #if 0
  663. fill_rectangle(screen,
  664. is->xleft, is->ytop, is->width, is->height,
  665. QERGB(0x00, 0x00, 0x00));
  666. #endif
  667. }
  668. }
  669. static inline int compute_mod(int a, int b)
  670. {
  671. a = a % b;
  672. if (a >= 0)
  673. return a;
  674. else
  675. return a + b;
  676. }
  677. static void video_audio_display(VideoState *s)
  678. {
  679. int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
  680. int ch, channels, h, h2, bgcolor, fgcolor;
  681. int16_t time_diff;
  682. /* compute display index : center on currently output samples */
  683. channels = s->audio_st->codec->channels;
  684. nb_display_channels = channels;
  685. if (!s->paused) {
  686. n = 2 * channels;
  687. delay = audio_write_get_buf_size(s);
  688. delay /= n;
  689. /* to be more precise, we take into account the time spent since
  690. the last buffer computation */
  691. if (audio_callback_time) {
  692. time_diff = av_gettime() - audio_callback_time;
  693. delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
  694. }
  695. delay -= s->width / 2;
  696. if (delay < s->width)
  697. delay = s->width;
  698. i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
  699. h= INT_MIN;
  700. for(i=0; i<1000; i+=channels){
  701. int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
  702. int a= s->sample_array[idx];
  703. int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
  704. int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
  705. int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
  706. int score= a-d;
  707. if(h<score && (b^c)<0){
  708. h= score;
  709. i_start= idx;
  710. }
  711. }
  712. s->last_i_start = i_start;
  713. } else {
  714. i_start = s->last_i_start;
  715. }
  716. bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
  717. fill_rectangle(screen,
  718. s->xleft, s->ytop, s->width, s->height,
  719. bgcolor);
  720. fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
  721. /* total height for one channel */
  722. h = s->height / nb_display_channels;
  723. /* graph height / 2 */
  724. h2 = (h * 9) / 20;
  725. for(ch = 0;ch < nb_display_channels; ch++) {
  726. i = i_start + ch;
  727. y1 = s->ytop + ch * h + (h / 2); /* position of center line */
  728. for(x = 0; x < s->width; x++) {
  729. y = (s->sample_array[i] * h2) >> 15;
  730. if (y < 0) {
  731. y = -y;
  732. ys = y1 - y;
  733. } else {
  734. ys = y1;
  735. }
  736. fill_rectangle(screen,
  737. s->xleft + x, ys, 1, y,
  738. fgcolor);
  739. i += channels;
  740. if (i >= SAMPLE_ARRAY_SIZE)
  741. i -= SAMPLE_ARRAY_SIZE;
  742. }
  743. }
  744. fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
  745. for(ch = 1;ch < nb_display_channels; ch++) {
  746. y = s->ytop + ch * h;
  747. fill_rectangle(screen,
  748. s->xleft, y, s->width, 1,
  749. fgcolor);
  750. }
  751. SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
  752. }
  753. static int video_open(VideoState *is){
  754. int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
  755. int w,h;
  756. if(is_full_screen) flags |= SDL_FULLSCREEN;
  757. else flags |= SDL_RESIZABLE;
  758. if (is_full_screen && fs_screen_width) {
  759. w = fs_screen_width;
  760. h = fs_screen_height;
  761. } else if(!is_full_screen && screen_width){
  762. w = screen_width;
  763. h = screen_height;
  764. }else if (is->video_st && is->video_st->codec->width){
  765. w = is->video_st->codec->width;
  766. h = is->video_st->codec->height;
  767. } else {
  768. w = 640;
  769. h = 480;
  770. }
  771. #ifndef __APPLE__
  772. screen = SDL_SetVideoMode(w, h, 0, flags);
  773. #else
  774. /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
  775. screen = SDL_SetVideoMode(w, h, 24, flags);
  776. #endif
  777. if (!screen) {
  778. fprintf(stderr, "SDL: could not set video mode - exiting\n");
  779. return -1;
  780. }
  781. SDL_WM_SetCaption("FFplay", "FFplay");
  782. is->width = screen->w;
  783. is->height = screen->h;
  784. return 0;
  785. }
  786. /* display the current picture, if any */
  787. static void video_display(VideoState *is)
  788. {
  789. if(!screen)
  790. video_open(cur_stream);
  791. if (is->audio_st && is->show_audio)
  792. video_audio_display(is);
  793. else if (is->video_st)
  794. video_image_display(is);
  795. }
  796. static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
  797. {
  798. SDL_Event event;
  799. event.type = FF_REFRESH_EVENT;
  800. event.user.data1 = opaque;
  801. SDL_PushEvent(&event);
  802. return 0; /* 0 means stop timer */
  803. }
  804. /* schedule a video refresh in 'delay' ms */
  805. static void schedule_refresh(VideoState *is, int delay)
  806. {
  807. if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
  808. SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
  809. }
  810. /* get the current audio clock value */
  811. static double get_audio_clock(VideoState *is)
  812. {
  813. double pts;
  814. int hw_buf_size, bytes_per_sec;
  815. pts = is->audio_clock;
  816. hw_buf_size = audio_write_get_buf_size(is);
  817. bytes_per_sec = 0;
  818. if (is->audio_st) {
  819. bytes_per_sec = is->audio_st->codec->sample_rate *
  820. 2 * is->audio_st->codec->channels;
  821. }
  822. if (bytes_per_sec)
  823. pts -= (double)hw_buf_size / bytes_per_sec;
  824. return pts;
  825. }
  826. /* get the current video clock value */
  827. static double get_video_clock(VideoState *is)
  828. {
  829. double delta;
  830. if (is->paused) {
  831. delta = 0;
  832. } else {
  833. delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
  834. }
  835. return is->video_current_pts + delta;
  836. }
  837. /* get the current external clock value */
  838. static double get_external_clock(VideoState *is)
  839. {
  840. int64_t ti;
  841. ti = av_gettime();
  842. return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
  843. }
  844. /* get the current master clock value */
  845. static double get_master_clock(VideoState *is)
  846. {
  847. double val;
  848. if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
  849. if (is->video_st)
  850. val = get_video_clock(is);
  851. else
  852. val = get_audio_clock(is);
  853. } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
  854. if (is->audio_st)
  855. val = get_audio_clock(is);
  856. else
  857. val = get_video_clock(is);
  858. } else {
  859. val = get_external_clock(is);
  860. }
  861. return val;
  862. }
  863. /* seek in the stream */
  864. static void stream_seek(VideoState *is, int64_t pos, int rel)
  865. {
  866. if (!is->seek_req) {
  867. is->seek_pos = pos;
  868. is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
  869. if (seek_by_bytes)
  870. is->seek_flags |= AVSEEK_FLAG_BYTE;
  871. is->seek_req = 1;
  872. }
  873. }
  874. /* pause or resume the video */
  875. static void stream_pause(VideoState *is)
  876. {
  877. is->paused = !is->paused;
  878. if (!is->paused) {
  879. is->video_current_pts = get_video_clock(is);
  880. is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
  881. }
  882. }
  883. static double compute_frame_delay(double frame_current_pts, VideoState *is)
  884. {
  885. double actual_delay, delay, sync_threshold, ref_clock, diff;
  886. /* compute nominal delay */
  887. delay = frame_current_pts - is->frame_last_pts;
  888. if (delay <= 0 || delay >= 10.0) {
  889. /* if incorrect delay, use previous one */
  890. delay = is->frame_last_delay;
  891. } else {
  892. is->frame_last_delay = delay;
  893. }
  894. is->frame_last_pts = frame_current_pts;
  895. /* update delay to follow master synchronisation source */
  896. if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
  897. is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
  898. /* if video is slave, we try to correct big delays by
  899. duplicating or deleting a frame */
  900. ref_clock = get_master_clock(is);
  901. diff = frame_current_pts - ref_clock;
  902. /* skip or repeat frame. We take into account the
  903. delay to compute the threshold. I still don't know
  904. if it is the best guess */
  905. sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
  906. if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
  907. if (diff <= -sync_threshold)
  908. delay = 0;
  909. else if (diff >= sync_threshold)
  910. delay = 2 * delay;
  911. }
  912. }
  913. is->frame_timer += delay;
  914. /* compute the REAL delay (we need to do that to avoid
  915. long term errors */
  916. actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
  917. if (actual_delay < 0.010) {
  918. /* XXX: should skip picture */
  919. actual_delay = 0.010;
  920. }
  921. #if defined(DEBUG_SYNC)
  922. printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
  923. delay, actual_delay, frame_current_pts, -diff);
  924. #endif
  925. return actual_delay;
  926. }
/* called to display each frame: dequeues one picture, retires expired
   subtitles, draws, and schedules the next refresh.  Runs on the main
   (event-loop) thread. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if (is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream was switched: drop every queued
                       subtitle picture and wake the subtitle thread */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        /* sp2 = the subtitle after sp, if one is queued */
                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire sp once its display window has ended, or
                           once the following subtitle is due to start */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */
        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print a status line at most every 500 ms */
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
  1026. /* allocate a picture (needs to do that in main thread to avoid
  1027. potential locking problems */
  1028. static void alloc_picture(void *opaque)
  1029. {
  1030. VideoState *is = opaque;
  1031. VideoPicture *vp;
  1032. vp = &is->pictq[is->pictq_windex];
  1033. if (vp->bmp)
  1034. SDL_FreeYUVOverlay(vp->bmp);
  1035. #if 0
  1036. /* XXX: use generic function */
  1037. /* XXX: disable overlay if no hardware acceleration or if RGB format */
  1038. switch(is->video_st->codec->pix_fmt) {
  1039. case PIX_FMT_YUV420P:
  1040. case PIX_FMT_YUV422P:
  1041. case PIX_FMT_YUV444P:
  1042. case PIX_FMT_YUYV422:
  1043. case PIX_FMT_YUV410P:
  1044. case PIX_FMT_YUV411P:
  1045. is_yuv = 1;
  1046. break;
  1047. default:
  1048. is_yuv = 0;
  1049. break;
  1050. }
  1051. #endif
  1052. vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
  1053. is->video_st->codec->height,
  1054. SDL_YV12_OVERLAY,
  1055. screen);
  1056. vp->width = is->video_st->codec->width;
  1057. vp->height = is->video_st->codec->height;
  1058. SDL_LockMutex(is->pictq_mutex);
  1059. vp->allocated = 1;
  1060. SDL_CondSignal(is->pictq_cond);
  1061. SDL_UnlockMutex(is->pictq_mutex);
  1062. }
/**
 * Queue a decoded frame into the picture queue, converting it to the
 * SDL YV12 overlay format on the way.  Blocks until a queue slot is
 * free; the overlay itself is (re)allocated on the main thread via an
 * FF_ALLOC_EVENT handshake.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    /* cached scaler context, reused across calls (only the single video
       thread ever enters here) */
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        /* YV12 stores planes in Y,V,U order, hence the swapped 1/2
           indices relative to the YUV420P destination picture */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
  1138. /**
  1139. * compute the exact PTS for the picture if it is omitted in the stream
  1140. * @param pts1 the dts of the pkt / pts of the frame
  1141. */
  1142. static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
  1143. {
  1144. double frame_delay, pts;
  1145. pts = pts1;
  1146. if (pts != 0) {
  1147. /* update video clock with pts, if present */
  1148. is->video_clock = pts;
  1149. } else {
  1150. pts = is->video_clock;
  1151. }
  1152. /* update video clock for next frame */
  1153. frame_delay = av_q2d(is->video_st->codec->time_base);
  1154. /* for MPEG2, the frame can be repeated, so we update the
  1155. clock accordingly */
  1156. frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
  1157. is->video_clock += frame_delay;
  1158. #if defined(DEBUG_SYNC) && 0
  1159. {
  1160. int ftype;
  1161. if (src_frame->pict_type == FF_B_TYPE)
  1162. ftype = 'B';
  1163. else if (src_frame->pict_type == FF_I_TYPE)
  1164. ftype = 'I';
  1165. else
  1166. ftype = 'P';
  1167. printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
  1168. ftype, pts, pts1);
  1169. }
  1170. #endif
  1171. return queue_picture(is, src_frame, pts);
  1172. }
/* Video decoding thread: pulls packets from the video queue, decodes
   them, derives a presentation timestamp for each picture and hands it
   to output_picture2(). */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        /* poll (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        /* route the packet pts through the decoder so it comes back
           attached to the (possibly reordered) output frame */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        /* prefer the reordered pts, fall back to packet dts, else 0 */
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        /* in single-step mode, pause again after each decoded frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}
/* Subtitle decoding thread: pulls packets from the subtitle queue,
   decodes them, converts bitmap-subtitle palettes from RGBA to CCIR
   YUVA in place, and stores the result in the subtitle picture queue. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* poll while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle picture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap subtitles carrying an RGBA palette */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette to YUVA so blending can be
               done directly in YUV space at display time */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
  1285. /* copy samples for viewing in editor window */
  1286. static void update_sample_display(VideoState *is, short *samples, int samples_size)
  1287. {
  1288. int size, len, channels;
  1289. channels = is->audio_st->codec->channels;
  1290. size = samples_size / sizeof(short);
  1291. while (size > 0) {
  1292. len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
  1293. if (len > size)
  1294. len = size;
  1295. memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
  1296. samples += len;
  1297. is->sample_array_index += len;
  1298. if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
  1299. is->sample_array_index = 0;
  1300. size -= len;
  1301. }
  1302. }
  1303. /* return the new audio buffer size (samples can be added or deleted
  1304. to get better sync if video or external master clock) */
  1305. static int synchronize_audio(VideoState *is, short *samples,
  1306. int samples_size1, double pts)
  1307. {
  1308. int n, samples_size;
  1309. double ref_clock;
  1310. n = 2 * is->audio_st->codec->channels;
  1311. samples_size = samples_size1;
  1312. /* if not master, then we try to remove or add samples to correct the clock */
  1313. if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
  1314. is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
  1315. double diff, avg_diff;
  1316. int wanted_size, min_size, max_size, nb_samples;
  1317. ref_clock = get_master_clock(is);
  1318. diff = get_audio_clock(is) - ref_clock;
  1319. if (diff < AV_NOSYNC_THRESHOLD) {
  1320. is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
  1321. if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
  1322. /* not enough measures to have a correct estimate */
  1323. is->audio_diff_avg_count++;
  1324. } else {
  1325. /* estimate the A-V difference */
  1326. avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
  1327. if (fabs(avg_diff) >= is->audio_diff_threshold) {
  1328. wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
  1329. nb_samples = samples_size / n;
  1330. min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
  1331. max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
  1332. if (wanted_size < min_size)
  1333. wanted_size = min_size;
  1334. else if (wanted_size > max_size)
  1335. wanted_size = max_size;
  1336. /* add or remove samples to correction the synchro */
  1337. if (wanted_size < samples_size) {
  1338. /* remove samples */
  1339. samples_size = wanted_size;
  1340. } else if (wanted_size > samples_size) {
  1341. uint8_t *samples_end, *q;
  1342. int nb;
  1343. /* add samples */
  1344. nb = (samples_size - wanted_size);
  1345. samples_end = (uint8_t *)samples + samples_size - n;
  1346. q = samples_end + n;
  1347. while (nb > 0) {
  1348. memcpy(q, samples_end, n);
  1349. q += n;
  1350. nb -= n;
  1351. }
  1352. samples_size = wanted_size;
  1353. }
  1354. }
  1355. #if 0
  1356. printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
  1357. diff, avg_diff, samples_size - samples_size1,
  1358. is->audio_clock, is->video_clock, is->audio_diff_threshold);
  1359. #endif
  1360. }
  1361. } else {
  1362. /* too big difference : may be initial PTS errors, so
  1363. reset A-V filter */
  1364. is->audio_diff_avg_count = 0;
  1365. is->audio_diff_cum = 0;
  1366. }
  1367. }
  1368. return samples_size;
  1369. }
/* decode one audio frame and returns its uncompressed size (in bytes,
   s16 interleaved).  Sets *pts_ptr to the pts of the decoded data and
   advances is->audio_clock.  Returns -1 on pause/abort/queue error. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio2(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample-format converter when the decoder
               output format differs from the s16 we feed to SDL */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 into audio_buf2 as s16 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;

                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* advance the audio clock by the duration of the decoded
               data: 2 bytes per sample per channel at sample_rate */
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet marks a seek: reset the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
  1461. /* get the current audio output buffer size, in samples. With SDL, we
  1462. cannot have a precise information */
  1463. static int audio_write_get_buf_size(VideoState *is)
  1464. {
  1465. return is->audio_buf_size - is->audio_buf_index;
  1466. }
/* prepare a new audio buffer: SDL audio callback, runs on SDL's audio
   thread and must fill 'stream' with exactly 'len' bytes. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when SDL asked for data; used to refine the audio clock */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill our decoded buffer when it has been fully consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* add/drop samples to stay in sync with the master clock */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as SDL wants, bounded by what we have buffered */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
/* open a given stream: find and open its decoder, apply the global
   command-line decoding options, set up SDL audio output for audio
   streams, and spawn the decode thread for video/subtitle streams.
   Return 0 if OK, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        /* ask the decoder for at most a stereo downmix */
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    /* propagate the global command-line decoding options */
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avctx_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        /* seed the frame-timing state used by compute_frame_delay() */
        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
  1591. static void stream_component_close(VideoState *is, int stream_index)
  1592. {
  1593. AVFormatContext *ic = is->ic;
  1594. AVCodecContext *enc;
  1595. if (stream_index < 0 || stream_index >= ic->nb_streams)
  1596. return;
  1597. enc = ic->streams[stream_index]->codec;
  1598. switch(enc->codec_type) {
  1599. case CODEC_TYPE_AUDIO:
  1600. packet_queue_abort(&is->audioq);
  1601. SDL_CloseAudio();
  1602. packet_queue_end(&is->audioq);
  1603. if (is->reformat_ctx)
  1604. av_audio_convert_free(is->reformat_ctx);
  1605. break;
  1606. case CODEC_TYPE_VIDEO:
  1607. packet_queue_abort(&is->videoq);
  1608. /* note: we also signal this mutex to make sure we deblock the
  1609. video thread in all cases */
  1610. SDL_LockMutex(is->pictq_mutex);
  1611. SDL_CondSignal(is->pictq_cond);
  1612. SDL_UnlockMutex(is->pictq_mutex);
  1613. SDL_WaitThread(is->video_tid, NULL);
  1614. packet_queue_end(&is->videoq);
  1615. break;
  1616. case CODEC_TYPE_SUBTITLE:
  1617. packet_queue_abort(&is->subtitleq);
  1618. /* note: we also signal this mutex to make sure we deblock the
  1619. video thread in all cases */
  1620. SDL_LockMutex(is->subpq_mutex);
  1621. is->subtitle_stream_changed = 1;
  1622. SDL_CondSignal(is->subpq_cond);
  1623. SDL_UnlockMutex(is->subpq_mutex);
  1624. SDL_WaitThread(is->subtitle_tid, NULL);
  1625. packet_queue_end(&is->subtitleq);
  1626. break;
  1627. default:
  1628. break;
  1629. }
  1630. ic->streams[stream_index]->discard = AVDISCARD_ALL;
  1631. avcodec_close(enc);
  1632. switch(enc->codec_type) {
  1633. case CODEC_TYPE_AUDIO:
  1634. is->audio_st = NULL;
  1635. is->audio_stream = -1;
  1636. break;
  1637. case CODEC_TYPE_VIDEO:
  1638. is->video_st = NULL;
  1639. is->video_stream = -1;
  1640. break;
  1641. case CODEC_TYPE_SUBTITLE:
  1642. is->subtitle_st = NULL;
  1643. is->subtitle_stream = -1;
  1644. break;
  1645. default:
  1646. break;
  1647. }
  1648. }
  1649. static void dump_stream_info(const AVFormatContext *s)
  1650. {
  1651. AVMetadataTag *tag = NULL;
  1652. while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
  1653. fprintf(stderr, "%s: %s\n", tag->key, tag->value);
  1654. }
  1655. /* since we have only one decoding thread, we can use a global
  1656. variable instead of a thread local variable */
  1657. static VideoState *global_video_state;
  1658. static int decode_interrupt_cb(void)
  1659. {
  1660. return (global_video_state && global_video_state->abort_request);
  1661. }
/* this thread gets the stream from the disk or the network */
/* Demuxer thread: opens the input, selects one stream per media type,
 * starts the per-stream decoders, then loops reading packets into the
 * audio/video/subtitle queues until abort is requested.  On failure it
 * posts FF_QUIT_EVENT so the main event loop can exit. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking lavf I/O to be interrupted via our abort flag */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    /* pass user-forced demuxing parameters (raw formats need these) */
    memset(ap, 0, sizeof(*ap));
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* select the n-th stream of each type (n = wanted_* counter);
       all streams start out discarded until a component opens them */
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            /* NOTE(review): gated on video_disable, not a subtitle-specific
               flag -- presumably intentional (no subtitles without video),
               but worth confirming */
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* no video: display the audio waveform instead */
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/play transitions to the demuxer (network
           protocols such as RTSP can pause the server-side stream) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            /* seek on the "most important" open stream, converting the
               AV_TIME_BASE target into that stream's own time base */
            if (is->video_stream >= 0) stream_index= is->video_stream;
            else if(is->audio_stream >= 0) stream_index= is->audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }
            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* on success, drop all queued packets and push a flush
                   marker so each decoder resets its internal state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb)) {
            /* at EOF, keep feeding empty packets so the video decoder
               can drain its delayed frames */
            av_init_packet(pkt);
            pkt->data=NULL;
            pkt->size=0;
            pkt->stream_index= is->video_stream;
            packet_queue_put(&is->videoq, pkt);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            /* transient read error: retry after a delay; real EOF or
               stream error: leave the read loop */
            if (ret != AVERROR_EOF && url_ferror(ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        /* route the packet to the matching queue, else drop it */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* tell the main loop we failed, so it can quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
  1863. static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
  1864. {
  1865. VideoState *is;
  1866. is = av_mallocz(sizeof(VideoState));
  1867. if (!is)
  1868. return NULL;
  1869. av_strlcpy(is->filename, filename, sizeof(is->filename));
  1870. is->iformat = iformat;
  1871. is->ytop = 0;
  1872. is->xleft = 0;
  1873. /* start video display */
  1874. is->pictq_mutex = SDL_CreateMutex();
  1875. is->pictq_cond = SDL_CreateCond();
  1876. is->subpq_mutex = SDL_CreateMutex();
  1877. is->subpq_cond = SDL_CreateCond();
  1878. /* add the refresh timer to draw the picture */
  1879. schedule_refresh(is, 40);
  1880. is->av_sync_type = av_sync_type;
  1881. is->parse_tid = SDL_CreateThread(decode_thread, is);
  1882. if (!is->parse_tid) {
  1883. av_free(is);
  1884. return NULL;
  1885. }
  1886. return is;
  1887. }
  1888. static void stream_close(VideoState *is)
  1889. {
  1890. VideoPicture *vp;
  1891. int i;
  1892. /* XXX: use a special url_shutdown call to abort parse cleanly */
  1893. is->abort_request = 1;
  1894. SDL_WaitThread(is->parse_tid, NULL);
  1895. /* free all pictures */
  1896. for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
  1897. vp = &is->pictq[i];
  1898. if (vp->bmp) {
  1899. SDL_FreeYUVOverlay(vp->bmp);
  1900. vp->bmp = NULL;
  1901. }
  1902. }
  1903. SDL_DestroyMutex(is->pictq_mutex);
  1904. SDL_DestroyCond(is->pictq_cond);
  1905. SDL_DestroyMutex(is->subpq_mutex);
  1906. SDL_DestroyCond(is->subpq_cond);
  1907. }
/* Switch to the next stream of the given type, wrapping around the
 * stream list.  For subtitles the search may also end at "no stream"
 * (index -1), which turns subtitles off entirely. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* audio/video must currently be open to cycle; subtitles may also
       start from the disabled state (-1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrapped past the last stream: disable subtitles */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* back where we started: no alternative stream exists */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                /* audio is only usable with a known rate and layout */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
  1955. static void toggle_full_screen(void)
  1956. {
  1957. is_full_screen = !is_full_screen;
  1958. if (!fs_screen_width) {
  1959. /* use default SDL method */
  1960. // SDL_WM_ToggleFullScreen(screen);
  1961. }
  1962. video_open(cur_stream);
  1963. }
  1964. static void toggle_pause(void)
  1965. {
  1966. if (cur_stream)
  1967. stream_pause(cur_stream);
  1968. step = 0;
  1969. }
  1970. static void step_to_next_frame(void)
  1971. {
  1972. if (cur_stream) {
  1973. /* if the stream is paused unpause it, then step */
  1974. if (cur_stream->paused)
  1975. stream_pause(cur_stream);
  1976. }
  1977. step = 1;
  1978. }
  1979. static void do_exit(void)
  1980. {
  1981. if (cur_stream) {
  1982. stream_close(cur_stream);
  1983. cur_stream = NULL;
  1984. }
  1985. if (show_status)
  1986. printf("\n");
  1987. SDL_Quit();
  1988. exit(0);
  1989. }
  1990. static void toggle_audio_display(void)
  1991. {
  1992. if (cur_stream) {
  1993. cur_stream->show_audio = !cur_stream->show_audio;
  1994. }
  1995. }
/* handle an event sent by the GUI */
/* Main-thread event pump: dispatches SDL keyboard/mouse/window events
 * and the custom FF_* events posted by the worker threads.  Does not
 * return; the process exits via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek by +-10 s / +-60 s */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte mode: scale the time increment into bytes
                           via the container bitrate (or a rough default) */
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        /* time mode: offset the master clock position */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            /* a click maps linearly to a fraction of total duration */
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            /* recreate the surface at the new size and record it so
               later video_open() calls keep the user's choice */
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* the video thread asks the main thread to (re)open the
               display and allocate the YUV overlay (SDL restriction) */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
  2109. static void opt_frame_size(const char *arg)
  2110. {
  2111. if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
  2112. fprintf(stderr, "Incorrect frame size\n");
  2113. exit(1);
  2114. }
  2115. if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
  2116. fprintf(stderr, "Frame size must be a multiple of 2\n");
  2117. exit(1);
  2118. }
  2119. }
  2120. static int opt_width(const char *opt, const char *arg)
  2121. {
  2122. screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
  2123. return 0;
  2124. }
  2125. static int opt_height(const char *opt, const char *arg)
  2126. {
  2127. screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
  2128. return 0;
  2129. }
  2130. static void opt_format(const char *arg)
  2131. {
  2132. file_iformat = av_find_input_format(arg);
  2133. if (!file_iformat) {
  2134. fprintf(stderr, "Unknown input format: %s\n", arg);
  2135. exit(1);
  2136. }
  2137. }
  2138. static void opt_frame_pix_fmt(const char *arg)
  2139. {
  2140. frame_pix_fmt = avcodec_get_pix_fmt(arg);
  2141. }
  2142. static int opt_sync(const char *opt, const char *arg)
  2143. {
  2144. if (!strcmp(arg, "audio"))
  2145. av_sync_type = AV_SYNC_AUDIO_MASTER;
  2146. else if (!strcmp(arg, "video"))
  2147. av_sync_type = AV_SYNC_VIDEO_MASTER;
  2148. else if (!strcmp(arg, "ext"))
  2149. av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
  2150. else {
  2151. fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
  2152. exit(1);
  2153. }
  2154. return 0;
  2155. }
  2156. static int opt_seek(const char *opt, const char *arg)
  2157. {
  2158. start_time = parse_time_or_die(opt, arg, 1);
  2159. return 0;
  2160. }
  2161. static int opt_debug(const char *opt, const char *arg)
  2162. {
  2163. av_log_set_level(99);
  2164. debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
  2165. return 0;
  2166. }
  2167. static int opt_vismv(const char *opt, const char *arg)
  2168. {
  2169. debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
  2170. return 0;
  2171. }
  2172. static int opt_thread_count(const char *opt, const char *arg)
  2173. {
  2174. thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
  2175. #if !HAVE_THREADS
  2176. fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
  2177. #endif
  2178. return 0;
  2179. }
  2180. static const OptionDef options[] = {
  2181. { "h", OPT_EXIT, {(void*)show_help}, "show help" },
  2182. { "version", OPT_EXIT, {(void*)show_version}, "show version" },
  2183. { "L", OPT_EXIT, {(void*)show_license}, "show license" },
  2184. { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
  2185. { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
  2186. { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
  2187. { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
  2188. { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
  2189. { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
  2190. { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
  2191. { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
  2192. { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
  2193. { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
  2194. { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
  2195. { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
  2196. { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
  2197. { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
  2198. { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
  2199. { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
  2200. { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
  2201. { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
  2202. { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
  2203. { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
  2204. { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
  2205. { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
  2206. { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
  2207. { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
  2208. { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
  2209. { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
  2210. { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
  2211. { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
  2212. { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
  2213. { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
  2214. { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
  2215. { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
  2216. { NULL, },
  2217. };
/* Print the usage banner, the option table (non-expert then expert
 * sets) and the interactive key bindings. */
static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    /* mask OPT_EXPERT, value 0: non-expert options only */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    /* mask OPT_EXPERT, value OPT_EXPERT: expert options only */
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC quit\n"
           "f toggle full screen\n"
           "p, SPC pause\n"
           "a cycle audio channel\n"
           "v cycle video channel\n"
           "t cycle subtitle channel\n"
           "w show audio waves\n"
           "left/right seek backward/forward 10 seconds\n"
           "down/up seek backward/forward 1 minute\n"
           "mouse click seek to percentage in file corresponding to fraction of width\n"
           );
}
  2240. static void opt_input_file(const char *filename)
  2241. {
  2242. if (!strcmp(filename, "-"))
  2243. filename = "pipe:";
  2244. input_filename = filename;
  2245. }
/* Called from the main */
/* Program entry: register lav* components, parse options, initialize
 * SDL, open the input stream and hand control to the event loop. */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    /* per-media-type contexts that collect "-default" AVOptions */
    for(i=0; i<CODEC_TYPE_NB; i++){
        avctx_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    /* dummy 16x16 context just to hold user-set scaler options */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop size for full-screen switching */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types we never handle, keeping the queue small */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet pushed into the queues after a seek so the
       decoders know to flush their state */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}