ffplay.c

  1. /*
  2. * FFplay: Simple Media Player based on the FFmpeg libraries
  3. * Copyright (c) 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "config.h"
  22. #include <inttypes.h>
  23. #include <math.h>
  24. #include <limits.h>
  25. #include "libavutil/avstring.h"
  26. #include "libavutil/pixdesc.h"
  27. #include "libavformat/avformat.h"
  28. #include "libavdevice/avdevice.h"
  29. #include "libswscale/swscale.h"
  30. #include "libavcodec/audioconvert.h"
  31. #include "libavcodec/colorspace.h"
  32. #include "libavcodec/opt.h"
  33. #include "libavcodec/avfft.h"
  34. #if CONFIG_AVFILTER
  35. # include "libavfilter/avfilter.h"
  36. # include "libavfilter/avfiltergraph.h"
  37. # include "libavfilter/graphparser.h"
  38. #endif
  39. #include "cmdutils.h"
  40. #include <SDL.h>
  41. #include <SDL_thread.h>
  42. #ifdef __MINGW32__
  43. #undef main /* We don't want SDL to override our main() */
  44. #endif
  45. #include <unistd.h>
  46. #include <assert.h>
  47. const char program_name[] = "FFplay";
  48. const int program_birth_year = 2003;
  49. //#define DEBUG_SYNC
  50. #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
  51. #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
  52. #define MIN_FRAMES 5
  53. /* SDL audio buffer size, in samples. Should be small to have precise
  54. A/V sync as SDL does not have hardware buffer fullness info. */
  55. #define SDL_AUDIO_BUFFER_SIZE 1024
  56. /* no AV sync correction is done if below the AV sync threshold */
  57. #define AV_SYNC_THRESHOLD 0.01
  58. /* no AV correction is done if too big error */
  59. #define AV_NOSYNC_THRESHOLD 10.0
  60. #define FRAME_SKIP_FACTOR 0.05
  61. /* maximum audio speed change to get correct sync */
  62. #define SAMPLE_CORRECTION_PERCENT_MAX 10
  63. /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
  64. #define AUDIO_DIFF_AVG_NB 20
  65. * NOTE: the size must be big enough to compensate for the hardware audio buffer size */
  66. #define SAMPLE_ARRAY_SIZE (2*65536)
  67. #if !CONFIG_AVFILTER
  68. static int sws_flags = SWS_BICUBIC;
  69. #endif
  70. typedef struct PacketQueue {
  71. AVPacketList *first_pkt, *last_pkt;
  72. int nb_packets;
  73. int size;
  74. int abort_request;
  75. SDL_mutex *mutex;
  76. SDL_cond *cond;
  77. } PacketQueue;
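/* A PacketQueue is a thread-safe FIFO of demuxed AVPackets: the read
   (demuxer) thread appends packets with packet_queue_put() while the audio,
   video and subtitle decoder threads drain them with packet_queue_get().
   'size' tracks the queued bytes so the reader can stop filling once
   MAX_QUEUE_SIZE is reached, and 'abort_request' wakes up and stops any
   consumer blocked on the condition variable. */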
  78. #define VIDEO_PICTURE_QUEUE_SIZE 2
  79. #define SUBPICTURE_QUEUE_SIZE 4
  80. typedef struct VideoPicture {
  81. double pts; ///<presentation time stamp for this picture
  82. double target_clock; ///<av_gettime() time at which this should be displayed ideally
  83. int64_t pos; ///<byte position in file
  84. SDL_Overlay *bmp;
  85. int width, height; /* source height & width */
  86. int allocated;
  87. enum PixelFormat pix_fmt;
  88. #if CONFIG_AVFILTER
  89. AVFilterPicRef *picref;
  90. #endif
  91. } VideoPicture;
  92. typedef struct SubPicture {
  93. double pts; /* presentation time stamp for this picture */
  94. AVSubtitle sub;
  95. } SubPicture;
  96. enum {
  97. AV_SYNC_AUDIO_MASTER, /* default choice */
  98. AV_SYNC_VIDEO_MASTER,
  99. AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
  100. };
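/* The master clock selects which stream drives A/V synchronisation: with
   AV_SYNC_AUDIO_MASTER (the default) video frames are scheduled to follow
   the audio clock, with AV_SYNC_VIDEO_MASTER the audio is stretched or
   shrunk to follow the video instead, and AV_SYNC_EXTERNAL_CLOCK syncs both
   streams to an external (wall-clock based) reference. */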
  101. typedef struct VideoState {
  102. SDL_Thread *parse_tid;
  103. SDL_Thread *video_tid;
  104. SDL_Thread *refresh_tid;
  105. AVInputFormat *iformat;
  106. int no_background;
  107. int abort_request;
  108. int paused;
  109. int last_paused;
  110. int seek_req;
  111. int seek_flags;
  112. int64_t seek_pos;
  113. int64_t seek_rel;
  114. int read_pause_return;
  115. AVFormatContext *ic;
  116. int dtg_active_format;
  117. int audio_stream;
  118. int av_sync_type;
  119. double external_clock; /* external clock base */
  120. int64_t external_clock_time;
  121. double audio_clock;
  122. double audio_diff_cum; /* used for AV difference average computation */
  123. double audio_diff_avg_coef;
  124. double audio_diff_threshold;
  125. int audio_diff_avg_count;
  126. AVStream *audio_st;
  127. PacketQueue audioq;
  128. int audio_hw_buf_size;
  129. /* samples output by the codec. we reserve more space for avsync
  130. compensation */
  131. DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
  132. DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
  133. uint8_t *audio_buf;
  134. unsigned int audio_buf_size; /* in bytes */
  135. int audio_buf_index; /* in bytes */
  136. AVPacket audio_pkt_temp;
  137. AVPacket audio_pkt;
  138. enum SampleFormat audio_src_fmt;
  139. AVAudioConvert *reformat_ctx;
  140. int show_audio; /* if true, display audio samples */
  141. int16_t sample_array[SAMPLE_ARRAY_SIZE];
  142. int sample_array_index;
  143. int last_i_start;
  144. RDFTContext *rdft;
  145. int rdft_bits;
  146. int xpos;
  147. SDL_Thread *subtitle_tid;
  148. int subtitle_stream;
  149. int subtitle_stream_changed;
  150. AVStream *subtitle_st;
  151. PacketQueue subtitleq;
  152. SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
  153. int subpq_size, subpq_rindex, subpq_windex;
  154. SDL_mutex *subpq_mutex;
  155. SDL_cond *subpq_cond;
  156. double frame_timer;
  157. double frame_last_pts;
  158. double frame_last_delay;
  159. double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
  160. int video_stream;
  161. AVStream *video_st;
  162. PacketQueue videoq;
  163. double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
  164. double video_current_pts_drift; ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
  165. int64_t video_current_pos; ///<current displayed file pos
  166. VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
  167. int pictq_size, pictq_rindex, pictq_windex;
  168. SDL_mutex *pictq_mutex;
  169. SDL_cond *pictq_cond;
  170. #if !CONFIG_AVFILTER
  171. struct SwsContext *img_convert_ctx;
  172. #endif
  173. // QETimer *video_timer;
  174. char filename[1024];
  175. int width, height, xleft, ytop;
  176. int64_t faulty_pts;
  177. int64_t faulty_dts;
  178. int64_t last_dts_for_fault_detection;
  179. int64_t last_pts_for_fault_detection;
  180. #if CONFIG_AVFILTER
  181. AVFilterContext *out_video_filter; ///<the last filter in the video chain
  182. #endif
  183. float skip_frames;
  184. float skip_frames_index;
  185. int refresh;
  186. } VideoState;
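/* VideoState bundles everything one open input needs: the demuxer context,
   the per-stream packet queues, the decoded picture and subpicture ring
   buffers, the clocks used for A/V sync, and the SDL threads that feed them.
   A single instance (cur_stream below) is shared by the read, decode,
   refresh and event-handling threads. */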
  187. static void show_help(void);
  188. static int audio_write_get_buf_size(VideoState *is);
  189. /* options specified by the user */
  190. static AVInputFormat *file_iformat;
  191. static const char *input_filename;
  192. static int fs_screen_width;
  193. static int fs_screen_height;
  194. static int screen_width = 0;
  195. static int screen_height = 0;
  196. static int frame_width = 0;
  197. static int frame_height = 0;
  198. static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
  199. static int audio_disable;
  200. static int video_disable;
  201. static int wanted_stream[CODEC_TYPE_NB]={
  202. [CODEC_TYPE_AUDIO]=-1,
  203. [CODEC_TYPE_VIDEO]=-1,
  204. [CODEC_TYPE_SUBTITLE]=-1,
  205. };
  206. static int seek_by_bytes=-1;
  207. static int display_disable;
  208. static int show_status = 1;
  209. static int av_sync_type = AV_SYNC_AUDIO_MASTER;
  210. static int64_t start_time = AV_NOPTS_VALUE;
  211. static int debug = 0;
  212. static int debug_mv = 0;
  213. static int step = 0;
  214. static int thread_count = 1;
  215. static int workaround_bugs = 1;
  216. static int fast = 0;
  217. static int genpts = 0;
  218. static int lowres = 0;
  219. static int idct = FF_IDCT_AUTO;
  220. static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
  221. static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
  222. static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
  223. static int error_recognition = FF_ER_CAREFUL;
  224. static int error_concealment = 3;
  225. static int decoder_reorder_pts= -1;
  226. static int autoexit;
  227. static int framedrop=1;
  228. static int rdftspeed=20;
  229. #if CONFIG_AVFILTER
  230. static char *vfilters = NULL;
  231. #endif
  232. /* current context */
  233. static int is_full_screen;
  234. static VideoState *cur_stream;
  235. static int64_t audio_callback_time;
  236. static AVPacket flush_pkt;
  237. #define FF_ALLOC_EVENT (SDL_USEREVENT)
  238. #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
  239. #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
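/* Worker threads must not touch SDL video objects directly, so they post
   these custom SDL user events to the main loop instead: FF_ALLOC_EVENT asks
   the main thread to (re)allocate a YUV overlay, FF_REFRESH_EVENT triggers a
   call to video_refresh_timer(), and FF_QUIT_EVENT requests shutdown. */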
  240. static SDL_Surface *screen;
  241. static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
  242. /* packet queue handling */
  243. static void packet_queue_init(PacketQueue *q)
  244. {
  245. memset(q, 0, sizeof(PacketQueue));
  246. q->mutex = SDL_CreateMutex();
  247. q->cond = SDL_CreateCond();
  248. packet_queue_put(q, &flush_pkt);
  249. }
  250. static void packet_queue_flush(PacketQueue *q)
  251. {
  252. AVPacketList *pkt, *pkt1;
  253. SDL_LockMutex(q->mutex);
  254. for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
  255. pkt1 = pkt->next;
  256. av_free_packet(&pkt->pkt);
  257. av_freep(&pkt);
  258. }
  259. q->last_pkt = NULL;
  260. q->first_pkt = NULL;
  261. q->nb_packets = 0;
  262. q->size = 0;
  263. SDL_UnlockMutex(q->mutex);
  264. }
  265. static void packet_queue_end(PacketQueue *q)
  266. {
  267. packet_queue_flush(q);
  268. SDL_DestroyMutex(q->mutex);
  269. SDL_DestroyCond(q->cond);
  270. }
  271. static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
  272. {
  273. AVPacketList *pkt1;
  274. /* duplicate the packet */
  275. if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
  276. return -1;
  277. pkt1 = av_malloc(sizeof(AVPacketList));
  278. if (!pkt1)
  279. return -1;
  280. pkt1->pkt = *pkt;
  281. pkt1->next = NULL;
  282. SDL_LockMutex(q->mutex);
  283. if (!q->last_pkt)
  284. q->first_pkt = pkt1;
  285. else
  286. q->last_pkt->next = pkt1;
  287. q->last_pkt = pkt1;
  288. q->nb_packets++;
  289. q->size += pkt1->pkt.size + sizeof(*pkt1);
  290. /* XXX: should duplicate packet data in DV case */
  291. SDL_CondSignal(q->cond);
  292. SDL_UnlockMutex(q->mutex);
  293. return 0;
  294. }
  295. static void packet_queue_abort(PacketQueue *q)
  296. {
  297. SDL_LockMutex(q->mutex);
  298. q->abort_request = 1;
  299. SDL_CondSignal(q->cond);
  300. SDL_UnlockMutex(q->mutex);
  301. }
  302. /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
  303. static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
  304. {
  305. AVPacketList *pkt1;
  306. int ret;
  307. SDL_LockMutex(q->mutex);
  308. for(;;) {
  309. if (q->abort_request) {
  310. ret = -1;
  311. break;
  312. }
  313. pkt1 = q->first_pkt;
  314. if (pkt1) {
  315. q->first_pkt = pkt1->next;
  316. if (!q->first_pkt)
  317. q->last_pkt = NULL;
  318. q->nb_packets--;
  319. q->size -= pkt1->pkt.size + sizeof(*pkt1);
  320. *pkt = pkt1->pkt;
  321. av_free(pkt1);
  322. ret = 1;
  323. break;
  324. } else if (!block) {
  325. ret = 0;
  326. break;
  327. } else {
  328. SDL_CondWait(q->cond, q->mutex);
  329. }
  330. }
  331. SDL_UnlockMutex(q->mutex);
  332. return ret;
  333. }
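/* A minimal sketch of how the decoder threads below drive this call;
   'q' and 'decode_one' are placeholders, not names used in this file:

       AVPacket pkt;
       for (;;) {
           if (packet_queue_get(q, &pkt, 1) < 0)
               break;                      // queue aborted: stop decoding
           if (pkt.data == flush_pkt.data) {
               // a seek happened: flush the decoder instead of decoding
               continue;
           }
           decode_one(&pkt);               // hypothetical per-packet work
           av_free_packet(&pkt);
       }
*/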
  334. static inline void fill_rectangle(SDL_Surface *screen,
  335. int x, int y, int w, int h, int color)
  336. {
  337. SDL_Rect rect;
  338. rect.x = x;
  339. rect.y = y;
  340. rect.w = w;
  341. rect.h = h;
  342. SDL_FillRect(screen, &rect, color);
  343. }
  344. #if 0
  345. /* draw only the border of a rectangle */
  346. void fill_border(VideoState *s, int x, int y, int w, int h, int color)
  347. {
  348. int w1, w2, h1, h2;
  349. /* fill the background */
  350. w1 = x;
  351. if (w1 < 0)
  352. w1 = 0;
  353. w2 = s->width - (x + w);
  354. if (w2 < 0)
  355. w2 = 0;
  356. h1 = y;
  357. if (h1 < 0)
  358. h1 = 0;
  359. h2 = s->height - (y + h);
  360. if (h2 < 0)
  361. h2 = 0;
  362. fill_rectangle(screen,
  363. s->xleft, s->ytop,
  364. w1, s->height,
  365. color);
  366. fill_rectangle(screen,
  367. s->xleft + s->width - w2, s->ytop,
  368. w2, s->height,
  369. color);
  370. fill_rectangle(screen,
  371. s->xleft + w1, s->ytop,
  372. s->width - w1 - w2, h1,
  373. color);
  374. fill_rectangle(screen,
  375. s->xleft + w1, s->ytop + s->height - h2,
  376. s->width - w1 - w2, h2,
  377. color);
  378. }
  379. #endif
  380. #define ALPHA_BLEND(a, oldp, newp, s)\
  381. ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
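/* ALPHA_BLEND mixes 'newp' into 'oldp' with alpha 'a' (0..255).  The shift
   's' lets the same macro blend chroma values that were summed over 2 (s=1)
   or 4 (s=2) neighbouring pixels: 'oldp' is scaled up by 1<<s so both terms
   cover the same range before the division by 255<<s normalizes the result. */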
  382. #define RGBA_IN(r, g, b, a, s)\
  383. {\
  384. unsigned int v = ((const uint32_t *)(s))[0];\
  385. a = (v >> 24) & 0xff;\
  386. r = (v >> 16) & 0xff;\
  387. g = (v >> 8) & 0xff;\
  388. b = v & 0xff;\
  389. }
  390. #define YUVA_IN(y, u, v, a, s, pal)\
  391. {\
  392. unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
  393. a = (val >> 24) & 0xff;\
  394. y = (val >> 16) & 0xff;\
  395. u = (val >> 8) & 0xff;\
  396. v = val & 0xff;\
  397. }
  398. #define YUVA_OUT(d, y, u, v, a)\
  399. {\
  400. ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
  401. }
  402. #define BPP 1
  403. static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
  404. {
  405. int wrap, wrap3, width2, skip2;
  406. int y, u, v, a, u1, v1, a1, w, h;
  407. uint8_t *lum, *cb, *cr;
  408. const uint8_t *p;
  409. const uint32_t *pal;
  410. int dstx, dsty, dstw, dsth;
  411. dstw = av_clip(rect->w, 0, imgw);
  412. dsth = av_clip(rect->h, 0, imgh);
  413. dstx = av_clip(rect->x, 0, imgw - dstw);
  414. dsty = av_clip(rect->y, 0, imgh - dsth);
  415. lum = dst->data[0] + dsty * dst->linesize[0];
  416. cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
  417. cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
  418. width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
  419. skip2 = dstx >> 1;
  420. wrap = dst->linesize[0];
  421. wrap3 = rect->pict.linesize[0];
  422. p = rect->pict.data[0];
  423. pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
  424. if (dsty & 1) {
  425. lum += dstx;
  426. cb += skip2;
  427. cr += skip2;
  428. if (dstx & 1) {
  429. YUVA_IN(y, u, v, a, p, pal);
  430. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  431. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  432. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  433. cb++;
  434. cr++;
  435. lum++;
  436. p += BPP;
  437. }
  438. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  439. YUVA_IN(y, u, v, a, p, pal);
  440. u1 = u;
  441. v1 = v;
  442. a1 = a;
  443. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  444. YUVA_IN(y, u, v, a, p + BPP, pal);
  445. u1 += u;
  446. v1 += v;
  447. a1 += a;
  448. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  449. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  450. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  451. cb++;
  452. cr++;
  453. p += 2 * BPP;
  454. lum += 2;
  455. }
  456. if (w) {
  457. YUVA_IN(y, u, v, a, p, pal);
  458. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  459. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  460. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  461. p++;
  462. lum++;
  463. }
  464. p += wrap3 - dstw * BPP;
  465. lum += wrap - dstw - dstx;
  466. cb += dst->linesize[1] - width2 - skip2;
  467. cr += dst->linesize[2] - width2 - skip2;
  468. }
  469. for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
  470. lum += dstx;
  471. cb += skip2;
  472. cr += skip2;
  473. if (dstx & 1) {
  474. YUVA_IN(y, u, v, a, p, pal);
  475. u1 = u;
  476. v1 = v;
  477. a1 = a;
  478. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  479. p += wrap3;
  480. lum += wrap;
  481. YUVA_IN(y, u, v, a, p, pal);
  482. u1 += u;
  483. v1 += v;
  484. a1 += a;
  485. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  486. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  487. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  488. cb++;
  489. cr++;
  490. p += -wrap3 + BPP;
  491. lum += -wrap + 1;
  492. }
  493. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  494. YUVA_IN(y, u, v, a, p, pal);
  495. u1 = u;
  496. v1 = v;
  497. a1 = a;
  498. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  499. YUVA_IN(y, u, v, a, p + BPP, pal);
  500. u1 += u;
  501. v1 += v;
  502. a1 += a;
  503. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  504. p += wrap3;
  505. lum += wrap;
  506. YUVA_IN(y, u, v, a, p, pal);
  507. u1 += u;
  508. v1 += v;
  509. a1 += a;
  510. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  511. YUVA_IN(y, u, v, a, p + BPP, pal);
  512. u1 += u;
  513. v1 += v;
  514. a1 += a;
  515. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  516. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
  517. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
  518. cb++;
  519. cr++;
  520. p += -wrap3 + 2 * BPP;
  521. lum += -wrap + 2;
  522. }
  523. if (w) {
  524. YUVA_IN(y, u, v, a, p, pal);
  525. u1 = u;
  526. v1 = v;
  527. a1 = a;
  528. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  529. p += wrap3;
  530. lum += wrap;
  531. YUVA_IN(y, u, v, a, p, pal);
  532. u1 += u;
  533. v1 += v;
  534. a1 += a;
  535. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  536. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
  537. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
  538. cb++;
  539. cr++;
  540. p += -wrap3 + BPP;
  541. lum += -wrap + 1;
  542. }
  543. p += wrap3 + (wrap3 - dstw * BPP);
  544. lum += wrap + (wrap - dstw - dstx);
  545. cb += dst->linesize[1] - width2 - skip2;
  546. cr += dst->linesize[2] - width2 - skip2;
  547. }
  548. /* handle odd height */
  549. if (h) {
  550. lum += dstx;
  551. cb += skip2;
  552. cr += skip2;
  553. if (dstx & 1) {
  554. YUVA_IN(y, u, v, a, p, pal);
  555. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  556. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  557. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  558. cb++;
  559. cr++;
  560. lum++;
  561. p += BPP;
  562. }
  563. for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
  564. YUVA_IN(y, u, v, a, p, pal);
  565. u1 = u;
  566. v1 = v;
  567. a1 = a;
  568. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  569. YUVA_IN(y, u, v, a, p + BPP, pal);
  570. u1 += u;
  571. v1 += v;
  572. a1 += a;
  573. lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
  574. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
  575. cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
  576. cb++;
  577. cr++;
  578. p += 2 * BPP;
  579. lum += 2;
  580. }
  581. if (w) {
  582. YUVA_IN(y, u, v, a, p, pal);
  583. lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
  584. cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
  585. cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
  586. }
  587. }
  588. }
  589. static void free_subpicture(SubPicture *sp)
  590. {
  591. int i;
  592. for (i = 0; i < sp->sub.num_rects; i++)
  593. {
  594. av_freep(&sp->sub.rects[i]->pict.data[0]);
  595. av_freep(&sp->sub.rects[i]->pict.data[1]);
  596. av_freep(&sp->sub.rects[i]);
  597. }
  598. av_free(sp->sub.rects);
  599. memset(&sp->sub, 0, sizeof(AVSubtitle));
  600. }
  601. static void video_image_display(VideoState *is)
  602. {
  603. VideoPicture *vp;
  604. SubPicture *sp;
  605. AVPicture pict;
  606. float aspect_ratio;
  607. int width, height, x, y;
  608. SDL_Rect rect;
  609. int i;
  610. vp = &is->pictq[is->pictq_rindex];
  611. if (vp->bmp) {
  612. #if CONFIG_AVFILTER
  613. if (vp->picref->pixel_aspect.num == 0)
  614. aspect_ratio = 0;
  615. else
  616. aspect_ratio = av_q2d(vp->picref->pixel_aspect);
  617. #else
  618. /* XXX: use variable in the frame */
  619. if (is->video_st->sample_aspect_ratio.num)
  620. aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
  621. else if (is->video_st->codec->sample_aspect_ratio.num)
  622. aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
  623. else
  624. aspect_ratio = 0;
  625. #endif
  626. if (aspect_ratio <= 0.0)
  627. aspect_ratio = 1.0;
  628. aspect_ratio *= (float)vp->width / (float)vp->height;
  629. /* if an active format is indicated, then it overrides the
  630. mpeg format */
  631. #if 0
  632. if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
  633. is->dtg_active_format = is->video_st->codec->dtg_active_format;
  634. printf("dtg_active_format=%d\n", is->dtg_active_format);
  635. }
  636. #endif
  637. #if 0
  638. switch(is->video_st->codec->dtg_active_format) {
  639. case FF_DTG_AFD_SAME:
  640. default:
  641. /* nothing to do */
  642. break;
  643. case FF_DTG_AFD_4_3:
  644. aspect_ratio = 4.0 / 3.0;
  645. break;
  646. case FF_DTG_AFD_16_9:
  647. aspect_ratio = 16.0 / 9.0;
  648. break;
  649. case FF_DTG_AFD_14_9:
  650. aspect_ratio = 14.0 / 9.0;
  651. break;
  652. case FF_DTG_AFD_4_3_SP_14_9:
  653. aspect_ratio = 14.0 / 9.0;
  654. break;
  655. case FF_DTG_AFD_16_9_SP_14_9:
  656. aspect_ratio = 14.0 / 9.0;
  657. break;
  658. case FF_DTG_AFD_SP_4_3:
  659. aspect_ratio = 4.0 / 3.0;
  660. break;
  661. }
  662. #endif
  663. if (is->subtitle_st)
  664. {
  665. if (is->subpq_size > 0)
  666. {
  667. sp = &is->subpq[is->subpq_rindex];
  668. if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
  669. {
  670. SDL_LockYUVOverlay (vp->bmp);
  671. pict.data[0] = vp->bmp->pixels[0];
  672. pict.data[1] = vp->bmp->pixels[2];
  673. pict.data[2] = vp->bmp->pixels[1];
  674. pict.linesize[0] = vp->bmp->pitches[0];
  675. pict.linesize[1] = vp->bmp->pitches[2];
  676. pict.linesize[2] = vp->bmp->pitches[1];
  677. for (i = 0; i < sp->sub.num_rects; i++)
  678. blend_subrect(&pict, sp->sub.rects[i],
  679. vp->bmp->w, vp->bmp->h);
  680. SDL_UnlockYUVOverlay (vp->bmp);
  681. }
  682. }
  683. }
  684. /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
  685. height = is->height;
  686. width = ((int)rint(height * aspect_ratio)) & ~1;
  687. if (width > is->width) {
  688. width = is->width;
  689. height = ((int)rint(width / aspect_ratio)) & ~1;
  690. }
  691. x = (is->width - width) / 2;
  692. y = (is->height - height) / 2;
  693. if (!is->no_background) {
  694. /* fill the background */
  695. // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
  696. } else {
  697. is->no_background = 0;
  698. }
  699. rect.x = is->xleft + x;
  700. rect.y = is->ytop + y;
  701. rect.w = width;
  702. rect.h = height;
  703. SDL_DisplayYUVOverlay(vp->bmp, &rect);
  704. } else {
  705. #if 0
  706. fill_rectangle(screen,
  707. is->xleft, is->ytop, is->width, is->height,
  708. QERGB(0x00, 0x00, 0x00));
  709. #endif
  710. }
  711. }
  712. static inline int compute_mod(int a, int b)
  713. {
  714. a = a % b;
  715. if (a >= 0)
  716. return a;
  717. else
  718. return a + b;
  719. }
  720. static void video_audio_display(VideoState *s)
  721. {
  722. int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
  723. int ch, channels, h, h2, bgcolor, fgcolor;
  724. int64_t time_diff;
  725. int rdft_bits, nb_freq;
  726. for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
  727. ;
  728. nb_freq= 1<<(rdft_bits-1);
  729. /* compute display index: center on currently output samples */
  730. channels = s->audio_st->codec->channels;
  731. nb_display_channels = channels;
  732. if (!s->paused) {
  733. int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
  734. n = 2 * channels;
  735. delay = audio_write_get_buf_size(s);
  736. delay /= n;
  737. /* to be more precise, we take into account the time spent since
  738. the last buffer computation */
  739. if (audio_callback_time) {
  740. time_diff = av_gettime() - audio_callback_time;
  741. delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
  742. }
  743. delay += 2*data_used;
  744. if (delay < data_used)
  745. delay = data_used;
  746. i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
  747. if(s->show_audio==1){
  748. h= INT_MIN;
  749. for(i=0; i<1000; i+=channels){
  750. int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
  751. int a= s->sample_array[idx];
  752. int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
  753. int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
  754. int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
  755. int score= a-d;
  756. if(h<score && (b^c)<0){
  757. h= score;
  758. i_start= idx;
  759. }
  760. }
  761. }
  762. s->last_i_start = i_start;
  763. } else {
  764. i_start = s->last_i_start;
  765. }
  766. bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
  767. if(s->show_audio==1){
  768. fill_rectangle(screen,
  769. s->xleft, s->ytop, s->width, s->height,
  770. bgcolor);
  771. fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
  772. /* total height for one channel */
  773. h = s->height / nb_display_channels;
  774. /* graph height / 2 */
  775. h2 = (h * 9) / 20;
  776. for(ch = 0;ch < nb_display_channels; ch++) {
  777. i = i_start + ch;
  778. y1 = s->ytop + ch * h + (h / 2); /* position of center line */
  779. for(x = 0; x < s->width; x++) {
  780. y = (s->sample_array[i] * h2) >> 15;
  781. if (y < 0) {
  782. y = -y;
  783. ys = y1 - y;
  784. } else {
  785. ys = y1;
  786. }
  787. fill_rectangle(screen,
  788. s->xleft + x, ys, 1, y,
  789. fgcolor);
  790. i += channels;
  791. if (i >= SAMPLE_ARRAY_SIZE)
  792. i -= SAMPLE_ARRAY_SIZE;
  793. }
  794. }
  795. fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
  796. for(ch = 1;ch < nb_display_channels; ch++) {
  797. y = s->ytop + ch * h;
  798. fill_rectangle(screen,
  799. s->xleft, y, s->width, 1,
  800. fgcolor);
  801. }
  802. SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
  803. }else{
  804. nb_display_channels= FFMIN(nb_display_channels, 2);
  805. if(rdft_bits != s->rdft_bits){
  806. av_rdft_end(s->rdft);
  807. s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
  808. s->rdft_bits= rdft_bits;
  809. }
  810. {
  811. FFTSample data[2][2*nb_freq];
  812. for(ch = 0;ch < nb_display_channels; ch++) {
  813. i = i_start + ch;
  814. for(x = 0; x < 2*nb_freq; x++) {
  815. double w= (x-nb_freq)*(1.0/nb_freq);
  816. data[ch][x]= s->sample_array[i]*(1.0-w*w);
  817. i += channels;
  818. if (i >= SAMPLE_ARRAY_SIZE)
  819. i -= SAMPLE_ARRAY_SIZE;
  820. }
  821. av_rdft_calc(s->rdft, data[ch]);
  822. }
  823. // least efficient way to do this; we should directly access the data, but it's more than fast enough
  824. for(y=0; y<s->height; y++){
  825. double w= 1/sqrt(nb_freq);
  826. int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
  827. int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
  828. a= FFMIN(a,255);
  829. b= FFMIN(b,255);
  830. fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
  831. fill_rectangle(screen,
  832. s->xpos, s->height-y, 1, 1,
  833. fgcolor);
  834. }
  835. }
  836. SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
  837. s->xpos++;
  838. if(s->xpos >= s->width)
  839. s->xpos= s->xleft;
  840. }
  841. }
  842. static int video_open(VideoState *is){
  843. int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
  844. int w,h;
  845. if(is_full_screen) flags |= SDL_FULLSCREEN;
  846. else flags |= SDL_RESIZABLE;
  847. if (is_full_screen && fs_screen_width) {
  848. w = fs_screen_width;
  849. h = fs_screen_height;
  850. } else if(!is_full_screen && screen_width){
  851. w = screen_width;
  852. h = screen_height;
  853. #if CONFIG_AVFILTER
  854. }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
  855. w = is->out_video_filter->inputs[0]->w;
  856. h = is->out_video_filter->inputs[0]->h;
  857. #else
  858. }else if (is->video_st && is->video_st->codec->width){
  859. w = is->video_st->codec->width;
  860. h = is->video_st->codec->height;
  861. #endif
  862. } else {
  863. w = 640;
  864. h = 480;
  865. }
  866. if(screen && is->width == screen->w && screen->w == w
  867. && is->height== screen->h && screen->h == h)
  868. return 0;
  869. #ifndef __APPLE__
  870. screen = SDL_SetVideoMode(w, h, 0, flags);
  871. #else
  872. /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
  873. screen = SDL_SetVideoMode(w, h, 24, flags);
  874. #endif
  875. if (!screen) {
  876. fprintf(stderr, "SDL: could not set video mode - exiting\n");
  877. return -1;
  878. }
  879. SDL_WM_SetCaption("FFplay", "FFplay");
  880. is->width = screen->w;
  881. is->height = screen->h;
  882. return 0;
  883. }
  884. /* display the current picture, if any */
  885. static void video_display(VideoState *is)
  886. {
  887. if(!screen)
  888. video_open(cur_stream);
  889. if (is->audio_st && is->show_audio)
  890. video_audio_display(is);
  891. else if (is->video_st)
  892. video_image_display(is);
  893. }
  894. static int refresh_thread(void *opaque)
  895. {
  896. VideoState *is= opaque;
  897. while(!is->abort_request){
  898. SDL_Event event;
  899. event.type = FF_REFRESH_EVENT;
  900. event.user.data1 = opaque;
  901. if(!is->refresh){
  902. is->refresh=1;
  903. SDL_PushEvent(&event);
  904. }
  905. usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
  906. }
  907. return 0;
  908. }
  909. /* get the current audio clock value */
  910. static double get_audio_clock(VideoState *is)
  911. {
  912. double pts;
  913. int hw_buf_size, bytes_per_sec;
  914. pts = is->audio_clock;
  915. hw_buf_size = audio_write_get_buf_size(is);
  916. bytes_per_sec = 0;
  917. if (is->audio_st) {
  918. bytes_per_sec = is->audio_st->codec->sample_rate *
  919. 2 * is->audio_st->codec->channels;
  920. }
  921. if (bytes_per_sec)
  922. pts -= (double)hw_buf_size / bytes_per_sec;
  923. return pts;
  924. }
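/* The audio clock is the pts of the last decoded audio data minus whatever
   is still waiting in the output buffers, hence the division by the byte
   rate (sample_rate * 2 bytes per sample * channels).  For example, assuming
   48 kHz 16-bit stereo and 8192 buffered bytes, the returned value is
   audio_clock - 8192 / (48000 * 2 * 2) ~= audio_clock - 43 ms. */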
  925. /* get the current video clock value */
  926. static double get_video_clock(VideoState *is)
  927. {
  928. if (is->paused) {
  929. return is->video_current_pts;
  930. } else {
  931. return is->video_current_pts_drift + av_gettime() / 1000000.0;
  932. }
  933. }
  934. /* get the current external clock value */
  935. static double get_external_clock(VideoState *is)
  936. {
  937. int64_t ti;
  938. ti = av_gettime();
  939. return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
  940. }
  941. /* get the current master clock value */
  942. static double get_master_clock(VideoState *is)
  943. {
  944. double val;
  945. if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
  946. if (is->video_st)
  947. val = get_video_clock(is);
  948. else
  949. val = get_audio_clock(is);
  950. } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
  951. if (is->audio_st)
  952. val = get_audio_clock(is);
  953. else
  954. val = get_video_clock(is);
  955. } else {
  956. val = get_external_clock(is);
  957. }
  958. return val;
  959. }
  960. /* seek in the stream */
  961. static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
  962. {
  963. if (!is->seek_req) {
  964. is->seek_pos = pos;
  965. is->seek_rel = rel;
  966. is->seek_flags &= ~AVSEEK_FLAG_BYTE;
  967. if (seek_by_bytes)
  968. is->seek_flags |= AVSEEK_FLAG_BYTE;
  969. is->seek_req = 1;
  970. }
  971. }
  972. /* pause or resume the video */
  973. static void stream_pause(VideoState *is)
  974. {
  975. if (is->paused) {
  976. is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
  977. if(is->read_pause_return != AVERROR(ENOSYS)){
  978. is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
  979. }
  980. is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
  981. }
  982. is->paused = !is->paused;
  983. }
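/* On resume, frame_timer is shifted forward by the time spent paused
   (current time + drift - last displayed pts), so frame scheduling continues
   from "now" instead of trying to catch up, and the pts drift is recomputed
   against the current time. */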
  984. static double compute_target_time(double frame_current_pts, VideoState *is)
  985. {
  986. double delay, sync_threshold, diff;
  987. /* compute nominal delay */
  988. delay = frame_current_pts - is->frame_last_pts;
  989. if (delay <= 0 || delay >= 10.0) {
  990. /* if incorrect delay, use previous one */
  991. delay = is->frame_last_delay;
  992. } else {
  993. is->frame_last_delay = delay;
  994. }
  995. is->frame_last_pts = frame_current_pts;
  996. /* update delay to follow master synchronisation source */
  997. if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
  998. is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
  999. /* if video is slave, we try to correct big delays by
  1000. duplicating or deleting a frame */
  1001. diff = get_video_clock(is) - get_master_clock(is);
  1002. /* skip or repeat frame. We take into account the
  1003. delay to compute the threshold. I still don't know
  1004. if it is the best guess */
  1005. sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
  1006. if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
  1007. if (diff <= -sync_threshold)
  1008. delay = 0;
  1009. else if (diff >= sync_threshold)
  1010. delay = 2 * delay;
  1011. }
  1012. }
  1013. is->frame_timer += delay;
  1014. #if defined(DEBUG_SYNC)
  1015. printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
  1016. delay, actual_delay, frame_current_pts, -diff);
  1017. #endif
  1018. return is->frame_timer;
  1019. }
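/* compute_target_time() turns a decoded pts into the wall-clock time at
   which the frame should be shown: the nominal delay is the pts difference
   from the previous frame, and when video is not the master clock that delay
   is adjusted (as long as the A-V difference stays below
   AV_NOSYNC_THRESHOLD): a frame that is already more than sync_threshold
   late is shown immediately (delay = 0) so playback can catch up, while a
   frame that is early is held for twice the delay.  The result accumulates
   into frame_timer, which the refresh logic uses as the display deadline. */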
  1020. /* called to display each frame */
  1021. static void video_refresh_timer(void *opaque)
  1022. {
  1023. VideoState *is = opaque;
  1024. VideoPicture *vp;
  1025. SubPicture *sp, *sp2;
  1026. if (is->video_st) {
  1027. retry:
  1028. if (is->pictq_size == 0) {
  1029. // nothing to do, no picture to display in the queue
  1030. } else {
  1031. double time= av_gettime()/1000000.0;
  1032. double next_target;
  1033. /* dequeue the picture */
  1034. vp = &is->pictq[is->pictq_rindex];
  1035. if(time < vp->target_clock)
  1036. return;
  1037. /* update current video pts */
  1038. is->video_current_pts = vp->pts;
  1039. is->video_current_pts_drift = is->video_current_pts - time;
  1040. is->video_current_pos = vp->pos;
  1041. if(is->pictq_size > 1){
  1042. VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
  1043. assert(nextvp->target_clock >= vp->target_clock);
  1044. next_target= nextvp->target_clock;
  1045. }else{
  1046. next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
  1047. }
  1048. if(framedrop && time > next_target){
  1049. is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
  1050. if(is->pictq_size > 1 || time > next_target + 0.5){
  1051. /* update queue size and signal for next picture */
  1052. if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
  1053. is->pictq_rindex = 0;
  1054. SDL_LockMutex(is->pictq_mutex);
  1055. is->pictq_size--;
  1056. SDL_CondSignal(is->pictq_cond);
  1057. SDL_UnlockMutex(is->pictq_mutex);
  1058. goto retry;
  1059. }
  1060. }
  1061. if(is->subtitle_st) {
  1062. if (is->subtitle_stream_changed) {
  1063. SDL_LockMutex(is->subpq_mutex);
  1064. while (is->subpq_size) {
  1065. free_subpicture(&is->subpq[is->subpq_rindex]);
  1066. /* update queue size and signal for next picture */
  1067. if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
  1068. is->subpq_rindex = 0;
  1069. is->subpq_size--;
  1070. }
  1071. is->subtitle_stream_changed = 0;
  1072. SDL_CondSignal(is->subpq_cond);
  1073. SDL_UnlockMutex(is->subpq_mutex);
  1074. } else {
  1075. if (is->subpq_size > 0) {
  1076. sp = &is->subpq[is->subpq_rindex];
  1077. if (is->subpq_size > 1)
  1078. sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
  1079. else
  1080. sp2 = NULL;
  1081. if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
  1082. || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
  1083. {
  1084. free_subpicture(sp);
  1085. /* update queue size and signal for next picture */
  1086. if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
  1087. is->subpq_rindex = 0;
  1088. SDL_LockMutex(is->subpq_mutex);
  1089. is->subpq_size--;
  1090. SDL_CondSignal(is->subpq_cond);
  1091. SDL_UnlockMutex(is->subpq_mutex);
  1092. }
  1093. }
  1094. }
  1095. }
  1096. /* display picture */
  1097. video_display(is);
  1098. /* update queue size and signal for next picture */
  1099. if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
  1100. is->pictq_rindex = 0;
  1101. SDL_LockMutex(is->pictq_mutex);
  1102. is->pictq_size--;
  1103. SDL_CondSignal(is->pictq_cond);
  1104. SDL_UnlockMutex(is->pictq_mutex);
  1105. }
  1106. } else if (is->audio_st) {
  1107. /* draw the next audio frame */
  1108. /* if only audio stream, then display the audio bars (better
  1109. than nothing, just to test the implementation) */
  1110. /* display picture */
  1111. video_display(is);
  1112. }
  1113. if (show_status) {
  1114. static int64_t last_time;
  1115. int64_t cur_time;
  1116. int aqsize, vqsize, sqsize;
  1117. double av_diff;
  1118. cur_time = av_gettime();
  1119. if (!last_time || (cur_time - last_time) >= 30000) {
  1120. aqsize = 0;
  1121. vqsize = 0;
  1122. sqsize = 0;
  1123. if (is->audio_st)
  1124. aqsize = is->audioq.size;
  1125. if (is->video_st)
  1126. vqsize = is->videoq.size;
  1127. if (is->subtitle_st)
  1128. sqsize = is->subtitleq.size;
  1129. av_diff = 0;
  1130. if (is->audio_st && is->video_st)
  1131. av_diff = get_audio_clock(is) - get_video_clock(is);
  1132. printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
  1133. get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
  1134. fflush(stdout);
  1135. last_time = cur_time;
  1136. }
  1137. }
  1138. }
  1139. /* allocate a picture (this must be done in the main thread to avoid
  1140. potential locking problems) */
  1141. static void alloc_picture(void *opaque)
  1142. {
  1143. VideoState *is = opaque;
  1144. VideoPicture *vp;
  1145. vp = &is->pictq[is->pictq_windex];
  1146. if (vp->bmp)
  1147. SDL_FreeYUVOverlay(vp->bmp);
  1148. #if CONFIG_AVFILTER
  1149. if (vp->picref)
  1150. avfilter_unref_pic(vp->picref);
  1151. vp->picref = NULL;
  1152. vp->width = is->out_video_filter->inputs[0]->w;
  1153. vp->height = is->out_video_filter->inputs[0]->h;
  1154. vp->pix_fmt = is->out_video_filter->inputs[0]->format;
  1155. #else
  1156. vp->width = is->video_st->codec->width;
  1157. vp->height = is->video_st->codec->height;
  1158. vp->pix_fmt = is->video_st->codec->pix_fmt;
  1159. #endif
  1160. vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
  1161. SDL_YV12_OVERLAY,
  1162. screen);
  1163. SDL_LockMutex(is->pictq_mutex);
  1164. vp->allocated = 1;
  1165. SDL_CondSignal(is->pictq_cond);
  1166. SDL_UnlockMutex(is->pictq_mutex);
  1167. }
  1168. /**
  1169. *
  1170. * @param pts the pts of the frame (falling back to the dts of the packet, or a guessed value if neither is known)
  1171. */
  1172. static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
  1173. {
  1174. VideoPicture *vp;
  1175. int dst_pix_fmt;
  1176. #if CONFIG_AVFILTER
  1177. AVPicture pict_src;
  1178. #endif
  1179. /* wait until we have space to put a new picture */
  1180. SDL_LockMutex(is->pictq_mutex);
  1181. if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
  1182. is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
  1183. while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
  1184. !is->videoq.abort_request) {
  1185. SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  1186. }
  1187. SDL_UnlockMutex(is->pictq_mutex);
  1188. if (is->videoq.abort_request)
  1189. return -1;
  1190. vp = &is->pictq[is->pictq_windex];
  1191. /* alloc or resize hardware picture buffer */
  1192. if (!vp->bmp ||
  1193. #if CONFIG_AVFILTER
  1194. vp->width != is->out_video_filter->inputs[0]->w ||
  1195. vp->height != is->out_video_filter->inputs[0]->h) {
  1196. #else
  1197. vp->width != is->video_st->codec->width ||
  1198. vp->height != is->video_st->codec->height) {
  1199. #endif
  1200. SDL_Event event;
  1201. vp->allocated = 0;
  1202. /* the allocation must be done in the main thread to avoid
  1203. locking problems */
  1204. event.type = FF_ALLOC_EVENT;
  1205. event.user.data1 = is;
  1206. SDL_PushEvent(&event);
  1207. /* wait until the picture is allocated */
  1208. SDL_LockMutex(is->pictq_mutex);
  1209. while (!vp->allocated && !is->videoq.abort_request) {
  1210. SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  1211. }
  1212. SDL_UnlockMutex(is->pictq_mutex);
  1213. if (is->videoq.abort_request)
  1214. return -1;
  1215. }
  1216. /* if the frame is not skipped, then display it */
  1217. if (vp->bmp) {
  1218. AVPicture pict;
  1219. #if CONFIG_AVFILTER
  1220. if(vp->picref)
  1221. avfilter_unref_pic(vp->picref);
  1222. vp->picref = src_frame->opaque;
  1223. #endif
  1224. /* get a pointer on the bitmap */
  1225. SDL_LockYUVOverlay (vp->bmp);
  1226. dst_pix_fmt = PIX_FMT_YUV420P;
  1227. memset(&pict,0,sizeof(AVPicture));
  1228. pict.data[0] = vp->bmp->pixels[0];
  1229. pict.data[1] = vp->bmp->pixels[2];
  1230. pict.data[2] = vp->bmp->pixels[1];
  1231. pict.linesize[0] = vp->bmp->pitches[0];
  1232. pict.linesize[1] = vp->bmp->pitches[2];
  1233. pict.linesize[2] = vp->bmp->pitches[1];
  1234. #if CONFIG_AVFILTER
  1235. pict_src.data[0] = src_frame->data[0];
  1236. pict_src.data[1] = src_frame->data[1];
  1237. pict_src.data[2] = src_frame->data[2];
  1238. pict_src.linesize[0] = src_frame->linesize[0];
  1239. pict_src.linesize[1] = src_frame->linesize[1];
  1240. pict_src.linesize[2] = src_frame->linesize[2];
  1241. //FIXME use direct rendering
  1242. av_picture_copy(&pict, &pict_src,
  1243. vp->pix_fmt, vp->width, vp->height);
  1244. #else
  1245. sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
  1246. is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
  1247. vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
  1248. dst_pix_fmt, sws_flags, NULL, NULL, NULL);
  1249. if (is->img_convert_ctx == NULL) {
  1250. fprintf(stderr, "Cannot initialize the conversion context\n");
  1251. exit(1);
  1252. }
  1253. sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
  1254. 0, vp->height, pict.data, pict.linesize);
  1255. #endif
  1256. /* update the bitmap content */
  1257. SDL_UnlockYUVOverlay(vp->bmp);
  1258. vp->pts = pts;
  1259. vp->pos = pos;
  1260. /* now we can update the picture count */
  1261. if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
  1262. is->pictq_windex = 0;
  1263. SDL_LockMutex(is->pictq_mutex);
  1264. vp->target_clock= compute_target_time(vp->pts, is);
  1265. is->pictq_size++;
  1266. SDL_UnlockMutex(is->pictq_mutex);
  1267. }
  1268. return 0;
  1269. }
  1270. /**
  1271. * compute the exact PTS for the picture if it is omitted in the stream
  1272. * @param pts1 the dts of the pkt / pts of the frame
  1273. */
  1274. static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
  1275. {
  1276. double frame_delay, pts;
  1277. pts = pts1;
  1278. if (pts != 0) {
  1279. /* update video clock with pts, if present */
  1280. is->video_clock = pts;
  1281. } else {
  1282. pts = is->video_clock;
  1283. }
  1284. /* update video clock for next frame */
  1285. frame_delay = av_q2d(is->video_st->codec->time_base);
  1286. /* for MPEG2, the frame can be repeated, so we update the
  1287. clock accordingly */
  1288. frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
  1289. is->video_clock += frame_delay;
  1290. #if defined(DEBUG_SYNC) && 0
  1291. printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
  1292. av_get_pict_type_char(src_frame->pict_type), pts, pts1);
  1293. #endif
  1294. return queue_picture(is, src_frame, pts, pos);
  1295. }
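/* For streams where some frames carry no timestamp, is->video_clock
   extrapolates the missing pts values: it is set from the last known pts and
   advanced by one frame duration per decoded frame, with repeat_pict adding
   half a frame duration per repeated field (e.g. repeat_pict == 1 for soft
   telecine makes the frame last 1.5 frame periods). */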
  1296. static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
  1297. {
  1298. int len1, got_picture, i;
  1299. if (packet_queue_get(&is->videoq, pkt, 1) < 0)
  1300. return -1;
  1301. if(pkt->data == flush_pkt.data){
  1302. avcodec_flush_buffers(is->video_st->codec);
  1303. SDL_LockMutex(is->pictq_mutex);
  1304. // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
  1305. for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
  1306. is->pictq[i].target_clock= 0;
  1307. }
  1308. while (is->pictq_size && !is->videoq.abort_request) {
  1309. SDL_CondWait(is->pictq_cond, is->pictq_mutex);
  1310. }
  1311. is->video_current_pos= -1;
  1312. SDL_UnlockMutex(is->pictq_mutex);
  1313. is->last_dts_for_fault_detection=
  1314. is->last_pts_for_fault_detection= INT64_MIN;
  1315. is->frame_last_pts= AV_NOPTS_VALUE;
  1316. is->frame_last_delay = 0;
  1317. is->frame_timer = (double)av_gettime() / 1000000.0;
  1318. is->skip_frames= 1;
  1319. is->skip_frames_index= 0;
  1320. return 0;
  1321. }
  1322. /* NOTE: ipts is the PTS of the _first_ picture beginning in
  1323. this packet, if any */
  1324. is->video_st->codec->reordered_opaque= pkt->pts;
  1325. len1 = avcodec_decode_video2(is->video_st->codec,
  1326. frame, &got_picture,
  1327. pkt);
  1328. if (got_picture) {
  1329. if(pkt->dts != AV_NOPTS_VALUE){
  1330. is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
  1331. is->last_dts_for_fault_detection= pkt->dts;
  1332. }
  1333. if(frame->reordered_opaque != AV_NOPTS_VALUE){
  1334. is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
  1335. is->last_pts_for_fault_detection= frame->reordered_opaque;
  1336. }
  1337. }
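/* pick the presentation timestamp for this frame: use the decoder-reordered pts when
 * reordering was explicitly requested, when it has produced fewer non-monotonic values
 * than the dts, or when no dts is available; otherwise fall back to the dts, then 0 */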
  1338. if( ( decoder_reorder_pts==1
  1339. || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
  1340. || pkt->dts == AV_NOPTS_VALUE)
  1341. && frame->reordered_opaque != AV_NOPTS_VALUE)
  1342. *pts= frame->reordered_opaque;
  1343. else if(pkt->dts != AV_NOPTS_VALUE)
  1344. *pts= pkt->dts;
  1345. else
  1346. *pts= 0;
  1347. // if (len1 < 0)
  1348. // break;
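/* frame dropping: only hand a decoded frame back to the caller once the skip counter
 * reaches is->skip_frames; when skip_frames is above 1 some decoded frames are discarded */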
  1349. if (got_picture){
  1350. is->skip_frames_index += 1;
  1351. if(is->skip_frames_index >= is->skip_frames){
  1352. is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
  1353. return 1;
  1354. }
  1355. }
  1356. return 0;
  1357. }
  1358. #if CONFIG_AVFILTER
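/* when compiled with libavfilter support, the video decoder is wrapped in a source filter
 * (ffplay_input) and frames are pulled through an optional user filter graph via a sink
 * filter (ffplay_output) */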
  1359. typedef struct {
  1360. VideoState *is;
  1361. AVFrame *frame;
  1362. } FilterPriv;
  1363. static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
  1364. {
  1365. FilterPriv *priv = ctx->priv;
  1366. if(!opaque) return -1;
  1367. priv->is = opaque;
  1368. priv->frame = avcodec_alloc_frame();
  1369. return 0;
  1370. }
  1371. static void input_uninit(AVFilterContext *ctx)
  1372. {
  1373. FilterPriv *priv = ctx->priv;
  1374. av_free(priv->frame);
  1375. }
  1376. static int input_request_frame(AVFilterLink *link)
  1377. {
  1378. FilterPriv *priv = link->src->priv;
  1379. AVFilterPicRef *picref;
  1380. int64_t pts = 0;
  1381. AVPacket pkt;
  1382. int ret;
  1383. while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
  1384. av_free_packet(&pkt);
  1385. if (ret < 0)
  1386. return -1;
  1387. /* FIXME: until I figure out how to hook everything up to the codec
  1388. * right, we're just copying the entire frame. */
  1389. picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
  1390. av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
  1391. picref->pic->format, link->w, link->h);
  1392. av_free_packet(&pkt);
  1393. picref->pts = pts;
  1394. picref->pos = pkt.pos;
  1395. picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
  1396. avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
  1397. avfilter_draw_slice(link, 0, link->h, 1);
  1398. avfilter_end_frame(link);
  1399. avfilter_unref_pic(picref);
  1400. return 0;
  1401. }
  1402. static int input_query_formats(AVFilterContext *ctx)
  1403. {
  1404. FilterPriv *priv = ctx->priv;
  1405. enum PixelFormat pix_fmts[] = {
  1406. priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
  1407. };
  1408. avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
  1409. return 0;
  1410. }
  1411. static int input_config_props(AVFilterLink *link)
  1412. {
  1413. FilterPriv *priv = link->src->priv;
  1414. AVCodecContext *c = priv->is->video_st->codec;
  1415. link->w = c->width;
  1416. link->h = c->height;
  1417. return 0;
  1418. }
  1419. static AVFilter input_filter =
  1420. {
  1421. .name = "ffplay_input",
  1422. .priv_size = sizeof(FilterPriv),
  1423. .init = input_init,
  1424. .uninit = input_uninit,
  1425. .query_formats = input_query_formats,
  1426. .inputs = (AVFilterPad[]) {{ .name = NULL }},
  1427. .outputs = (AVFilterPad[]) {{ .name = "default",
  1428. .type = CODEC_TYPE_VIDEO,
  1429. .request_frame = input_request_frame,
  1430. .config_props = input_config_props, },
  1431. { .name = NULL }},
  1432. };
  1433. static void output_end_frame(AVFilterLink *link)
  1434. {
  1435. }
  1436. static int output_query_formats(AVFilterContext *ctx)
  1437. {
  1438. enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
  1439. avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
  1440. return 0;
  1441. }
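/* pull one filtered picture from the graph output and expose it through an AVFrame:
 * the data/linesize pointers are borrowed from the picref, which is stashed in
 * frame->opaque so the reference can be released later */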
  1442. static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
  1443. int64_t *pts, int64_t *pos)
  1444. {
  1445. AVFilterPicRef *pic;
  1446. if(avfilter_request_frame(ctx->inputs[0]))
  1447. return -1;
  1448. if(!(pic = ctx->inputs[0]->cur_pic))
  1449. return -1;
  1450. ctx->inputs[0]->cur_pic = NULL;
  1451. frame->opaque = pic;
  1452. *pts = pic->pts;
  1453. *pos = pic->pos;
  1454. memcpy(frame->data, pic->data, sizeof(frame->data));
  1455. memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
  1456. return 1;
  1457. }
  1458. static AVFilter output_filter =
  1459. {
  1460. .name = "ffplay_output",
  1461. .query_formats = output_query_formats,
  1462. .inputs = (AVFilterPad[]) {{ .name = "default",
  1463. .type = CODEC_TYPE_VIDEO,
  1464. .end_frame = output_end_frame,
  1465. .min_perms = AV_PERM_READ, },
  1466. { .name = NULL }},
  1467. .outputs = (AVFilterPad[]) {{ .name = NULL }},
  1468. };
  1469. #endif /* CONFIG_AVFILTER */
  1470. static int video_thread(void *arg)
  1471. {
  1472. VideoState *is = arg;
  1473. AVFrame *frame= avcodec_alloc_frame();
  1474. int64_t pts_int, pos;
  1475. double pts;
  1476. int ret;
  1477. #if CONFIG_AVFILTER
  1478. AVFilterContext *filt_src = NULL, *filt_out = NULL;
  1479. AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
  1480. graph->scale_sws_opts = av_strdup("sws_flags=bilinear");
  1481. if(!(filt_src = avfilter_open(&input_filter, "src"))) goto the_end;
  1482. if(!(filt_out = avfilter_open(&output_filter, "out"))) goto the_end;
  1483. if(avfilter_init_filter(filt_src, NULL, is)) goto the_end;
  1484. if(avfilter_init_filter(filt_out, NULL, frame)) goto the_end;
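/* if the user supplied a -vfilters description, parse it and splice it between the
 * source and sink filters; otherwise link the two directly */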
  1485. if(vfilters) {
  1486. AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
  1487. AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
  1488. outputs->name = av_strdup("in");
  1489. outputs->filter = filt_src;
  1490. outputs->pad_idx = 0;
  1491. outputs->next = NULL;
  1492. inputs->name = av_strdup("out");
  1493. inputs->filter = filt_out;
  1494. inputs->pad_idx = 0;
  1495. inputs->next = NULL;
  1496. if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
  1497. goto the_end;
  1498. av_freep(&vfilters);
  1499. } else {
  1500. if(avfilter_link(filt_src, 0, filt_out, 0) < 0) goto the_end;
  1501. }
  1502. avfilter_graph_add_filter(graph, filt_src);
  1503. avfilter_graph_add_filter(graph, filt_out);
  1504. if(avfilter_graph_check_validity(graph, NULL)) goto the_end;
  1505. if(avfilter_graph_config_formats(graph, NULL)) goto the_end;
  1506. if(avfilter_graph_config_links(graph, NULL)) goto the_end;
  1507. is->out_video_filter = filt_out;
  1508. #endif
  1509. for(;;) {
  1510. #if !CONFIG_AVFILTER
  1511. AVPacket pkt;
  1512. #endif
  1513. while (is->paused && !is->videoq.abort_request)
  1514. SDL_Delay(10);
  1515. #if CONFIG_AVFILTER
  1516. ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
  1517. #else
  1518. ret = get_video_frame(is, frame, &pts_int, &pkt);
  1519. #endif
  1520. if (ret < 0) goto the_end;
  1521. if (!ret)
  1522. continue;
  1523. pts = pts_int*av_q2d(is->video_st->time_base);
  1524. #if CONFIG_AVFILTER
  1525. ret = output_picture2(is, frame, pts, pos);
  1526. #else
  1527. ret = output_picture2(is, frame, pts, pkt.pos);
  1528. av_free_packet(&pkt);
  1529. #endif
  1530. if (ret < 0)
  1531. goto the_end;
  1532. if (step)
  1533. if (cur_stream)
  1534. stream_pause(cur_stream);
  1535. }
  1536. the_end:
  1537. #if CONFIG_AVFILTER
  1538. avfilter_graph_destroy(graph);
  1539. av_freep(&graph);
  1540. #endif
  1541. av_free(frame);
  1542. return 0;
  1543. }
  1544. static int subtitle_thread(void *arg)
  1545. {
  1546. VideoState *is = arg;
  1547. SubPicture *sp;
  1548. AVPacket pkt1, *pkt = &pkt1;
  1549. int len1, got_subtitle;
  1550. double pts;
  1551. int i, j;
  1552. int r, g, b, y, u, v, a;
  1553. for(;;) {
  1554. while (is->paused && !is->subtitleq.abort_request) {
  1555. SDL_Delay(10);
  1556. }
  1557. if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
  1558. break;
  1559. if(pkt->data == flush_pkt.data){
  1560. avcodec_flush_buffers(is->subtitle_st->codec);
  1561. continue;
  1562. }
  1563. SDL_LockMutex(is->subpq_mutex);
  1564. while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
  1565. !is->subtitleq.abort_request) {
  1566. SDL_CondWait(is->subpq_cond, is->subpq_mutex);
  1567. }
  1568. SDL_UnlockMutex(is->subpq_mutex);
  1569. if (is->subtitleq.abort_request)
  1570. goto the_end;
  1571. sp = &is->subpq[is->subpq_windex];
  1572. /* NOTE: ipts is the PTS of the _first_ picture beginning in
  1573. this packet, if any */
  1574. pts = 0;
  1575. if (pkt->pts != AV_NOPTS_VALUE)
  1576. pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
  1577. len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
  1578. &sp->sub, &got_subtitle,
  1579. pkt);
  1580. // if (len1 < 0)
  1581. // break;
  1582. if (got_subtitle && sp->sub.format == 0) {
  1583. sp->pts = pts;
  1584. for (i = 0; i < sp->sub.num_rects; i++)
  1585. {
  1586. for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
  1587. {
  1588. RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
  1589. y = RGB_TO_Y_CCIR(r, g, b);
  1590. u = RGB_TO_U_CCIR(r, g, b, 0);
  1591. v = RGB_TO_V_CCIR(r, g, b, 0);
  1592. YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
  1593. }
  1594. }
  1595. /* now we can update the picture count */
  1596. if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
  1597. is->subpq_windex = 0;
  1598. SDL_LockMutex(is->subpq_mutex);
  1599. is->subpq_size++;
  1600. SDL_UnlockMutex(is->subpq_mutex);
  1601. }
  1602. av_free_packet(pkt);
  1603. // if (step)
  1604. // if (cur_stream)
  1605. // stream_pause(cur_stream);
  1606. }
  1607. the_end:
  1608. return 0;
  1609. }
1610. /* copy samples into the circular buffer used for the audio waveform display */
  1611. static void update_sample_display(VideoState *is, short *samples, int samples_size)
  1612. {
  1613. int size, len, channels;
  1614. channels = is->audio_st->codec->channels;
  1615. size = samples_size / sizeof(short);
  1616. while (size > 0) {
  1617. len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
  1618. if (len > size)
  1619. len = size;
  1620. memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
  1621. samples += len;
  1622. is->sample_array_index += len;
  1623. if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
  1624. is->sample_array_index = 0;
  1625. size -= len;
  1626. }
  1627. }
1628. /* return the new audio buffer size (samples can be added or deleted
1629. to get better sync if video or the external clock is the master) */
  1630. static int synchronize_audio(VideoState *is, short *samples,
  1631. int samples_size1, double pts)
  1632. {
  1633. int n, samples_size;
  1634. double ref_clock;
  1635. n = 2 * is->audio_st->codec->channels;
  1636. samples_size = samples_size1;
  1637. /* if not master, then we try to remove or add samples to correct the clock */
  1638. if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
  1639. is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
  1640. double diff, avg_diff;
  1641. int wanted_size, min_size, max_size, nb_samples;
  1642. ref_clock = get_master_clock(is);
  1643. diff = get_audio_clock(is) - ref_clock;
  1644. if (diff < AV_NOSYNC_THRESHOLD) {
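/* accumulate the A-V difference into an exponentially weighted sum; multiplying by
 * (1 - audio_diff_avg_coef) below recovers the weighted average difference */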
  1645. is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
  1646. if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1647. /* not enough measurements yet for a reliable estimate */
  1648. is->audio_diff_avg_count++;
  1649. } else {
  1650. /* estimate the A-V difference */
  1651. avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
  1652. if (fabs(avg_diff) >= is->audio_diff_threshold) {
  1653. wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
  1654. nb_samples = samples_size / n;
  1655. min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
  1656. max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
  1657. if (wanted_size < min_size)
  1658. wanted_size = min_size;
  1659. else if (wanted_size > max_size)
  1660. wanted_size = max_size;
1661. /* add or remove samples to correct the synchronization */
  1662. if (wanted_size < samples_size) {
  1663. /* remove samples */
  1664. samples_size = wanted_size;
  1665. } else if (wanted_size > samples_size) {
  1666. uint8_t *samples_end, *q;
  1667. int nb;
  1668. /* add samples */
1669. nb = (wanted_size - samples_size);
  1670. samples_end = (uint8_t *)samples + samples_size - n;
  1671. q = samples_end + n;
  1672. while (nb > 0) {
  1673. memcpy(q, samples_end, n);
  1674. q += n;
  1675. nb -= n;
  1676. }
  1677. samples_size = wanted_size;
  1678. }
  1679. }
  1680. #if 0
  1681. printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
  1682. diff, avg_diff, samples_size - samples_size1,
  1683. is->audio_clock, is->video_clock, is->audio_diff_threshold);
  1684. #endif
  1685. }
  1686. } else {
1687. /* too big difference: may be initial PTS errors, so
1688. reset the A-V filter */
  1689. is->audio_diff_avg_count = 0;
  1690. is->audio_diff_cum = 0;
  1691. }
  1692. }
  1693. return samples_size;
  1694. }
1695. /* decode one audio frame and return its uncompressed size */
  1696. static int audio_decode_frame(VideoState *is, double *pts_ptr)
  1697. {
  1698. AVPacket *pkt_temp = &is->audio_pkt_temp;
  1699. AVPacket *pkt = &is->audio_pkt;
  1700. AVCodecContext *dec= is->audio_st->codec;
  1701. int n, len1, data_size;
  1702. double pts;
  1703. for(;;) {
  1704. /* NOTE: the audio packet can contain several frames */
  1705. while (pkt_temp->size > 0) {
  1706. data_size = sizeof(is->audio_buf1);
  1707. len1 = avcodec_decode_audio3(dec,
  1708. (int16_t *)is->audio_buf1, &data_size,
  1709. pkt_temp);
  1710. if (len1 < 0) {
  1711. /* if error, we skip the frame */
  1712. pkt_temp->size = 0;
  1713. break;
  1714. }
  1715. pkt_temp->data += len1;
  1716. pkt_temp->size -= len1;
  1717. if (data_size <= 0)
  1718. continue;
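/* if the decoder's output sample format differs from the signed 16-bit format SDL
 * was opened with, (re)create an audio format converter for it */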
  1719. if (dec->sample_fmt != is->audio_src_fmt) {
  1720. if (is->reformat_ctx)
  1721. av_audio_convert_free(is->reformat_ctx);
  1722. is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
  1723. dec->sample_fmt, 1, NULL, 0);
  1724. if (!is->reformat_ctx) {
  1725. fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
  1726. avcodec_get_sample_fmt_name(dec->sample_fmt),
  1727. avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
  1728. break;
  1729. }
  1730. is->audio_src_fmt= dec->sample_fmt;
  1731. }
  1732. if (is->reformat_ctx) {
  1733. const void *ibuf[6]= {is->audio_buf1};
  1734. void *obuf[6]= {is->audio_buf2};
  1735. int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
  1736. int ostride[6]= {2};
  1737. int len= data_size/istride[0];
  1738. if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
  1739. printf("av_audio_convert() failed\n");
  1740. break;
  1741. }
  1742. is->audio_buf= is->audio_buf2;
1743. /* FIXME: existing code assumes that data_size equals framesize*channels*2;
1744. remove this legacy cruft */
  1745. data_size= len*2;
  1746. }else{
  1747. is->audio_buf= is->audio_buf1;
  1748. }
  1749. /* if no pts, then compute it */
  1750. pts = is->audio_clock;
  1751. *pts_ptr = pts;
  1752. n = 2 * dec->channels;
  1753. is->audio_clock += (double)data_size /
  1754. (double)(n * dec->sample_rate);
  1755. #if defined(DEBUG_SYNC)
  1756. {
  1757. static double last_clock;
  1758. printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
  1759. is->audio_clock - last_clock,
  1760. is->audio_clock, pts);
  1761. last_clock = is->audio_clock;
  1762. }
  1763. #endif
  1764. return data_size;
  1765. }
  1766. /* free the current packet */
  1767. if (pkt->data)
  1768. av_free_packet(pkt);
  1769. if (is->paused || is->audioq.abort_request) {
  1770. return -1;
  1771. }
  1772. /* read next packet */
  1773. if (packet_queue_get(&is->audioq, pkt, 1) < 0)
  1774. return -1;
  1775. if(pkt->data == flush_pkt.data){
  1776. avcodec_flush_buffers(dec);
  1777. continue;
  1778. }
  1779. pkt_temp->data = pkt->data;
  1780. pkt_temp->size = pkt->size;
1781. /* if a pts is present, update the audio clock with it */
  1782. if (pkt->pts != AV_NOPTS_VALUE) {
  1783. is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
  1784. }
  1785. }
  1786. }
1787. /* get the remaining size of the current audio output buffer, in bytes.
1788. With SDL, we cannot get precise information about how much has already been played */
  1789. static int audio_write_get_buf_size(VideoState *is)
  1790. {
  1791. return is->audio_buf_size - is->audio_buf_index;
  1792. }
  1793. /* prepare a new audio buffer */
  1794. static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
  1795. {
  1796. VideoState *is = opaque;
  1797. int audio_size, len1;
  1798. double pts;
  1799. audio_callback_time = av_gettime();
  1800. while (len > 0) {
  1801. if (is->audio_buf_index >= is->audio_buf_size) {
  1802. audio_size = audio_decode_frame(is, &pts);
  1803. if (audio_size < 0) {
  1804. /* if error, just output silence */
  1805. is->audio_buf = is->audio_buf1;
  1806. is->audio_buf_size = 1024;
  1807. memset(is->audio_buf, 0, is->audio_buf_size);
  1808. } else {
  1809. if (is->show_audio)
  1810. update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
  1811. audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
  1812. pts);
  1813. is->audio_buf_size = audio_size;
  1814. }
  1815. is->audio_buf_index = 0;
  1816. }
  1817. len1 = is->audio_buf_size - is->audio_buf_index;
  1818. if (len1 > len)
  1819. len1 = len;
  1820. memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
  1821. len -= len1;
  1822. stream += len1;
  1823. is->audio_buf_index += len1;
  1824. }
  1825. }
  1826. /* open a given stream. Return 0 if OK */
  1827. static int stream_component_open(VideoState *is, int stream_index)
  1828. {
  1829. AVFormatContext *ic = is->ic;
  1830. AVCodecContext *avctx;
  1831. AVCodec *codec;
  1832. SDL_AudioSpec wanted_spec, spec;
  1833. if (stream_index < 0 || stream_index >= ic->nb_streams)
  1834. return -1;
  1835. avctx = ic->streams[stream_index]->codec;
  1836. /* prepare audio output */
  1837. if (avctx->codec_type == CODEC_TYPE_AUDIO) {
  1838. if (avctx->channels > 0) {
  1839. avctx->request_channels = FFMIN(2, avctx->channels);
  1840. } else {
  1841. avctx->request_channels = 2;
  1842. }
  1843. }
  1844. codec = avcodec_find_decoder(avctx->codec_id);
  1845. avctx->debug_mv = debug_mv;
  1846. avctx->debug = debug;
  1847. avctx->workaround_bugs = workaround_bugs;
  1848. avctx->lowres = lowres;
  1849. if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
  1850. avctx->idct_algo= idct;
  1851. if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
  1852. avctx->skip_frame= skip_frame;
  1853. avctx->skip_idct= skip_idct;
  1854. avctx->skip_loop_filter= skip_loop_filter;
  1855. avctx->error_recognition= error_recognition;
  1856. avctx->error_concealment= error_concealment;
  1857. avcodec_thread_init(avctx, thread_count);
  1858. set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
  1859. if (!codec ||
  1860. avcodec_open(avctx, codec) < 0)
  1861. return -1;
  1862. /* prepare audio output */
  1863. if (avctx->codec_type == CODEC_TYPE_AUDIO) {
  1864. wanted_spec.freq = avctx->sample_rate;
  1865. wanted_spec.format = AUDIO_S16SYS;
  1866. wanted_spec.channels = avctx->channels;
  1867. wanted_spec.silence = 0;
  1868. wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  1869. wanted_spec.callback = sdl_audio_callback;
  1870. wanted_spec.userdata = is;
  1871. if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
  1872. fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
  1873. return -1;
  1874. }
  1875. is->audio_hw_buf_size = spec.size;
  1876. is->audio_src_fmt= SAMPLE_FMT_S16;
  1877. }
  1878. ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
  1879. switch(avctx->codec_type) {
  1880. case CODEC_TYPE_AUDIO:
  1881. is->audio_stream = stream_index;
  1882. is->audio_st = ic->streams[stream_index];
  1883. is->audio_buf_size = 0;
  1884. is->audio_buf_index = 0;
  1885. /* init averaging filter */
  1886. is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
  1887. is->audio_diff_avg_count = 0;
1888. /* since we do not have a precise enough measure of the audio fifo fullness,
1889. we correct audio sync only if the error is larger than this threshold */
  1890. is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
  1891. memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
  1892. packet_queue_init(&is->audioq);
  1893. SDL_PauseAudio(0);
  1894. break;
  1895. case CODEC_TYPE_VIDEO:
  1896. is->video_stream = stream_index;
  1897. is->video_st = ic->streams[stream_index];
  1898. // is->video_current_pts_time = av_gettime();
  1899. packet_queue_init(&is->videoq);
  1900. is->video_tid = SDL_CreateThread(video_thread, is);
  1901. break;
  1902. case CODEC_TYPE_SUBTITLE:
  1903. is->subtitle_stream = stream_index;
  1904. is->subtitle_st = ic->streams[stream_index];
  1905. packet_queue_init(&is->subtitleq);
  1906. is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
  1907. break;
  1908. default:
  1909. break;
  1910. }
  1911. return 0;
  1912. }
  1913. static void stream_component_close(VideoState *is, int stream_index)
  1914. {
  1915. AVFormatContext *ic = is->ic;
  1916. AVCodecContext *avctx;
  1917. if (stream_index < 0 || stream_index >= ic->nb_streams)
  1918. return;
  1919. avctx = ic->streams[stream_index]->codec;
  1920. switch(avctx->codec_type) {
  1921. case CODEC_TYPE_AUDIO:
  1922. packet_queue_abort(&is->audioq);
  1923. SDL_CloseAudio();
  1924. packet_queue_end(&is->audioq);
  1925. if (is->reformat_ctx)
  1926. av_audio_convert_free(is->reformat_ctx);
  1927. is->reformat_ctx = NULL;
  1928. break;
  1929. case CODEC_TYPE_VIDEO:
  1930. packet_queue_abort(&is->videoq);
  1931. /* note: we also signal this mutex to make sure we deblock the
  1932. video thread in all cases */
  1933. SDL_LockMutex(is->pictq_mutex);
  1934. SDL_CondSignal(is->pictq_cond);
  1935. SDL_UnlockMutex(is->pictq_mutex);
  1936. SDL_WaitThread(is->video_tid, NULL);
  1937. packet_queue_end(&is->videoq);
  1938. break;
  1939. case CODEC_TYPE_SUBTITLE:
  1940. packet_queue_abort(&is->subtitleq);
1941. /* note: we also signal this mutex to make sure we deblock the
1942. subtitle thread in all cases */
  1943. SDL_LockMutex(is->subpq_mutex);
  1944. is->subtitle_stream_changed = 1;
  1945. SDL_CondSignal(is->subpq_cond);
  1946. SDL_UnlockMutex(is->subpq_mutex);
  1947. SDL_WaitThread(is->subtitle_tid, NULL);
  1948. packet_queue_end(&is->subtitleq);
  1949. break;
  1950. default:
  1951. break;
  1952. }
  1953. ic->streams[stream_index]->discard = AVDISCARD_ALL;
  1954. avcodec_close(avctx);
  1955. switch(avctx->codec_type) {
  1956. case CODEC_TYPE_AUDIO:
  1957. is->audio_st = NULL;
  1958. is->audio_stream = -1;
  1959. break;
  1960. case CODEC_TYPE_VIDEO:
  1961. is->video_st = NULL;
  1962. is->video_stream = -1;
  1963. break;
  1964. case CODEC_TYPE_SUBTITLE:
  1965. is->subtitle_st = NULL;
  1966. is->subtitle_stream = -1;
  1967. break;
  1968. default:
  1969. break;
  1970. }
  1971. }
  1972. /* since we have only one decoding thread, we can use a global
  1973. variable instead of a thread local variable */
  1974. static VideoState *global_video_state;
  1975. static int decode_interrupt_cb(void)
  1976. {
  1977. return (global_video_state && global_video_state->abort_request);
  1978. }
  1979. /* this thread gets the stream from the disk or the network */
  1980. static int decode_thread(void *arg)
  1981. {
  1982. VideoState *is = arg;
  1983. AVFormatContext *ic;
  1984. int err, i, ret;
  1985. int st_index[CODEC_TYPE_NB];
  1986. int st_count[CODEC_TYPE_NB]={0};
  1987. int st_best_packet_count[CODEC_TYPE_NB];
  1988. AVPacket pkt1, *pkt = &pkt1;
  1989. AVFormatParameters params, *ap = &params;
  1990. int eof=0;
  1991. ic = avformat_alloc_context();
  1992. memset(st_index, -1, sizeof(st_index));
  1993. memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
  1994. is->video_stream = -1;
  1995. is->audio_stream = -1;
  1996. is->subtitle_stream = -1;
  1997. global_video_state = is;
  1998. url_set_interrupt_cb(decode_interrupt_cb);
  1999. memset(ap, 0, sizeof(*ap));
  2000. ap->prealloced_context = 1;
  2001. ap->width = frame_width;
  2002. ap->height= frame_height;
  2003. ap->time_base= (AVRational){1, 25};
  2004. ap->pix_fmt = frame_pix_fmt;
  2005. set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
  2006. err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
  2007. if (err < 0) {
  2008. print_error(is->filename, err);
  2009. ret = -1;
  2010. goto fail;
  2011. }
  2012. is->ic = ic;
  2013. if(genpts)
  2014. ic->flags |= AVFMT_FLAG_GENPTS;
  2015. err = av_find_stream_info(ic);
  2016. if (err < 0) {
  2017. fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
  2018. ret = -1;
  2019. goto fail;
  2020. }
  2021. if(ic->pb)
2022. ic->pb->eof_reached= 0; //FIXME hack: ffplay should perhaps not use url_feof() to test for the end
  2023. if(seek_by_bytes<0)
  2024. seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
  2025. /* if seeking requested, we execute it */
  2026. if (start_time != AV_NOPTS_VALUE) {
  2027. int64_t timestamp;
  2028. timestamp = start_time;
  2029. /* add the stream start time */
  2030. if (ic->start_time != AV_NOPTS_VALUE)
  2031. timestamp += ic->start_time;
  2032. ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
  2033. if (ret < 0) {
  2034. fprintf(stderr, "%s: could not seek to position %0.3f\n",
  2035. is->filename, (double)timestamp / AV_TIME_BASE);
  2036. }
  2037. }
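/* for each codec type, pick the stream whose probing produced the most frames
 * (codec_info_nb_frames), unless the user forced a specific stream index
 * with -ast/-vst/-sst */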
  2038. for(i = 0; i < ic->nb_streams; i++) {
  2039. AVStream *st= ic->streams[i];
  2040. AVCodecContext *avctx = st->codec;
  2041. ic->streams[i]->discard = AVDISCARD_ALL;
  2042. if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
  2043. continue;
  2044. if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
  2045. continue;
  2046. if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
  2047. continue;
  2048. st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
  2049. switch(avctx->codec_type) {
  2050. case CODEC_TYPE_AUDIO:
  2051. if (!audio_disable)
  2052. st_index[CODEC_TYPE_AUDIO] = i;
  2053. break;
  2054. case CODEC_TYPE_VIDEO:
  2055. case CODEC_TYPE_SUBTITLE:
  2056. if (!video_disable)
  2057. st_index[avctx->codec_type] = i;
  2058. break;
  2059. default:
  2060. break;
  2061. }
  2062. }
  2063. if (show_status) {
  2064. dump_format(ic, 0, is->filename, 0);
  2065. }
  2066. /* open the streams */
  2067. if (st_index[CODEC_TYPE_AUDIO] >= 0) {
  2068. stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
  2069. }
  2070. ret=-1;
  2071. if (st_index[CODEC_TYPE_VIDEO] >= 0) {
  2072. ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
  2073. }
  2074. is->refresh_tid = SDL_CreateThread(refresh_thread, is);
  2075. if(ret<0) {
  2076. if (!display_disable)
  2077. is->show_audio = 2;
  2078. }
  2079. if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
  2080. stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
  2081. }
  2082. if (is->video_stream < 0 && is->audio_stream < 0) {
  2083. fprintf(stderr, "%s: could not open codecs\n", is->filename);
  2084. ret = -1;
  2085. goto fail;
  2086. }
  2087. for(;;) {
  2088. if (is->abort_request)
  2089. break;
  2090. if (is->paused != is->last_paused) {
  2091. is->last_paused = is->paused;
  2092. if (is->paused)
  2093. is->read_pause_return= av_read_pause(ic);
  2094. else
  2095. av_read_play(ic);
  2096. }
  2097. #if CONFIG_RTSP_DEMUXER
  2098. if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
  2099. /* wait 10 ms to avoid trying to get another packet */
  2100. /* XXX: horrible */
  2101. SDL_Delay(10);
  2102. continue;
  2103. }
  2104. #endif
  2105. if (is->seek_req) {
  2106. int64_t seek_target= is->seek_pos;
  2107. int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
  2108. int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2109. //FIXME the +-2 is due to rounding not being done in the correct direction when
2110. // the seek_pos/seek_rel variables are generated
  2111. ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
  2112. if (ret < 0) {
  2113. fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
  2114. }else{
  2115. if (is->audio_stream >= 0) {
  2116. packet_queue_flush(&is->audioq);
  2117. packet_queue_put(&is->audioq, &flush_pkt);
  2118. }
  2119. if (is->subtitle_stream >= 0) {
  2120. packet_queue_flush(&is->subtitleq);
  2121. packet_queue_put(&is->subtitleq, &flush_pkt);
  2122. }
  2123. if (is->video_stream >= 0) {
  2124. packet_queue_flush(&is->videoq);
  2125. packet_queue_put(&is->videoq, &flush_pkt);
  2126. }
  2127. }
  2128. is->seek_req = 0;
  2129. eof= 0;
  2130. }
2131. /* if the queues are full, no need to read more */
  2132. if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
  2133. || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream<0)
  2134. && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream<0)
  2135. && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
  2136. /* wait 10 ms */
  2137. SDL_Delay(10);
  2138. continue;
  2139. }
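/* at end of file, queue an empty packet for the video stream so the decoder flushes
 * its delayed frames; with -autoexit, quit once all packet queues have drained */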
  2140. if(url_feof(ic->pb) || eof) {
  2141. if(is->video_stream >= 0){
  2142. av_init_packet(pkt);
  2143. pkt->data=NULL;
  2144. pkt->size=0;
  2145. pkt->stream_index= is->video_stream;
  2146. packet_queue_put(&is->videoq, pkt);
  2147. }
  2148. SDL_Delay(10);
  2149. if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
  2150. ret=AVERROR_EOF;
  2151. goto fail;
  2152. }
  2153. continue;
  2154. }
  2155. ret = av_read_frame(ic, pkt);
  2156. if (ret < 0) {
  2157. if (ret == AVERROR_EOF)
  2158. eof=1;
  2159. if (url_ferror(ic->pb))
  2160. break;
  2161. SDL_Delay(100); /* wait for user event */
  2162. continue;
  2163. }
  2164. if (pkt->stream_index == is->audio_stream) {
  2165. packet_queue_put(&is->audioq, pkt);
  2166. } else if (pkt->stream_index == is->video_stream) {
  2167. packet_queue_put(&is->videoq, pkt);
  2168. } else if (pkt->stream_index == is->subtitle_stream) {
  2169. packet_queue_put(&is->subtitleq, pkt);
  2170. } else {
  2171. av_free_packet(pkt);
  2172. }
  2173. }
  2174. /* wait until the end */
  2175. while (!is->abort_request) {
  2176. SDL_Delay(100);
  2177. }
  2178. ret = 0;
  2179. fail:
  2180. /* disable interrupting */
  2181. global_video_state = NULL;
  2182. /* close each stream */
  2183. if (is->audio_stream >= 0)
  2184. stream_component_close(is, is->audio_stream);
  2185. if (is->video_stream >= 0)
  2186. stream_component_close(is, is->video_stream);
  2187. if (is->subtitle_stream >= 0)
  2188. stream_component_close(is, is->subtitle_stream);
  2189. if (is->ic) {
  2190. av_close_input_file(is->ic);
  2191. is->ic = NULL; /* safety */
  2192. }
  2193. url_set_interrupt_cb(NULL);
  2194. if (ret != 0) {
  2195. SDL_Event event;
  2196. event.type = FF_QUIT_EVENT;
  2197. event.user.data1 = is;
  2198. SDL_PushEvent(&event);
  2199. }
  2200. return 0;
  2201. }
  2202. static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
  2203. {
  2204. VideoState *is;
  2205. is = av_mallocz(sizeof(VideoState));
  2206. if (!is)
  2207. return NULL;
  2208. av_strlcpy(is->filename, filename, sizeof(is->filename));
  2209. is->iformat = iformat;
  2210. is->ytop = 0;
  2211. is->xleft = 0;
  2212. /* start video display */
  2213. is->pictq_mutex = SDL_CreateMutex();
  2214. is->pictq_cond = SDL_CreateCond();
  2215. is->subpq_mutex = SDL_CreateMutex();
  2216. is->subpq_cond = SDL_CreateCond();
  2217. is->av_sync_type = av_sync_type;
  2218. is->parse_tid = SDL_CreateThread(decode_thread, is);
  2219. if (!is->parse_tid) {
  2220. av_free(is);
  2221. return NULL;
  2222. }
  2223. return is;
  2224. }
  2225. static void stream_close(VideoState *is)
  2226. {
  2227. VideoPicture *vp;
  2228. int i;
  2229. /* XXX: use a special url_shutdown call to abort parse cleanly */
  2230. is->abort_request = 1;
  2231. SDL_WaitThread(is->parse_tid, NULL);
  2232. SDL_WaitThread(is->refresh_tid, NULL);
  2233. /* free all pictures */
  2234. for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
  2235. vp = &is->pictq[i];
  2236. #if CONFIG_AVFILTER
  2237. if (vp->picref) {
  2238. avfilter_unref_pic(vp->picref);
  2239. vp->picref = NULL;
  2240. }
  2241. #endif
  2242. if (vp->bmp) {
  2243. SDL_FreeYUVOverlay(vp->bmp);
  2244. vp->bmp = NULL;
  2245. }
  2246. }
  2247. SDL_DestroyMutex(is->pictq_mutex);
  2248. SDL_DestroyCond(is->pictq_cond);
  2249. SDL_DestroyMutex(is->subpq_mutex);
  2250. SDL_DestroyCond(is->subpq_cond);
  2251. #if !CONFIG_AVFILTER
  2252. if (is->img_convert_ctx)
  2253. sws_freeContext(is->img_convert_ctx);
  2254. #endif
  2255. av_free(is);
  2256. }
  2257. static void stream_cycle_channel(VideoState *is, int codec_type)
  2258. {
  2259. AVFormatContext *ic = is->ic;
  2260. int start_index, stream_index;
  2261. AVStream *st;
  2262. if (codec_type == CODEC_TYPE_VIDEO)
  2263. start_index = is->video_stream;
  2264. else if (codec_type == CODEC_TYPE_AUDIO)
  2265. start_index = is->audio_stream;
  2266. else
  2267. start_index = is->subtitle_stream;
  2268. if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
  2269. return;
  2270. stream_index = start_index;
  2271. for(;;) {
  2272. if (++stream_index >= is->ic->nb_streams)
  2273. {
  2274. if (codec_type == CODEC_TYPE_SUBTITLE)
  2275. {
  2276. stream_index = -1;
  2277. goto the_end;
  2278. } else
  2279. stream_index = 0;
  2280. }
  2281. if (stream_index == start_index)
  2282. return;
  2283. st = ic->streams[stream_index];
  2284. if (st->codec->codec_type == codec_type) {
  2285. /* check that parameters are OK */
  2286. switch(codec_type) {
  2287. case CODEC_TYPE_AUDIO:
  2288. if (st->codec->sample_rate != 0 &&
  2289. st->codec->channels != 0)
  2290. goto the_end;
  2291. break;
  2292. case CODEC_TYPE_VIDEO:
  2293. case CODEC_TYPE_SUBTITLE:
  2294. goto the_end;
  2295. default:
  2296. break;
  2297. }
  2298. }
  2299. }
  2300. the_end:
  2301. stream_component_close(is, start_index);
  2302. stream_component_open(is, stream_index);
  2303. }
  2304. static void toggle_full_screen(void)
  2305. {
  2306. is_full_screen = !is_full_screen;
  2307. if (!fs_screen_width) {
  2308. /* use default SDL method */
  2309. // SDL_WM_ToggleFullScreen(screen);
  2310. }
  2311. video_open(cur_stream);
  2312. }
  2313. static void toggle_pause(void)
  2314. {
  2315. if (cur_stream)
  2316. stream_pause(cur_stream);
  2317. step = 0;
  2318. }
  2319. static void step_to_next_frame(void)
  2320. {
  2321. if (cur_stream) {
2322. /* if the stream is paused, unpause it, then step */
  2323. if (cur_stream->paused)
  2324. stream_pause(cur_stream);
  2325. }
  2326. step = 1;
  2327. }
  2328. static void do_exit(void)
  2329. {
  2330. int i;
  2331. if (cur_stream) {
  2332. stream_close(cur_stream);
  2333. cur_stream = NULL;
  2334. }
  2335. for (i = 0; i < CODEC_TYPE_NB; i++)
  2336. av_free(avcodec_opts[i]);
  2337. av_free(avformat_opts);
  2338. av_free(sws_opts);
  2339. #if CONFIG_AVFILTER
  2340. avfilter_uninit();
  2341. #endif
  2342. if (show_status)
  2343. printf("\n");
  2344. SDL_Quit();
  2345. exit(0);
  2346. }
  2347. static void toggle_audio_display(void)
  2348. {
  2349. if (cur_stream) {
  2350. int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
  2351. cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
  2352. fill_rectangle(screen,
  2353. cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
  2354. bgcolor);
  2355. SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
  2356. }
  2357. }
  2358. /* handle an event sent by the GUI */
  2359. static void event_loop(void)
  2360. {
  2361. SDL_Event event;
  2362. double incr, pos, frac;
  2363. for(;;) {
  2364. double x;
  2365. SDL_WaitEvent(&event);
  2366. switch(event.type) {
  2367. case SDL_KEYDOWN:
  2368. switch(event.key.keysym.sym) {
  2369. case SDLK_ESCAPE:
  2370. case SDLK_q:
  2371. do_exit();
  2372. break;
  2373. case SDLK_f:
  2374. toggle_full_screen();
  2375. break;
  2376. case SDLK_p:
  2377. case SDLK_SPACE:
  2378. toggle_pause();
  2379. break;
  2380. case SDLK_s: //S: Step to next frame
  2381. step_to_next_frame();
  2382. break;
  2383. case SDLK_a:
  2384. if (cur_stream)
  2385. stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
  2386. break;
  2387. case SDLK_v:
  2388. if (cur_stream)
  2389. stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
  2390. break;
  2391. case SDLK_t:
  2392. if (cur_stream)
  2393. stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
  2394. break;
  2395. case SDLK_w:
  2396. toggle_audio_display();
  2397. break;
  2398. case SDLK_LEFT:
  2399. incr = -10.0;
  2400. goto do_seek;
  2401. case SDLK_RIGHT:
  2402. incr = 10.0;
  2403. goto do_seek;
  2404. case SDLK_UP:
  2405. incr = 60.0;
  2406. goto do_seek;
  2407. case SDLK_DOWN:
  2408. incr = -60.0;
  2409. do_seek:
  2410. if (cur_stream) {
  2411. if (seek_by_bytes) {
  2412. if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
  2413. pos= cur_stream->video_current_pos;
  2414. }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
  2415. pos= cur_stream->audio_pkt.pos;
  2416. }else
  2417. pos = url_ftell(cur_stream->ic->pb);
  2418. if (cur_stream->ic->bit_rate)
  2419. incr *= cur_stream->ic->bit_rate / 8.0;
  2420. else
  2421. incr *= 180000.0;
  2422. pos += incr;
  2423. stream_seek(cur_stream, pos, incr, 1);
  2424. } else {
  2425. pos = get_master_clock(cur_stream);
  2426. pos += incr;
  2427. stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
  2428. }
  2429. }
  2430. break;
  2431. default:
  2432. break;
  2433. }
  2434. break;
  2435. case SDL_MOUSEBUTTONDOWN:
  2436. case SDL_MOUSEMOTION:
  2437. if(event.type ==SDL_MOUSEBUTTONDOWN){
  2438. x= event.button.x;
  2439. }else{
  2440. if(event.motion.state != SDL_PRESSED)
  2441. break;
  2442. x= event.motion.x;
  2443. }
  2444. if (cur_stream) {
  2445. if(seek_by_bytes || cur_stream->ic->duration<=0){
  2446. uint64_t size= url_fsize(cur_stream->ic->pb);
  2447. stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
  2448. }else{
  2449. int64_t ts;
  2450. int ns, hh, mm, ss;
  2451. int tns, thh, tmm, tss;
  2452. tns = cur_stream->ic->duration/1000000LL;
  2453. thh = tns/3600;
  2454. tmm = (tns%3600)/60;
  2455. tss = (tns%60);
  2456. frac = x/cur_stream->width;
  2457. ns = frac*tns;
  2458. hh = ns/3600;
  2459. mm = (ns%3600)/60;
  2460. ss = (ns%60);
  2461. fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
  2462. hh, mm, ss, thh, tmm, tss);
  2463. ts = frac*cur_stream->ic->duration;
  2464. if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
  2465. ts += cur_stream->ic->start_time;
  2466. stream_seek(cur_stream, ts, 0, 0);
  2467. }
  2468. }
  2469. break;
  2470. case SDL_VIDEORESIZE:
  2471. if (cur_stream) {
  2472. screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
  2473. SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
  2474. screen_width = cur_stream->width = event.resize.w;
  2475. screen_height= cur_stream->height= event.resize.h;
  2476. }
  2477. break;
  2478. case SDL_QUIT:
  2479. case FF_QUIT_EVENT:
  2480. do_exit();
  2481. break;
  2482. case FF_ALLOC_EVENT:
  2483. video_open(event.user.data1);
  2484. alloc_picture(event.user.data1);
  2485. break;
  2486. case FF_REFRESH_EVENT:
  2487. video_refresh_timer(event.user.data1);
  2488. cur_stream->refresh=0;
  2489. break;
  2490. default:
  2491. break;
  2492. }
  2493. }
  2494. }
  2495. static void opt_frame_size(const char *arg)
  2496. {
  2497. if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
  2498. fprintf(stderr, "Incorrect frame size\n");
  2499. exit(1);
  2500. }
  2501. if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
  2502. fprintf(stderr, "Frame size must be a multiple of 2\n");
  2503. exit(1);
  2504. }
  2505. }
  2506. static int opt_width(const char *opt, const char *arg)
  2507. {
  2508. screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
  2509. return 0;
  2510. }
  2511. static int opt_height(const char *opt, const char *arg)
  2512. {
  2513. screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
  2514. return 0;
  2515. }
  2516. static void opt_format(const char *arg)
  2517. {
  2518. file_iformat = av_find_input_format(arg);
  2519. if (!file_iformat) {
  2520. fprintf(stderr, "Unknown input format: %s\n", arg);
  2521. exit(1);
  2522. }
  2523. }
  2524. static void opt_frame_pix_fmt(const char *arg)
  2525. {
  2526. frame_pix_fmt = av_get_pix_fmt(arg);
  2527. }
  2528. static int opt_sync(const char *opt, const char *arg)
  2529. {
  2530. if (!strcmp(arg, "audio"))
  2531. av_sync_type = AV_SYNC_AUDIO_MASTER;
  2532. else if (!strcmp(arg, "video"))
  2533. av_sync_type = AV_SYNC_VIDEO_MASTER;
  2534. else if (!strcmp(arg, "ext"))
  2535. av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
  2536. else {
  2537. fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
  2538. exit(1);
  2539. }
  2540. return 0;
  2541. }
  2542. static int opt_seek(const char *opt, const char *arg)
  2543. {
  2544. start_time = parse_time_or_die(opt, arg, 1);
  2545. return 0;
  2546. }
  2547. static int opt_debug(const char *opt, const char *arg)
  2548. {
  2549. av_log_set_level(99);
  2550. debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
  2551. return 0;
  2552. }
  2553. static int opt_vismv(const char *opt, const char *arg)
  2554. {
  2555. debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
  2556. return 0;
  2557. }
  2558. static int opt_thread_count(const char *opt, const char *arg)
  2559. {
  2560. thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
  2561. #if !HAVE_THREADS
  2562. fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
  2563. #endif
  2564. return 0;
  2565. }
  2566. static const OptionDef options[] = {
  2567. #include "cmdutils_common_opts.h"
  2568. { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
  2569. { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
  2570. { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
  2571. { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
  2572. { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
  2573. { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
  2574. { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
  2575. { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
  2576. { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
  2577. { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
  2578. { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
  2579. { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
  2580. { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
  2581. { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
  2582. { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
  2583. { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
  2584. { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
  2585. { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
  2586. { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
  2587. { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
  2588. { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
  2589. { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
  2590. { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
  2591. { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
  2592. { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
  2593. { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
  2594. { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
  2595. { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
  2596. { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
  2597. { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
  2598. { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
  2599. { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
  2600. #if CONFIG_AVFILTER
  2601. { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
  2602. #endif
  2603. { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
  2604. { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
  2605. { NULL, },
  2606. };
  2607. static void show_usage(void)
  2608. {
  2609. printf("Simple media player\n");
  2610. printf("usage: ffplay [options] input_file\n");
  2611. printf("\n");
  2612. }
  2613. static void show_help(void)
  2614. {
  2615. show_usage();
  2616. show_help_options(options, "Main options:\n",
  2617. OPT_EXPERT, 0);
  2618. show_help_options(options, "\nAdvanced options:\n",
  2619. OPT_EXPERT, OPT_EXPERT);
  2620. printf("\nWhile playing:\n"
  2621. "q, ESC quit\n"
  2622. "f toggle full screen\n"
  2623. "p, SPC pause\n"
  2624. "a cycle audio channel\n"
  2625. "v cycle video channel\n"
  2626. "t cycle subtitle channel\n"
  2627. "w show audio waves\n"
  2628. "left/right seek backward/forward 10 seconds\n"
  2629. "down/up seek backward/forward 1 minute\n"
  2630. "mouse click seek to percentage in file corresponding to fraction of width\n"
  2631. );
  2632. }
  2633. static void opt_input_file(const char *filename)
  2634. {
  2635. if (input_filename) {
  2636. fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
  2637. filename, input_filename);
  2638. exit(1);
  2639. }
  2640. if (!strcmp(filename, "-"))
  2641. filename = "pipe:";
  2642. input_filename = filename;
  2643. }
2644. /* Called from the main thread */
  2645. int main(int argc, char **argv)
  2646. {
  2647. int flags, i;
2648. /* register all codecs, demuxers and protocols */
  2649. avcodec_register_all();
  2650. avdevice_register_all();
  2651. #if CONFIG_AVFILTER
  2652. avfilter_register_all();
  2653. #endif
  2654. av_register_all();
  2655. for(i=0; i<CODEC_TYPE_NB; i++){
  2656. avcodec_opts[i]= avcodec_alloc_context2(i);
  2657. }
  2658. avformat_opts = avformat_alloc_context();
  2659. #if !CONFIG_AVFILTER
  2660. sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
  2661. #endif
  2662. show_banner();
  2663. parse_options(argc, argv, options, opt_input_file);
  2664. if (!input_filename) {
  2665. show_usage();
  2666. fprintf(stderr, "An input file must be specified\n");
  2667. fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
  2668. exit(1);
  2669. }
  2670. if (display_disable) {
  2671. video_disable = 1;
  2672. }
  2673. flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
  2674. #if !defined(__MINGW32__) && !defined(__APPLE__)
  2675. flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
  2676. #endif
  2677. if (SDL_Init (flags)) {
  2678. fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
  2679. exit(1);
  2680. }
  2681. if (!display_disable) {
  2682. #if HAVE_SDL_VIDEO_SIZE
  2683. const SDL_VideoInfo *vi = SDL_GetVideoInfo();
  2684. fs_screen_width = vi->current_w;
  2685. fs_screen_height = vi->current_h;
  2686. #endif
  2687. }
  2688. SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
  2689. SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
  2690. SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
  2691. av_init_packet(&flush_pkt);
  2692. flush_pkt.data= "FLUSH";
  2693. cur_stream = stream_open(input_filename, file_iformat);
  2694. event_loop();
  2695. /* never returns */
  2696. return 0;
  2697. }