1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavutil/avassert.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avcodec.h"
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "ffplay";
59 const int program_birth_year = 2003;
60
61 //#define DEBUG
62 //#define DEBUG_SYNC
63
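/* Limits checked by the read (demux) thread: once the packet queues grow
   beyond them, reading pauses so memory use stays bounded while the
   decoders catch up. */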
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if too big error */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 #define FRAME_SKIP_FACTOR 0.05
78
79 /* maximum audio speed change to get correct sync */
80 #define SAMPLE_CORRECTION_PERCENT_MAX 10
81
82 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
83 #define AUDIO_DIFF_AVG_NB   20
84
85 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
86 #define SAMPLE_ARRAY_SIZE (2*65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
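/* Thread-safe FIFO of demuxed packets.  Producer and consumer run in
   different threads, so access is serialized with an SDL mutex and a
   condition variable; 'size' tracks the buffered bytes (packet data plus
   list overhead). */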
90 typedef struct PacketQueue {
91     AVPacketList *first_pkt, *last_pkt;
92     int nb_packets;
93     int size;
94     int abort_request;
95     SDL_mutex *mutex;
96     SDL_cond *cond;
97 } PacketQueue;
98
99 #define VIDEO_PICTURE_QUEUE_SIZE 2
100 #define SUBPICTURE_QUEUE_SIZE 4
101
102 typedef struct VideoPicture {
103     double pts;                                  ///<presentation time stamp for this picture
104     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
105     int64_t pos;                                 ///<byte position in file
106     SDL_Overlay *bmp;
107     int width, height; /* source height & width */
108     int allocated;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
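/* Global state of the player: demuxer context, per-stream packet queues,
   decoded picture and subtitle queues, the clocks used for A/V
   synchronization, and the SDL threads driving demuxing, decoding and
   display. */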
127 typedef struct VideoState {
128     SDL_Thread *read_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142
143     int audio_stream;
144
145     int av_sync_type;
146     double external_clock; /* external clock base */
147     int64_t external_clock_time;
148
149     double audio_clock;
150     double audio_diff_cum; /* used for AV difference average computation */
151     double audio_diff_avg_coef;
152     double audio_diff_threshold;
153     int audio_diff_avg_count;
154     AVStream *audio_st;
155     PacketQueue audioq;
156     int audio_hw_buf_size;
157     /* samples output by the codec. we reserve more space for avsync
158        compensation */
159     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161     uint8_t *audio_buf;
162     unsigned int audio_buf_size; /* in bytes */
163     int audio_buf_index; /* in bytes */
164     AVPacket audio_pkt_temp;
165     AVPacket audio_pkt;
166     enum AVSampleFormat audio_src_fmt;
167     AVAudioConvert *reformat_ctx;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static void show_help(void);
221
222 /* options specified by the user */
223 static AVInputFormat *file_iformat;
224 static const char *input_filename;
225 static const char *window_title;
226 static int fs_screen_width;
227 static int fs_screen_height;
228 static int screen_width = 0;
229 static int screen_height = 0;
230 static int frame_width = 0;
231 static int frame_height = 0;
232 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
233 static int audio_disable;
234 static int video_disable;
235 static int wanted_stream[AVMEDIA_TYPE_NB]={
236     [AVMEDIA_TYPE_AUDIO]=-1,
237     [AVMEDIA_TYPE_VIDEO]=-1,
238     [AVMEDIA_TYPE_SUBTITLE]=-1,
239 };
240 static int seek_by_bytes=-1;
241 static int display_disable;
242 static int show_status = 1;
243 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
244 static int64_t start_time = AV_NOPTS_VALUE;
245 static int64_t duration = AV_NOPTS_VALUE;
246 static int debug = 0;
247 static int debug_mv = 0;
248 static int step = 0;
249 static int thread_count = 1;
250 static int workaround_bugs = 1;
251 static int fast = 0;
252 static int genpts = 0;
253 static int lowres = 0;
254 static int idct = FF_IDCT_AUTO;
255 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
258 static int error_recognition = FF_ER_CAREFUL;
259 static int error_concealment = 3;
260 static int decoder_reorder_pts= -1;
261 static int autoexit;
262 static int exit_on_keydown;
263 static int exit_on_mousedown;
264 static int loop=1;
265 static int framedrop=1;
266 static enum ShowMode show_mode = SHOW_MODE_NONE;
267
268 static int rdftspeed=20;
269 #if CONFIG_AVFILTER
270 static char *vfilters = NULL;
271 #endif
272
273 /* current context */
274 static int is_full_screen;
275 static VideoState *cur_stream;
276 static int64_t audio_callback_time;
277
278 static AVPacket flush_pkt;
279
280 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
281 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283
284 static SDL_Surface *screen;
285
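/* Append a packet to the queue and signal any waiting consumer.  Ownership
   passes to the queue; the packet data is duplicated with av_dup_packet()
   unless it is the special flush packet.  Returns 0 on success, -1 on
   failure. */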
286 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
287 {
288     AVPacketList *pkt1;
289
290     /* duplicate the packet */
291     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
292         return -1;
293
294     pkt1 = av_malloc(sizeof(AVPacketList));
295     if (!pkt1)
296         return -1;
297     pkt1->pkt = *pkt;
298     pkt1->next = NULL;
299
300
301     SDL_LockMutex(q->mutex);
302
303     if (!q->last_pkt)
305         q->first_pkt = pkt1;
306     else
307         q->last_pkt->next = pkt1;
308     q->last_pkt = pkt1;
309     q->nb_packets++;
310     q->size += pkt1->pkt.size + sizeof(*pkt1);
311     /* XXX: should duplicate packet data in DV case */
312     SDL_CondSignal(q->cond);
313
314     SDL_UnlockMutex(q->mutex);
315     return 0;
316 }
317
318 /* packet queue handling */
319 static void packet_queue_init(PacketQueue *q)
320 {
321     memset(q, 0, sizeof(PacketQueue));
322     q->mutex = SDL_CreateMutex();
323     q->cond = SDL_CreateCond();
324     packet_queue_put(q, &flush_pkt);
325 }
326
327 static void packet_queue_flush(PacketQueue *q)
328 {
329     AVPacketList *pkt, *pkt1;
330
331     SDL_LockMutex(q->mutex);
332     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
333         pkt1 = pkt->next;
334         av_free_packet(&pkt->pkt);
335         av_freep(&pkt);
336     }
337     q->last_pkt = NULL;
338     q->first_pkt = NULL;
339     q->nb_packets = 0;
340     q->size = 0;
341     SDL_UnlockMutex(q->mutex);
342 }
343
344 static void packet_queue_end(PacketQueue *q)
345 {
346     packet_queue_flush(q);
347     SDL_DestroyMutex(q->mutex);
348     SDL_DestroyCond(q->cond);
349 }
350
351 static void packet_queue_abort(PacketQueue *q)
352 {
353     SDL_LockMutex(q->mutex);
354
355     q->abort_request = 1;
356
357     SDL_CondSignal(q->cond);
358
359     SDL_UnlockMutex(q->mutex);
360 }
361
362 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364 {
365     AVPacketList *pkt1;
366     int ret;
367
368     SDL_LockMutex(q->mutex);
369
370     for(;;) {
371         if (q->abort_request) {
372             ret = -1;
373             break;
374         }
375
376         pkt1 = q->first_pkt;
377         if (pkt1) {
378             q->first_pkt = pkt1->next;
379             if (!q->first_pkt)
380                 q->last_pkt = NULL;
381             q->nb_packets--;
382             q->size -= pkt1->pkt.size + sizeof(*pkt1);
383             *pkt = pkt1->pkt;
384             av_free(pkt1);
385             ret = 1;
386             break;
387         } else if (!block) {
388             ret = 0;
389             break;
390         } else {
391             SDL_CondWait(q->cond, q->mutex);
392         }
393     }
394     SDL_UnlockMutex(q->mutex);
395     return ret;
396 }
397
398 static inline void fill_rectangle(SDL_Surface *screen,
399                                   int x, int y, int w, int h, int color)
400 {
401     SDL_Rect rect;
402     rect.x = x;
403     rect.y = y;
404     rect.w = w;
405     rect.h = h;
406     SDL_FillRect(screen, &rect, color);
407 }
408
409 #if 0
410 /* draw only the border of a rectangle */
411 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
412 {
413     int w1, w2, h1, h2;
414
415     /* fill the background */
416     w1 = x;
417     if (w1 < 0)
418         w1 = 0;
419     w2 = s->width - (x + w);
420     if (w2 < 0)
421         w2 = 0;
422     h1 = y;
423     if (h1 < 0)
424         h1 = 0;
425     h2 = s->height - (y + h);
426     if (h2 < 0)
427         h2 = 0;
428     fill_rectangle(screen,
429                    s->xleft, s->ytop,
430                    w1, s->height,
431                    color);
432     fill_rectangle(screen,
433                    s->xleft + s->width - w2, s->ytop,
434                    w2, s->height,
435                    color);
436     fill_rectangle(screen,
437                    s->xleft + w1, s->ytop,
438                    s->width - w1 - w2, h1,
439                    color);
440     fill_rectangle(screen,
441                    s->xleft + w1, s->ytop + s->height - h2,
442                    s->width - w1 - w2, h2,
443                    color);
444 }
445 #endif
446
447 #define ALPHA_BLEND(a, oldp, newp, s)\
448 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
449
450 #define RGBA_IN(r, g, b, a, s)\
451 {\
452     unsigned int v = ((const uint32_t *)(s))[0];\
453     a = (v >> 24) & 0xff;\
454     r = (v >> 16) & 0xff;\
455     g = (v >> 8) & 0xff;\
456     b = v & 0xff;\
457 }
458
459 #define YUVA_IN(y, u, v, a, s, pal)\
460 {\
461     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
462     a = (val >> 24) & 0xff;\
463     y = (val >> 16) & 0xff;\
464     u = (val >> 8) & 0xff;\
465     v = val & 0xff;\
466 }
467
468 #define YUVA_OUT(d, y, u, v, a)\
469 {\
470     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
471 }
472
473
474 #define BPP 1
475
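/* Alpha-blend one PAL8 subtitle rectangle (palette entries packed as AYUV)
   onto a YUV420P destination picture, averaging chroma over 2x2 blocks of
   luma pixels to match the 4:2:0 subsampling. */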
476 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
477 {
478     int wrap, wrap3, width2, skip2;
479     int y, u, v, a, u1, v1, a1, w, h;
480     uint8_t *lum, *cb, *cr;
481     const uint8_t *p;
482     const uint32_t *pal;
483     int dstx, dsty, dstw, dsth;
484
485     dstw = av_clip(rect->w, 0, imgw);
486     dsth = av_clip(rect->h, 0, imgh);
487     dstx = av_clip(rect->x, 0, imgw - dstw);
488     dsty = av_clip(rect->y, 0, imgh - dsth);
489     lum = dst->data[0] + dsty * dst->linesize[0];
490     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
491     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
492
493     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
494     skip2 = dstx >> 1;
495     wrap = dst->linesize[0];
496     wrap3 = rect->pict.linesize[0];
497     p = rect->pict.data[0];
498     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
499
500     if (dsty & 1) {
501         lum += dstx;
502         cb += skip2;
503         cr += skip2;
504
505         if (dstx & 1) {
506             YUVA_IN(y, u, v, a, p, pal);
507             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
508             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
509             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
510             cb++;
511             cr++;
512             lum++;
513             p += BPP;
514         }
515         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 = u;
518             v1 = v;
519             a1 = a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521
522             YUVA_IN(y, u, v, a, p + BPP, pal);
523             u1 += u;
524             v1 += v;
525             a1 += a;
526             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
527             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529             cb++;
530             cr++;
531             p += 2 * BPP;
532             lum += 2;
533         }
534         if (w) {
535             YUVA_IN(y, u, v, a, p, pal);
536             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
538             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
539             p++;
540             lum++;
541         }
542         p += wrap3 - dstw * BPP;
543         lum += wrap - dstw - dstx;
544         cb += dst->linesize[1] - width2 - skip2;
545         cr += dst->linesize[2] - width2 - skip2;
546     }
547     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
548         lum += dstx;
549         cb += skip2;
550         cr += skip2;
551
552         if (dstx & 1) {
553             YUVA_IN(y, u, v, a, p, pal);
554             u1 = u;
555             v1 = v;
556             a1 = a;
557             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
558             p += wrap3;
559             lum += wrap;
560             YUVA_IN(y, u, v, a, p, pal);
561             u1 += u;
562             v1 += v;
563             a1 += a;
564             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
566             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
567             cb++;
568             cr++;
569             p += -wrap3 + BPP;
570             lum += -wrap + 1;
571         }
572         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
573             YUVA_IN(y, u, v, a, p, pal);
574             u1 = u;
575             v1 = v;
576             a1 = a;
577             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578
579             YUVA_IN(y, u, v, a, p + BPP, pal);
580             u1 += u;
581             v1 += v;
582             a1 += a;
583             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
584             p += wrap3;
585             lum += wrap;
586
587             YUVA_IN(y, u, v, a, p, pal);
588             u1 += u;
589             v1 += v;
590             a1 += a;
591             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
592
593             YUVA_IN(y, u, v, a, p + BPP, pal);
594             u1 += u;
595             v1 += v;
596             a1 += a;
597             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
598
599             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
600             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
601
602             cb++;
603             cr++;
604             p += -wrap3 + 2 * BPP;
605             lum += -wrap + 2;
606         }
607         if (w) {
608             YUVA_IN(y, u, v, a, p, pal);
609             u1 = u;
610             v1 = v;
611             a1 = a;
612             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613             p += wrap3;
614             lum += wrap;
615             YUVA_IN(y, u, v, a, p, pal);
616             u1 += u;
617             v1 += v;
618             a1 += a;
619             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622             cb++;
623             cr++;
624             p += -wrap3 + BPP;
625             lum += -wrap + 1;
626         }
627         p += wrap3 + (wrap3 - dstw * BPP);
628         lum += wrap + (wrap - dstw - dstx);
629         cb += dst->linesize[1] - width2 - skip2;
630         cr += dst->linesize[2] - width2 - skip2;
631     }
632     /* handle odd height */
633     if (h) {
634         lum += dstx;
635         cb += skip2;
636         cr += skip2;
637
638         if (dstx & 1) {
639             YUVA_IN(y, u, v, a, p, pal);
640             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
643             cb++;
644             cr++;
645             lum++;
646             p += BPP;
647         }
648         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
649             YUVA_IN(y, u, v, a, p, pal);
650             u1 = u;
651             v1 = v;
652             a1 = a;
653             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
654
655             YUVA_IN(y, u, v, a, p + BPP, pal);
656             u1 += u;
657             v1 += v;
658             a1 += a;
659             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
660             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
661             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
662             cb++;
663             cr++;
664             p += 2 * BPP;
665             lum += 2;
666         }
667         if (w) {
668             YUVA_IN(y, u, v, a, p, pal);
669             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
670             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
671             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
672         }
673     }
674 }
675
676 static void free_subpicture(SubPicture *sp)
677 {
678     avsubtitle_free(&sp->sub);
679 }
680
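/* Display the picture at the read index of the picture queue: blend any
   pending subtitle into the YUV overlay and letterbox it inside the window
   according to the source aspect ratio. */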
681 static void video_image_display(VideoState *is)
682 {
683     VideoPicture *vp;
684     SubPicture *sp;
685     AVPicture pict;
686     float aspect_ratio;
687     int width, height, x, y;
688     SDL_Rect rect;
689     int i;
690
691     vp = &is->pictq[is->pictq_rindex];
692     if (vp->bmp) {
693 #if CONFIG_AVFILTER
694          if (vp->picref->video->sample_aspect_ratio.num == 0)
695              aspect_ratio = 0;
696          else
697              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
698 #else
699
700         /* XXX: use variable in the frame */
701         if (is->video_st->sample_aspect_ratio.num)
702             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
703         else if (is->video_st->codec->sample_aspect_ratio.num)
704             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
705         else
706             aspect_ratio = 0;
707 #endif
708         if (aspect_ratio <= 0.0)
709             aspect_ratio = 1.0;
710         aspect_ratio *= (float)vp->width / (float)vp->height;
711
712         if (is->subtitle_st) {
713             if (is->subpq_size > 0) {
714                 sp = &is->subpq[is->subpq_rindex];
715
716                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
717                     SDL_LockYUVOverlay (vp->bmp);
718
719                     pict.data[0] = vp->bmp->pixels[0];
720                     pict.data[1] = vp->bmp->pixels[2];
721                     pict.data[2] = vp->bmp->pixels[1];
722
723                     pict.linesize[0] = vp->bmp->pitches[0];
724                     pict.linesize[1] = vp->bmp->pitches[2];
725                     pict.linesize[2] = vp->bmp->pitches[1];
726
727                     for (i = 0; i < sp->sub.num_rects; i++)
728                         blend_subrect(&pict, sp->sub.rects[i],
729                                       vp->bmp->w, vp->bmp->h);
730
731                     SDL_UnlockYUVOverlay (vp->bmp);
732                 }
733             }
734         }
735
736
737         /* XXX: we suppose the screen has a 1.0 pixel ratio */
738         height = is->height;
739         width = ((int)rint(height * aspect_ratio)) & ~1;
740         if (width > is->width) {
741             width = is->width;
742             height = ((int)rint(width / aspect_ratio)) & ~1;
743         }
744         x = (is->width - width) / 2;
745         y = (is->height - height) / 2;
746         if (!is->no_background) {
747             /* fill the background */
748             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
749         } else {
750             is->no_background = 0;
751         }
752         rect.x = is->xleft + x;
753         rect.y = is->ytop  + y;
754         rect.w = FFMAX(width,  1);
755         rect.h = FFMAX(height, 1);
756         SDL_DisplayYUVOverlay(vp->bmp, &rect);
757     } else {
758 #if 0
759         fill_rectangle(screen,
760                        is->xleft, is->ytop, is->width, is->height,
761                        QERGB(0x00, 0x00, 0x00));
762 #endif
763     }
764 }
765
766 /* get the current audio output buffer size, in bytes. With SDL, we
767    cannot get precise information */
768 static int audio_write_get_buf_size(VideoState *is)
769 {
770     return is->audio_buf_size - is->audio_buf_index;
771 }
772
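/* Positive modulo: wrap a possibly negative index into [0, b). */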
773 static inline int compute_mod(int a, int b)
774 {
775     return a < 0 ? a%b + b : a%b;
776 }
777
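/* Audio visualization: draw either a per-channel waveform (SHOW_MODE_WAVES)
   or one RDFT spectrogram column (SHOW_MODE_RDFT), centered on the samples
   currently being played back. */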
778 static void video_audio_display(VideoState *s)
779 {
780     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
781     int ch, channels, h, h2, bgcolor, fgcolor;
782     int64_t time_diff;
783     int rdft_bits, nb_freq;
784
785     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
786         ;
787     nb_freq= 1<<(rdft_bits-1);
788
789     /* compute display index : center on currently output samples */
790     channels = s->audio_st->codec->channels;
791     nb_display_channels = channels;
792     if (!s->paused) {
793         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
794         n = 2 * channels;
795         delay = audio_write_get_buf_size(s);
796         delay /= n;
797
798         /* to be more precise, we take into account the time spent since
799            the last buffer computation */
800         if (audio_callback_time) {
801             time_diff = av_gettime() - audio_callback_time;
802             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
803         }
804
805         delay += 2*data_used;
806         if (delay < data_used)
807             delay = data_used;
808
809         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
810         if (s->show_mode == SHOW_MODE_WAVES) {
811             h= INT_MIN;
812             for(i=0; i<1000; i+=channels){
813                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
814                 int a= s->sample_array[idx];
815                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
816                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
817                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
818                 int score= a-d;
819                 if(h<score && (b^c)<0){
820                     h= score;
821                     i_start= idx;
822                 }
823             }
824         }
825
826         s->last_i_start = i_start;
827     } else {
828         i_start = s->last_i_start;
829     }
830
831     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
832     if (s->show_mode == SHOW_MODE_WAVES) {
833         fill_rectangle(screen,
834                        s->xleft, s->ytop, s->width, s->height,
835                        bgcolor);
836
837         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
838
839         /* total height for one channel */
840         h = s->height / nb_display_channels;
841         /* graph height / 2 */
842         h2 = (h * 9) / 20;
843         for(ch = 0;ch < nb_display_channels; ch++) {
844             i = i_start + ch;
845             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
846             for(x = 0; x < s->width; x++) {
847                 y = (s->sample_array[i] * h2) >> 15;
848                 if (y < 0) {
849                     y = -y;
850                     ys = y1 - y;
851                 } else {
852                     ys = y1;
853                 }
854                 fill_rectangle(screen,
855                                s->xleft + x, ys, 1, y,
856                                fgcolor);
857                 i += channels;
858                 if (i >= SAMPLE_ARRAY_SIZE)
859                     i -= SAMPLE_ARRAY_SIZE;
860             }
861         }
862
863         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
864
865         for(ch = 1;ch < nb_display_channels; ch++) {
866             y = s->ytop + ch * h;
867             fill_rectangle(screen,
868                            s->xleft, y, s->width, 1,
869                            fgcolor);
870         }
871         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
872     }else{
873         nb_display_channels= FFMIN(nb_display_channels, 2);
874         if(rdft_bits != s->rdft_bits){
875             av_rdft_end(s->rdft);
876             av_free(s->rdft_data);
877             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
878             s->rdft_bits= rdft_bits;
879             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
880         }
881         {
882             FFTSample *data[2];
883             for(ch = 0;ch < nb_display_channels; ch++) {
884                 data[ch] = s->rdft_data + 2*nb_freq*ch;
885                 i = i_start + ch;
886                 for(x = 0; x < 2*nb_freq; x++) {
887                     double w= (x-nb_freq)*(1.0/nb_freq);
888                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
889                     i += channels;
890                     if (i >= SAMPLE_ARRAY_SIZE)
891                         i -= SAMPLE_ARRAY_SIZE;
892                 }
893                 av_rdft_calc(s->rdft, data[ch]);
894             }
895             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough
896             for(y=0; y<s->height; y++){
897                 double w= 1/sqrt(nb_freq);
898                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
899                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
900                        + data[1][2*y+1]*data[1][2*y+1])) : a;
901                 a= FFMIN(a,255);
902                 b= FFMIN(b,255);
903                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
904
905                 fill_rectangle(screen,
906                             s->xpos, s->height-y, 1, 1,
907                             fgcolor);
908             }
909         }
910         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
911         s->xpos++;
912         if(s->xpos >= s->width)
913             s->xpos= s->xleft;
914     }
915 }
916
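/* (Re)open the SDL video surface, picking the size from the fullscreen
   resolution, the user-specified window size, the video stream (or filter
   output) dimensions, or a 640x480 fallback, in that order. */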
917 static int video_open(VideoState *is){
918     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
919     int w,h;
920
921     if(is_full_screen) flags |= SDL_FULLSCREEN;
922     else               flags |= SDL_RESIZABLE;
923
924     if (is_full_screen && fs_screen_width) {
925         w = fs_screen_width;
926         h = fs_screen_height;
927     } else if(!is_full_screen && screen_width){
928         w = screen_width;
929         h = screen_height;
930 #if CONFIG_AVFILTER
931     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
932         w = is->out_video_filter->inputs[0]->w;
933         h = is->out_video_filter->inputs[0]->h;
934 #else
935     }else if (is->video_st && is->video_st->codec->width){
936         w = is->video_st->codec->width;
937         h = is->video_st->codec->height;
938 #endif
939     } else {
940         w = 640;
941         h = 480;
942     }
943     if(screen && is->width == screen->w && screen->w == w
944        && is->height== screen->h && screen->h == h)
945         return 0;
946
947 #ifndef __APPLE__
948     screen = SDL_SetVideoMode(w, h, 0, flags);
949 #else
950     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
951     screen = SDL_SetVideoMode(w, h, 24, flags);
952 #endif
953     if (!screen) {
954         fprintf(stderr, "SDL: could not set video mode - exiting\n");
955         return -1;
956     }
957     if (!window_title)
958         window_title = input_filename;
959     SDL_WM_SetCaption(window_title, window_title);
960
961     is->width = screen->w;
962     is->height = screen->h;
963
964     return 0;
965 }
966
967 /* display the current picture, if any */
968 static void video_display(VideoState *is)
969 {
970     if(!screen)
971         video_open(cur_stream);
972     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
973         video_audio_display(is);
974     else if (is->video_st)
975         video_image_display(is);
976 }
977
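/* Periodically push an FF_REFRESH_EVENT into the SDL event loop so the main
   thread calls video_refresh(); the polling interval depends on whether
   video or an audio visualization is being displayed. */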
978 static int refresh_thread(void *opaque)
979 {
980     VideoState *is= opaque;
981     while(!is->abort_request){
982         SDL_Event event;
983         event.type = FF_REFRESH_EVENT;
984         event.user.data1 = opaque;
985         if(!is->refresh){
986             is->refresh=1;
987             SDL_PushEvent(&event);
988         }
989         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
990         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
991     }
992     return 0;
993 }
994
995 /* get the current audio clock value */
996 static double get_audio_clock(VideoState *is)
997 {
998     double pts;
999     int hw_buf_size, bytes_per_sec;
1000     pts = is->audio_clock;
1001     hw_buf_size = audio_write_get_buf_size(is);
1002     bytes_per_sec = 0;
1003     if (is->audio_st) {
1004         bytes_per_sec = is->audio_st->codec->sample_rate *
1005             2 * is->audio_st->codec->channels;
1006     }
1007     if (bytes_per_sec)
1008         pts -= (double)hw_buf_size / bytes_per_sec;
1009     return pts;
1010 }
1011
1012 /* get the current video clock value */
1013 static double get_video_clock(VideoState *is)
1014 {
1015     if (is->paused) {
1016         return is->video_current_pts;
1017     } else {
1018         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1019     }
1020 }
1021
1022 /* get the current external clock value */
1023 static double get_external_clock(VideoState *is)
1024 {
1025     int64_t ti;
1026     ti = av_gettime();
1027     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1028 }
1029
1030 /* get the current master clock value */
1031 static double get_master_clock(VideoState *is)
1032 {
1033     double val;
1034
1035     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1036         if (is->video_st)
1037             val = get_video_clock(is);
1038         else
1039             val = get_audio_clock(is);
1040     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1041         if (is->audio_st)
1042             val = get_audio_clock(is);
1043         else
1044             val = get_video_clock(is);
1045     } else {
1046         val = get_external_clock(is);
1047     }
1048     return val;
1049 }
1050
1051 /* seek in the stream */
1052 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1053 {
1054     if (!is->seek_req) {
1055         is->seek_pos = pos;
1056         is->seek_rel = rel;
1057         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1058         if (seek_by_bytes)
1059             is->seek_flags |= AVSEEK_FLAG_BYTE;
1060         is->seek_req = 1;
1061     }
1062 }
1063
1064 /* pause or resume the video */
1065 static void stream_toggle_pause(VideoState *is)
1066 {
1067     if (is->paused) {
1068         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1069         if(is->read_pause_return != AVERROR(ENOSYS)){
1070             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1071         }
1072         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1073     }
1074     is->paused = !is->paused;
1075 }
1076
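/* Compute the absolute time at which the frame with the given PTS should be
   shown.  The nominal delay is the PTS difference to the previous frame;
   when audio or an external clock is the master, the delay is shortened or
   lengthened so the video drifts back towards that clock. */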
1077 static double compute_target_time(double frame_current_pts, VideoState *is)
1078 {
1079     double delay, sync_threshold, diff;
1080
1081     /* compute nominal delay */
1082     delay = frame_current_pts - is->frame_last_pts;
1083     if (delay <= 0 || delay >= 10.0) {
1084         /* if incorrect delay, use previous one */
1085         delay = is->frame_last_delay;
1086     } else {
1087         is->frame_last_delay = delay;
1088     }
1089     is->frame_last_pts = frame_current_pts;
1090
1091     /* update delay to follow master synchronisation source */
1092     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1093          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1094         /* if video is slave, we try to correct big delays by
1095            duplicating or deleting a frame */
1096         diff = get_video_clock(is) - get_master_clock(is);
1097
1098         /* skip or repeat frame. We take into account the
1099            delay to compute the threshold. I still don't know
1100            if it is the best guess */
1101         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1102         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1103             if (diff <= -sync_threshold)
1104                 delay = 0;
1105             else if (diff >= sync_threshold)
1106                 delay = 2 * delay;
1107         }
1108     }
1109     is->frame_timer += delay;
1110 #if defined(DEBUG_SYNC)
1111     printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
1112             delay, frame_current_pts, -diff);
1113 #endif
1114
1115     return is->frame_timer;
1116 }
1117
1118 /* called to display each frame */
1119 static void video_refresh(void *opaque)
1120 {
1121     VideoState *is = opaque;
1122     VideoPicture *vp;
1123
1124     SubPicture *sp, *sp2;
1125
1126     if (is->video_st) {
1127 retry:
1128         if (is->pictq_size == 0) {
1129             // nothing to do, no picture to display in the queue
1130         } else {
1131             double time= av_gettime()/1000000.0;
1132             double next_target;
1133             /* dequeue the picture */
1134             vp = &is->pictq[is->pictq_rindex];
1135
1136             if(time < vp->target_clock)
1137                 return;
1138             /* update current video pts */
1139             is->video_current_pts = vp->pts;
1140             is->video_current_pts_drift = is->video_current_pts - time;
1141             is->video_current_pos = vp->pos;
1142             if(is->pictq_size > 1){
1143                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1144                 assert(nextvp->target_clock >= vp->target_clock);
1145                 next_target= nextvp->target_clock;
1146             }else{
1147                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1148             }
1149             if(framedrop && time > next_target){
1150                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1151                 if(is->pictq_size > 1 || time > next_target + 0.5){
1152                     /* update queue size and signal for next picture */
1153                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1154                         is->pictq_rindex = 0;
1155
1156                     SDL_LockMutex(is->pictq_mutex);
1157                     is->pictq_size--;
1158                     SDL_CondSignal(is->pictq_cond);
1159                     SDL_UnlockMutex(is->pictq_mutex);
1160                     goto retry;
1161                 }
1162             }
1163
1164             if(is->subtitle_st) {
1165                 if (is->subtitle_stream_changed) {
1166                     SDL_LockMutex(is->subpq_mutex);
1167
1168                     while (is->subpq_size) {
1169                         free_subpicture(&is->subpq[is->subpq_rindex]);
1170
1171                         /* update queue size and signal for next picture */
1172                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1173                             is->subpq_rindex = 0;
1174
1175                         is->subpq_size--;
1176                     }
1177                     is->subtitle_stream_changed = 0;
1178
1179                     SDL_CondSignal(is->subpq_cond);
1180                     SDL_UnlockMutex(is->subpq_mutex);
1181                 } else {
1182                     if (is->subpq_size > 0) {
1183                         sp = &is->subpq[is->subpq_rindex];
1184
1185                         if (is->subpq_size > 1)
1186                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1187                         else
1188                             sp2 = NULL;
1189
1190                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1191                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1192                         {
1193                             free_subpicture(sp);
1194
1195                             /* update queue size and signal for next picture */
1196                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1197                                 is->subpq_rindex = 0;
1198
1199                             SDL_LockMutex(is->subpq_mutex);
1200                             is->subpq_size--;
1201                             SDL_CondSignal(is->subpq_cond);
1202                             SDL_UnlockMutex(is->subpq_mutex);
1203                         }
1204                     }
1205                 }
1206             }
1207
1208             /* display picture */
1209             if (!display_disable)
1210                 video_display(is);
1211
1212             /* update queue size and signal for next picture */
1213             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1214                 is->pictq_rindex = 0;
1215
1216             SDL_LockMutex(is->pictq_mutex);
1217             is->pictq_size--;
1218             SDL_CondSignal(is->pictq_cond);
1219             SDL_UnlockMutex(is->pictq_mutex);
1220         }
1221     } else if (is->audio_st) {
1222         /* draw the next audio frame */
1223
1224         /* if only audio stream, then display the audio bars (better
1225            than nothing, just to test the implementation) */
1226
1227         /* display picture */
1228         if (!display_disable)
1229             video_display(is);
1230     }
1231     if (show_status) {
1232         static int64_t last_time;
1233         int64_t cur_time;
1234         int aqsize, vqsize, sqsize;
1235         double av_diff;
1236
1237         cur_time = av_gettime();
1238         if (!last_time || (cur_time - last_time) >= 30000) {
1239             aqsize = 0;
1240             vqsize = 0;
1241             sqsize = 0;
1242             if (is->audio_st)
1243                 aqsize = is->audioq.size;
1244             if (is->video_st)
1245                 vqsize = is->videoq.size;
1246             if (is->subtitle_st)
1247                 sqsize = is->subtitleq.size;
1248             av_diff = 0;
1249             if (is->audio_st && is->video_st)
1250                 av_diff = get_audio_clock(is) - get_video_clock(is);
1251             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1252                    get_master_clock(is),
1253                    av_diff,
1254                    FFMAX(is->skip_frames-1, 0),
1255                    aqsize / 1024,
1256                    vqsize / 1024,
1257                    sqsize,
1258                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1259                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1260             fflush(stdout);
1261             last_time = cur_time;
1262         }
1263     }
1264 }
1265
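/* Tear down a VideoState: stop the read and refresh threads, then free the
   queued pictures and the synchronization primitives. */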
1266 static void stream_close(VideoState *is)
1267 {
1268     VideoPicture *vp;
1269     int i;
1270     /* XXX: use a special url_shutdown call to abort parse cleanly */
1271     is->abort_request = 1;
1272     SDL_WaitThread(is->read_tid, NULL);
1273     SDL_WaitThread(is->refresh_tid, NULL);
1274
1275     /* free all pictures */
1276     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1277         vp = &is->pictq[i];
1278 #if CONFIG_AVFILTER
1279         if (vp->picref) {
1280             avfilter_unref_buffer(vp->picref);
1281             vp->picref = NULL;
1282         }
1283 #endif
1284         if (vp->bmp) {
1285             SDL_FreeYUVOverlay(vp->bmp);
1286             vp->bmp = NULL;
1287         }
1288     }
1289     SDL_DestroyMutex(is->pictq_mutex);
1290     SDL_DestroyCond(is->pictq_cond);
1291     SDL_DestroyMutex(is->subpq_mutex);
1292     SDL_DestroyCond(is->subpq_cond);
1293 #if !CONFIG_AVFILTER
1294     if (is->img_convert_ctx)
1295         sws_freeContext(is->img_convert_ctx);
1296 #endif
1297     av_free(is);
1298 }
1299
1300 static void do_exit(void)
1301 {
1302     if (cur_stream) {
1303         stream_close(cur_stream);
1304         cur_stream = NULL;
1305     }
1306     uninit_opts();
1307 #if CONFIG_AVFILTER
1308     avfilter_uninit();
1309 #endif
1310     if (show_status)
1311         printf("\n");
1312     SDL_Quit();
1313     av_log(NULL, AV_LOG_QUIET, "");
1314     exit(0);
1315 }
1316
1317 /* allocate a picture (this needs to be done in the main thread to avoid
1318    potential locking problems) */
1319 static void alloc_picture(void *opaque)
1320 {
1321     VideoState *is = opaque;
1322     VideoPicture *vp;
1323
1324     vp = &is->pictq[is->pictq_windex];
1325
1326     if (vp->bmp)
1327         SDL_FreeYUVOverlay(vp->bmp);
1328
1329 #if CONFIG_AVFILTER
1330     if (vp->picref)
1331         avfilter_unref_buffer(vp->picref);
1332     vp->picref = NULL;
1333
1334     vp->width   = is->out_video_filter->inputs[0]->w;
1335     vp->height  = is->out_video_filter->inputs[0]->h;
1336     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1337 #else
1338     vp->width   = is->video_st->codec->width;
1339     vp->height  = is->video_st->codec->height;
1340     vp->pix_fmt = is->video_st->codec->pix_fmt;
1341 #endif
1342
1343     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1344                                    SDL_YV12_OVERLAY,
1345                                    screen);
1346     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1347         /* SDL allocates a buffer smaller than requested if the video
1348          * overlay hardware is unable to support the requested size. */
1349         fprintf(stderr, "Error: the video system does not support an image\n"
1350                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1351                         "to reduce the image size.\n", vp->width, vp->height );
1352         do_exit();
1353     }
1354
1355     SDL_LockMutex(is->pictq_mutex);
1356     vp->allocated = 1;
1357     SDL_CondSignal(is->pictq_cond);
1358     SDL_UnlockMutex(is->pictq_mutex);
1359 }
1360
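/* Put a decoded frame into the picture queue: update the video clock from
   its PTS, wait for a free queue slot (asking the main thread to allocate or
   resize the SDL overlay if needed) and copy/convert the frame into it. */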
1361 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1362 {
1363     VideoPicture *vp;
1364     double frame_delay, pts = pts1;
1365
1366     /* compute the exact PTS for the picture if it is omitted in the stream
1367      * pts1 is the dts of the pkt / pts of the frame */
1368     if (pts != 0) {
1369         /* update video clock with pts, if present */
1370         is->video_clock = pts;
1371     } else {
1372         pts = is->video_clock;
1373     }
1374     /* update video clock for next frame */
1375     frame_delay = av_q2d(is->video_st->codec->time_base);
1376     /* for MPEG2, the frame can be repeated, so we update the
1377        clock accordingly */
1378     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1379     is->video_clock += frame_delay;
1380
1381 #if defined(DEBUG_SYNC) && 0
1382     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1383            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1384 #endif
1385
1386     /* wait until we have space to put a new picture */
1387     SDL_LockMutex(is->pictq_mutex);
1388
1389     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1390         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1391
1392     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1393            !is->videoq.abort_request) {
1394         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1395     }
1396     SDL_UnlockMutex(is->pictq_mutex);
1397
1398     if (is->videoq.abort_request)
1399         return -1;
1400
1401     vp = &is->pictq[is->pictq_windex];
1402
1403     /* alloc or resize hardware picture buffer */
1404     if (!vp->bmp ||
1405 #if CONFIG_AVFILTER
1406         vp->width  != is->out_video_filter->inputs[0]->w ||
1407         vp->height != is->out_video_filter->inputs[0]->h) {
1408 #else
1409         vp->width != is->video_st->codec->width ||
1410         vp->height != is->video_st->codec->height) {
1411 #endif
1412         SDL_Event event;
1413
1414         vp->allocated = 0;
1415
1416         /* the allocation must be done in the main thread to avoid
1417            locking problems */
1418         event.type = FF_ALLOC_EVENT;
1419         event.user.data1 = is;
1420         SDL_PushEvent(&event);
1421
1422         /* wait until the picture is allocated */
1423         SDL_LockMutex(is->pictq_mutex);
1424         while (!vp->allocated && !is->videoq.abort_request) {
1425             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1426         }
1427         SDL_UnlockMutex(is->pictq_mutex);
1428
1429         if (is->videoq.abort_request)
1430             return -1;
1431     }
1432
1433     /* if the frame is not skipped, then display it */
1434     if (vp->bmp) {
1435         AVPicture pict;
1436 #if CONFIG_AVFILTER
1437         if(vp->picref)
1438             avfilter_unref_buffer(vp->picref);
1439         vp->picref = src_frame->opaque;
1440 #endif
1441
1442         /* get a pointer to the bitmap */
1443         SDL_LockYUVOverlay (vp->bmp);
1444
1445         memset(&pict,0,sizeof(AVPicture));
1446         pict.data[0] = vp->bmp->pixels[0];
1447         pict.data[1] = vp->bmp->pixels[2];
1448         pict.data[2] = vp->bmp->pixels[1];
1449
1450         pict.linesize[0] = vp->bmp->pitches[0];
1451         pict.linesize[1] = vp->bmp->pitches[2];
1452         pict.linesize[2] = vp->bmp->pitches[1];
1453
1454 #if CONFIG_AVFILTER
1455         //FIXME use direct rendering
1456         av_picture_copy(&pict, (AVPicture *)src_frame,
1457                         vp->pix_fmt, vp->width, vp->height);
1458 #else
1459         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1460         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1461             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1462             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1463         if (is->img_convert_ctx == NULL) {
1464             fprintf(stderr, "Cannot initialize the conversion context\n");
1465             exit(1);
1466         }
1467         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1468                   0, vp->height, pict.data, pict.linesize);
1469 #endif
1470         /* update the bitmap content */
1471         SDL_UnlockYUVOverlay(vp->bmp);
1472
1473         vp->pts = pts;
1474         vp->pos = pos;
1475
1476         /* now we can update the picture count */
1477         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1478             is->pictq_windex = 0;
1479         SDL_LockMutex(is->pictq_mutex);
1480         vp->target_clock= compute_target_time(vp->pts, is);
1481
1482         is->pictq_size++;
1483         SDL_UnlockMutex(is->pictq_mutex);
1484     }
1485     return 0;
1486 }
1487
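/* Pull the next packet from the video queue and decode it.  The flush packet
   resets the decoder and the timing state after a seek.  Returns 1 when a
   frame should be displayed, 0 when no frame was produced or the frame is
   dropped to catch up, and -1 if the queue was aborted. */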
1488 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1489 {
1490     int len1, got_picture, i;
1491
1492     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1493         return -1;
1494
1495     if (pkt->data == flush_pkt.data) {
1496         avcodec_flush_buffers(is->video_st->codec);
1497
1498         SDL_LockMutex(is->pictq_mutex);
1499         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1500         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1501             is->pictq[i].target_clock= 0;
1502         }
1503         while (is->pictq_size && !is->videoq.abort_request) {
1504             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1505         }
1506         is->video_current_pos = -1;
1507         SDL_UnlockMutex(is->pictq_mutex);
1508
1509         is->frame_last_pts = AV_NOPTS_VALUE;
1510         is->frame_last_delay = 0;
1511         is->frame_timer = (double)av_gettime() / 1000000.0;
1512         is->skip_frames = 1;
1513         is->skip_frames_index = 0;
1514         return 0;
1515     }
1516
1517     len1 = avcodec_decode_video2(is->video_st->codec,
1518                                  frame, &got_picture,
1519                                  pkt);
1520
1521     if (got_picture) {
1522         if (decoder_reorder_pts == -1) {
1523             *pts = frame->best_effort_timestamp;
1524         } else if (decoder_reorder_pts) {
1525             *pts = frame->pkt_pts;
1526         } else {
1527             *pts = frame->pkt_dts;
1528         }
1529
1530         if (*pts == AV_NOPTS_VALUE) {
1531             *pts = 0;
1532         }
1533
1534         is->skip_frames_index += 1;
1535         if(is->skip_frames_index >= is->skip_frames){
1536             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1537             return 1;
1538         }
1539
1540     }
1541     return 0;
1542 }
1543
1544 #if CONFIG_AVFILTER
1545 typedef struct {
1546     VideoState *is;
1547     AVFrame *frame;
1548     int use_dr1;
1549 } FilterPriv;
1550
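/* get_buffer() callback used for direct rendering: the decoder writes
   straight into a buffer obtained from the filter graph, avoiding an extra
   copy of every frame. */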
1551 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1552 {
1553     AVFilterContext *ctx = codec->opaque;
1554     AVFilterBufferRef  *ref;
1555     int perms = AV_PERM_WRITE;
1556     int i, w, h, stride[4];
1557     unsigned edge;
1558     int pixel_size;
1559
1560     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1561
1562     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1563         perms |= AV_PERM_NEG_LINESIZES;
1564
1565     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1566         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1567         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1568         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1569     }
1570     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1571
1572     w = codec->width;
1573     h = codec->height;
1574
1575     if(av_image_check_size(w, h, 0, codec))
1576         return -1;
1577
1578     avcodec_align_dimensions2(codec, &w, &h, stride);
1579     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1580     w += edge << 1;
1581     h += edge << 1;
1582
1583     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1584         return -1;
1585
1586     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1587     ref->video->w = codec->width;
1588     ref->video->h = codec->height;
1589     for(i = 0; i < 4; i ++) {
1590         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1591         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1592
1593         if (ref->data[i]) {
1594             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1595         }
1596         pic->data[i]     = ref->data[i];
1597         pic->linesize[i] = ref->linesize[i];
1598     }
1599     pic->opaque = ref;
1600     pic->age    = INT_MAX;
1601     pic->type   = FF_BUFFER_TYPE_USER;
1602     pic->reordered_opaque = codec->reordered_opaque;
1603     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1604     else           pic->pkt_pts = AV_NOPTS_VALUE;
1605     return 0;
1606 }
1607
1608 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1609 {
1610     memset(pic->data, 0, sizeof(pic->data));
1611     avfilter_unref_buffer(pic->opaque);
1612 }
1613
1614 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1615 {
1616     AVFilterBufferRef *ref = pic->opaque;
1617
1618     if (pic->data[0] == NULL) {
1619         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1620         return codec->get_buffer(codec, pic);
1621     }
1622
1623     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1624         (codec->pix_fmt != ref->format)) {
1625         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1626         return -1;
1627     }
1628
1629     pic->reordered_opaque = codec->reordered_opaque;
1630     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1631     else           pic->pkt_pts = AV_NOPTS_VALUE;
1632     return 0;
1633 }
1634
1635 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1636 {
1637     FilterPriv *priv = ctx->priv;
1638     AVCodecContext *codec;
1639     if(!opaque) return -1;
1640
1641     priv->is = opaque;
1642     codec    = priv->is->video_st->codec;
1643     codec->opaque = ctx;
1644     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1645         /* DR1: the decoder can render directly into buffers that we supply */
1646         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1647         priv->use_dr1 = 1;
1648         codec->get_buffer     = input_get_buffer;
1649         codec->release_buffer = input_release_buffer;
1650         codec->reget_buffer   = input_reget_buffer;
1651         codec->thread_safe_callbacks = 1;
1652     }
1653
1654     priv->frame = avcodec_alloc_frame();
1655
1656     return 0;
1657 }
1658
1659 static void input_uninit(AVFilterContext *ctx)
1660 {
1661     FilterPriv *priv = ctx->priv;
1662     av_free(priv->frame);
1663 }
1664
1665 static int input_request_frame(AVFilterLink *link)
1666 {
1667     FilterPriv *priv = link->src->priv;
1668     AVFilterBufferRef *picref;
1669     int64_t pts = 0;
1670     AVPacket pkt;
1671     int ret;
1672
1673     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1674         av_free_packet(&pkt);
1675     if (ret < 0)
1676         return -1;
1677
1678     if(priv->use_dr1) {
1679         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1680     } else {
1681         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1682         av_image_copy(picref->data, picref->linesize,
1683                       priv->frame->data, priv->frame->linesize,
1684                       picref->format, link->w, link->h);
1685     }
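    /* with DR1 the decoder already wrote into a buffer obtained from the filter
       graph (see input_get_buffer), so only a new reference is taken; otherwise
       the decoded frame has to be copied into a freshly allocated filter buffer */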
1686     av_free_packet(&pkt);
1687
1688     avfilter_copy_frame_props(picref, priv->frame);
1689     picref->pts = pts;
1690
1691     avfilter_start_frame(link, picref);
1692     avfilter_draw_slice(link, 0, link->h, 1);
1693     avfilter_end_frame(link);
1694
1695     return 0;
1696 }
1697
1698 static int input_query_formats(AVFilterContext *ctx)
1699 {
1700     FilterPriv *priv = ctx->priv;
1701     enum PixelFormat pix_fmts[] = {
1702         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1703     };
1704
1705     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1706     return 0;
1707 }
1708
1709 static int input_config_props(AVFilterLink *link)
1710 {
1711     FilterPriv *priv  = link->src->priv;
1712     AVCodecContext *c = priv->is->video_st->codec;
1713
1714     link->w = c->width;
1715     link->h = c->height;
1716     link->time_base = priv->is->video_st->time_base;
1717
1718     return 0;
1719 }
1720
1721 static AVFilter input_filter =
1722 {
1723     .name      = "ffplay_input",
1724
1725     .priv_size = sizeof(FilterPriv),
1726
1727     .init      = input_init,
1728     .uninit    = input_uninit,
1729
1730     .query_formats = input_query_formats,
1731
1732     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1733     .outputs   = (AVFilterPad[]) {{ .name = "default",
1734                                     .type = AVMEDIA_TYPE_VIDEO,
1735                                     .request_frame = input_request_frame,
1736                                     .config_props  = input_config_props, },
1737                                   { .name = NULL }},
1738 };
1739
1740 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1741 {
1742     char sws_flags_str[128];
1743     int ret;
1744     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1745     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1746     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1747     graph->scale_sws_opts = av_strdup(sws_flags_str);
1748
1749     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1750                                             NULL, is, graph)) < 0)
1751         goto the_end;
1752     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1753                                             NULL, &ffsink_ctx, graph)) < 0)
1754         goto the_end;
1755
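    /* the resulting graph is either  src -> user filter chain -> out  when a
       -vf string was given, or a direct  src -> out  link; "in" and "out" are
       the labels the endpoints of the -vf string attach to */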
1756     if(vfilters) {
1757         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1758         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1759
1760         outputs->name    = av_strdup("in");
1761         outputs->filter_ctx = filt_src;
1762         outputs->pad_idx = 0;
1763         outputs->next    = NULL;
1764
1765         inputs->name    = av_strdup("out");
1766         inputs->filter_ctx = filt_out;
1767         inputs->pad_idx = 0;
1768         inputs->next    = NULL;
1769
1770         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1771             goto the_end;
1772         av_freep(&vfilters);
1773     } else {
1774         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1775             goto the_end;
1776     }
1777
1778     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1779         goto the_end;
1780
1781     is->out_video_filter = filt_out;
1782 the_end:
1783     return ret;
1784 }
1785
1786 #endif  /* CONFIG_AVFILTER */
1787
1788 static int video_thread(void *arg)
1789 {
1790     VideoState *is = arg;
1791     AVFrame *frame= avcodec_alloc_frame();
1792     int64_t pts_int, pos;
1793     double pts;
1794     int ret;
1795
1796 #if CONFIG_AVFILTER
1797     AVFilterGraph *graph = avfilter_graph_alloc();
1798     AVFilterContext *filt_out = NULL;
1799
1800     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1801         goto the_end;
1802     filt_out = is->out_video_filter;
1803 #endif
1804
1805     for(;;) {
1806 #if !CONFIG_AVFILTER
1807         AVPacket pkt;
1808 #else
1809         AVFilterBufferRef *picref;
1810         AVRational tb;
1811 #endif
1812         while (is->paused && !is->videoq.abort_request)
1813             SDL_Delay(10);
1814 #if CONFIG_AVFILTER
1815         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1816         if (picref) {
1817             pts_int = picref->pts;
1818             pos     = picref->pos;
1819             frame->opaque = picref;
1820         }
1821
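        /* the filter graph may run in a different time base than the video
           stream, so rescale the pts back into the stream time base before it
           is turned into seconds below */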
1822         if (av_cmp_q(tb, is->video_st->time_base)) {
1823             av_unused int64_t pts1 = pts_int;
1824             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1825             av_dlog(NULL, "video_thread(): "
1826                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1827                     tb.num, tb.den, pts1,
1828                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1829         }
1830 #else
1831         ret = get_video_frame(is, frame, &pts_int, &pkt);
1832         pos = pkt.pos;
1833         av_free_packet(&pkt);
1834 #endif
1835
1836         if (ret < 0) goto the_end;
1837
1838         if (!ret)
1839             continue;
1840
1841         pts = pts_int*av_q2d(is->video_st->time_base);
1842
1843         ret = queue_picture(is, frame, pts, pos);
1844
1845         if (ret < 0)
1846             goto the_end;
1847
1848         if (step)
1849             if (cur_stream)
1850                 stream_toggle_pause(cur_stream);
1851     }
1852  the_end:
1853 #if CONFIG_AVFILTER
1854     avfilter_graph_free(&graph);
1855 #endif
1856     av_free(frame);
1857     return 0;
1858 }
1859
1860 static int subtitle_thread(void *arg)
1861 {
1862     VideoState *is = arg;
1863     SubPicture *sp;
1864     AVPacket pkt1, *pkt = &pkt1;
1865     int len1, got_subtitle;
1866     double pts;
1867     int i, j;
1868     int r, g, b, y, u, v, a;
1869
1870     for(;;) {
1871         while (is->paused && !is->subtitleq.abort_request) {
1872             SDL_Delay(10);
1873         }
1874         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1875             break;
1876
1877         if(pkt->data == flush_pkt.data){
1878             avcodec_flush_buffers(is->subtitle_st->codec);
1879             continue;
1880         }
1881         SDL_LockMutex(is->subpq_mutex);
1882         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1883                !is->subtitleq.abort_request) {
1884             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1885         }
1886         SDL_UnlockMutex(is->subpq_mutex);
1887
1888         if (is->subtitleq.abort_request)
1889             goto the_end;
1890
1891         sp = &is->subpq[is->subpq_windex];
1892
1893         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1894            this packet, if any */
1895         pts = 0;
1896         if (pkt->pts != AV_NOPTS_VALUE)
1897             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1898
1899         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1900                                     &sp->sub, &got_subtitle,
1901                                     pkt);
1902 //            if (len1 < 0)
1903 //                break;
1904         if (got_subtitle && sp->sub.format == 0) {
1905             sp->pts = pts;
1906
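            /* the subtitle palette arrives as RGBA; convert it in place to
               CCIR YUVA so the display code can blend it directly in YUV */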
1907             for (i = 0; i < sp->sub.num_rects; i++)
1908             {
1909                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1910                 {
1911                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1912                     y = RGB_TO_Y_CCIR(r, g, b);
1913                     u = RGB_TO_U_CCIR(r, g, b, 0);
1914                     v = RGB_TO_V_CCIR(r, g, b, 0);
1915                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1916                 }
1917             }
1918
1919             /* now we can update the subpicture queue count */
1920             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1921                 is->subpq_windex = 0;
1922             SDL_LockMutex(is->subpq_mutex);
1923             is->subpq_size++;
1924             SDL_UnlockMutex(is->subpq_mutex);
1925         }
1926         av_free_packet(pkt);
1927 //        if (step)
1928 //            if (cur_stream)
1929 //                stream_toggle_pause(cur_stream);
1930     }
1931  the_end:
1932     return 0;
1933 }
1934
1935 /* copy samples for the audio visualization (waves/RDFT display) */
1936 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1937 {
1938     int size, len;
1939
1940     size = samples_size / sizeof(short);
1941     while (size > 0) {
1942         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1943         if (len > size)
1944             len = size;
1945         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1946         samples += len;
1947         is->sample_array_index += len;
1948         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1949             is->sample_array_index = 0;
1950         size -= len;
1951     }
1952 }
1953
1954 /* return the new audio buffer size (samples can be added or removed
1955    to get better sync when video or an external clock is the master) */
1956 static int synchronize_audio(VideoState *is, short *samples,
1957                              int samples_size1, double pts)
1958 {
1959     int n, samples_size;
1960     double ref_clock;
1961
1962     n = 2 * is->audio_st->codec->channels;
1963     samples_size = samples_size1;
1964
1965     /* if not master, then we try to remove or add samples to correct the clock */
1966     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1967          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1968         double diff, avg_diff;
1969         int wanted_size, min_size, max_size, nb_samples;
1970
1971         ref_clock = get_master_clock(is);
1972         diff = get_audio_clock(is) - ref_clock;
1973
1974         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1975             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1976             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1977                 /* not enough measures to have a correct estimate */
1978                 is->audio_diff_avg_count++;
1979             } else {
1980                 /* estimate the A-V difference */
1981                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1982
1983                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1984                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
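                    /* e.g. (illustrative figures): at 44100 Hz stereo S16 (n = 4),
                       a steady diff of +0.05 s asks for about 0.05*44100*4 = 8820
                       extra bytes, which the clamp below limits to within
                       SAMPLE_CORRECTION_PERCENT_MAX percent of the original size */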
1985                     nb_samples = samples_size / n;
1986
1987                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1988                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1989                     if (wanted_size < min_size)
1990                         wanted_size = min_size;
1991                     else if (wanted_size > max_size)
1992                         wanted_size = max_size;
1993
1994                     /* add or remove samples to correct the sync */
1995                     if (wanted_size < samples_size) {
1996                         /* remove samples */
1997                         samples_size = wanted_size;
1998                     } else if (wanted_size > samples_size) {
1999                         uint8_t *samples_end, *q;
2000                         int nb;
2001
2002                         /* add samples */
2003                         nb = (wanted_size - samples_size);
2004                         samples_end = (uint8_t *)samples + samples_size - n;
2005                         q = samples_end + n;
2006                         while (nb > 0) {
2007                             memcpy(q, samples_end, n);
2008                             q += n;
2009                             nb -= n;
2010                         }
2011                         samples_size = wanted_size;
2012                     }
2013                 }
2014 #if 0
2015                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2016                        diff, avg_diff, samples_size - samples_size1,
2017                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2018 #endif
2019             }
2020         } else {
2021             /* too big a difference: may be initial PTS errors, so
2022                reset the A-V filter */
2023             is->audio_diff_avg_count = 0;
2024             is->audio_diff_cum = 0;
2025         }
2026     }
2027
2028     return samples_size;
2029 }
2030
2031 /* decode one audio frame and return its uncompressed size */
2032 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2033 {
2034     AVPacket *pkt_temp = &is->audio_pkt_temp;
2035     AVPacket *pkt = &is->audio_pkt;
2036     AVCodecContext *dec= is->audio_st->codec;
2037     int n, len1, data_size;
2038     double pts;
2039
2040     for(;;) {
2041         /* NOTE: the audio packet can contain several frames */
2042         while (pkt_temp->size > 0) {
2043             data_size = sizeof(is->audio_buf1);
2044             len1 = avcodec_decode_audio3(dec,
2045                                         (int16_t *)is->audio_buf1, &data_size,
2046                                         pkt_temp);
2047             if (len1 < 0) {
2048                 /* if error, we skip the frame */
2049                 pkt_temp->size = 0;
2050                 break;
2051             }
2052
2053             pkt_temp->data += len1;
2054             pkt_temp->size -= len1;
2055             if (data_size <= 0)
2056                 continue;
2057
2058             if (dec->sample_fmt != is->audio_src_fmt) {
2059                 if (is->reformat_ctx)
2060                     av_audio_convert_free(is->reformat_ctx);
2061                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2062                                                          dec->sample_fmt, 1, NULL, 0);
2063                 if (!is->reformat_ctx) {
2064                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2065                         av_get_sample_fmt_name(dec->sample_fmt),
2066                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2067                     break;
2068                 }
2069                 is->audio_src_fmt= dec->sample_fmt;
2070             }
2071
2072             if (is->reformat_ctx) {
2073                 const void *ibuf[6]= {is->audio_buf1};
2074                 void *obuf[6]= {is->audio_buf2};
2075                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2076                 int ostride[6]= {2};
2077                 int len= data_size/istride[0];
2078                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2079                     printf("av_audio_convert() failed\n");
2080                     break;
2081                 }
2082                 is->audio_buf= is->audio_buf2;
2083                 /* FIXME: the existing code assumes that data_size equals framesize*channels*2;
2084                           remove this legacy cruft */
2085                 data_size= len*2;
2086             }else{
2087                 is->audio_buf= is->audio_buf1;
2088             }
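            /* from here on is->audio_buf always holds packed signed 16-bit
               samples, either converted above or taken directly from the
               decoder output */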
2089
2090             /* the decoder provides no per-frame pts, so derive it from the running audio clock */
2091             pts = is->audio_clock;
2092             *pts_ptr = pts;
2093             n = 2 * dec->channels;
2094             is->audio_clock += (double)data_size /
2095                 (double)(n * dec->sample_rate);
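            /* e.g. (illustrative): 4096 bytes of S16 stereo at 44100 Hz advance
               the clock by 4096 / (4 * 44100), about 23 ms */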
2096 #if defined(DEBUG_SYNC)
2097             {
2098                 static double last_clock;
2099                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2100                        is->audio_clock - last_clock,
2101                        is->audio_clock, pts);
2102                 last_clock = is->audio_clock;
2103             }
2104 #endif
2105             return data_size;
2106         }
2107
2108         /* free the current packet */
2109         if (pkt->data)
2110             av_free_packet(pkt);
2111
2112         if (is->paused || is->audioq.abort_request) {
2113             return -1;
2114         }
2115
2116         /* read next packet */
2117         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2118             return -1;
2119         if(pkt->data == flush_pkt.data){
2120             avcodec_flush_buffers(dec);
2121             continue;
2122         }
2123
2124         pkt_temp->data = pkt->data;
2125         pkt_temp->size = pkt->size;
2126
2127         /* update the audio clock with the packet pts, if available */
2128         if (pkt->pts != AV_NOPTS_VALUE) {
2129             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2130         }
2131     }
2132 }
2133
2134 /* prepare a new audio buffer */
2135 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2136 {
2137     VideoState *is = opaque;
2138     int audio_size, len1;
2139     double pts;
2140
2141     audio_callback_time = av_gettime();
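    /* SDL pulls 'len' bytes from us: refill from decoded frames, resynchronize
       each decoded chunk via synchronize_audio(), and fall back to silence if
       decoding fails */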
2142
2143     while (len > 0) {
2144         if (is->audio_buf_index >= is->audio_buf_size) {
2145            audio_size = audio_decode_frame(is, &pts);
2146            if (audio_size < 0) {
2147                 /* if error, just output silence */
2148                is->audio_buf = is->audio_buf1;
2149                is->audio_buf_size = 1024;
2150                memset(is->audio_buf, 0, is->audio_buf_size);
2151            } else {
2152                if (is->show_mode != SHOW_MODE_VIDEO)
2153                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2154                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2155                                               pts);
2156                is->audio_buf_size = audio_size;
2157            }
2158            is->audio_buf_index = 0;
2159         }
2160         len1 = is->audio_buf_size - is->audio_buf_index;
2161         if (len1 > len)
2162             len1 = len;
2163         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2164         len -= len1;
2165         stream += len1;
2166         is->audio_buf_index += len1;
2167     }
2168 }
2169
2170 /* open a given stream. Return 0 if OK */
2171 static int stream_component_open(VideoState *is, int stream_index)
2172 {
2173     AVFormatContext *ic = is->ic;
2174     AVCodecContext *avctx;
2175     AVCodec *codec;
2176     SDL_AudioSpec wanted_spec, spec;
2177
2178     if (stream_index < 0 || stream_index >= ic->nb_streams)
2179         return -1;
2180     avctx = ic->streams[stream_index]->codec;
2181
2182     /* request at most two decoded audio channels for the output */
2183     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2184         if (avctx->channels > 0) {
2185             avctx->request_channels = FFMIN(2, avctx->channels);
2186         } else {
2187             avctx->request_channels = 2;
2188         }
2189     }
2190
2191     codec = avcodec_find_decoder(avctx->codec_id);
2192     if (!codec)
2193         return -1;
2194
2195     avctx->debug_mv = debug_mv;
2196     avctx->debug = debug;
2197     avctx->workaround_bugs = workaround_bugs;
2198     avctx->lowres = lowres;
2199     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2200     avctx->idct_algo= idct;
2201     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2202     avctx->skip_frame= skip_frame;
2203     avctx->skip_idct= skip_idct;
2204     avctx->skip_loop_filter= skip_loop_filter;
2205     avctx->error_recognition= error_recognition;
2206     avctx->error_concealment= error_concealment;
2207     avctx->thread_count= thread_count;
2208
2209     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2210
2211     if(codec->capabilities & CODEC_CAP_DR1)
2212         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2213
2214     if (avcodec_open(avctx, codec) < 0)
2215         return -1;
2216
2217     /* prepare audio output */
2218     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2219         wanted_spec.freq = avctx->sample_rate;
2220         wanted_spec.format = AUDIO_S16SYS;
2221         wanted_spec.channels = avctx->channels;
2222         wanted_spec.silence = 0;
2223         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2224         wanted_spec.callback = sdl_audio_callback;
2225         wanted_spec.userdata = is;
2226         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2227             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2228             return -1;
2229         }
2230         is->audio_hw_buf_size = spec.size;
2231         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2232     }
2233
2234     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2235     switch(avctx->codec_type) {
2236     case AVMEDIA_TYPE_AUDIO:
2237         is->audio_stream = stream_index;
2238         is->audio_st = ic->streams[stream_index];
2239         is->audio_buf_size = 0;
2240         is->audio_buf_index = 0;
2241
2242         /* init averaging filter */
2243         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2244         is->audio_diff_avg_count = 0;
2245         /* since we do not have a precise enough measure of the audio fifo fullness,
2246            we correct audio sync only if the error is larger than this threshold */
2247         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
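        /* e.g. (illustrative): with a 1024-sample SDL buffer at 44100 Hz this
           is roughly 2*1024/44100, about 46 ms of audio */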
2248
2249         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2250         packet_queue_init(&is->audioq);
2251         SDL_PauseAudio(0);
2252         break;
2253     case AVMEDIA_TYPE_VIDEO:
2254         is->video_stream = stream_index;
2255         is->video_st = ic->streams[stream_index];
2256
2257 //        is->video_current_pts_time = av_gettime();
2258
2259         packet_queue_init(&is->videoq);
2260         is->video_tid = SDL_CreateThread(video_thread, is);
2261         break;
2262     case AVMEDIA_TYPE_SUBTITLE:
2263         is->subtitle_stream = stream_index;
2264         is->subtitle_st = ic->streams[stream_index];
2265         packet_queue_init(&is->subtitleq);
2266
2267         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2268         break;
2269     default:
2270         break;
2271     }
2272     return 0;
2273 }
2274
2275 static void stream_component_close(VideoState *is, int stream_index)
2276 {
2277     AVFormatContext *ic = is->ic;
2278     AVCodecContext *avctx;
2279
2280     if (stream_index < 0 || stream_index >= ic->nb_streams)
2281         return;
2282     avctx = ic->streams[stream_index]->codec;
2283
2284     switch(avctx->codec_type) {
2285     case AVMEDIA_TYPE_AUDIO:
2286         packet_queue_abort(&is->audioq);
2287
2288         SDL_CloseAudio();
2289
2290         packet_queue_end(&is->audioq);
2291         if (is->reformat_ctx)
2292             av_audio_convert_free(is->reformat_ctx);
2293         is->reformat_ctx = NULL;
2294         break;
2295     case AVMEDIA_TYPE_VIDEO:
2296         packet_queue_abort(&is->videoq);
2297
2298         /* note: we also signal this mutex to make sure we deblock the
2299            video thread in all cases */
2300         SDL_LockMutex(is->pictq_mutex);
2301         SDL_CondSignal(is->pictq_cond);
2302         SDL_UnlockMutex(is->pictq_mutex);
2303
2304         SDL_WaitThread(is->video_tid, NULL);
2305
2306         packet_queue_end(&is->videoq);
2307         break;
2308     case AVMEDIA_TYPE_SUBTITLE:
2309         packet_queue_abort(&is->subtitleq);
2310
2311         /* note: we also signal this mutex to make sure we deblock the
2312            subtitle thread in all cases */
2313         SDL_LockMutex(is->subpq_mutex);
2314         is->subtitle_stream_changed = 1;
2315
2316         SDL_CondSignal(is->subpq_cond);
2317         SDL_UnlockMutex(is->subpq_mutex);
2318
2319         SDL_WaitThread(is->subtitle_tid, NULL);
2320
2321         packet_queue_end(&is->subtitleq);
2322         break;
2323     default:
2324         break;
2325     }
2326
2327     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2328     avcodec_close(avctx);
2329     switch(avctx->codec_type) {
2330     case AVMEDIA_TYPE_AUDIO:
2331         is->audio_st = NULL;
2332         is->audio_stream = -1;
2333         break;
2334     case AVMEDIA_TYPE_VIDEO:
2335         is->video_st = NULL;
2336         is->video_stream = -1;
2337         break;
2338     case AVMEDIA_TYPE_SUBTITLE:
2339         is->subtitle_st = NULL;
2340         is->subtitle_stream = -1;
2341         break;
2342     default:
2343         break;
2344     }
2345 }
2346
2347 /* since we have only one decoding thread, we can use a global
2348    variable instead of a thread local variable */
2349 static VideoState *global_video_state;
2350
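/* polled by blocking I/O inside libavformat (installed below with
   avio_set_interrupt_cb()) so that reads can be aborted when the user quits */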
2351 static int decode_interrupt_cb(void)
2352 {
2353     return (global_video_state && global_video_state->abort_request);
2354 }
2355
2356 /* this thread gets the stream from the disk or the network */
2357 static int read_thread(void *arg)
2358 {
2359     VideoState *is = arg;
2360     AVFormatContext *ic;
2361     int err, i, ret;
2362     int st_index[AVMEDIA_TYPE_NB];
2363     AVPacket pkt1, *pkt = &pkt1;
2364     AVFormatParameters params, *ap = &params;
2365     int eof=0;
2366     int pkt_in_play_range = 0;
2367
2368     ic = avformat_alloc_context();
2369
2370     memset(st_index, -1, sizeof(st_index));
2371     is->video_stream = -1;
2372     is->audio_stream = -1;
2373     is->subtitle_stream = -1;
2374
2375     global_video_state = is;
2376     avio_set_interrupt_cb(decode_interrupt_cb);
2377
2378     memset(ap, 0, sizeof(*ap));
2379
2380     ap->prealloced_context = 1;
2381     ap->width = frame_width;
2382     ap->height= frame_height;
2383     ap->time_base= (AVRational){1, 25};
2384     ap->pix_fmt = frame_pix_fmt;
2385     ic->flags |= AVFMT_FLAG_PRIV_OPT;
2386
2387
2388     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2389     if (err >= 0) {
2390         set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2391         err = av_demuxer_open(ic, ap);
2392         if(err < 0){
2393             avformat_free_context(ic);
2394             ic= NULL;
2395         }
2396     }
2397     if (err < 0) {
2398         print_error(is->filename, err);
2399         ret = -1;
2400         goto fail;
2401     }
2402     is->ic = ic;
2403
2404     if(genpts)
2405         ic->flags |= AVFMT_FLAG_GENPTS;
2406
2407     err = av_find_stream_info(ic);
2408     if (err < 0) {
2409         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2410         ret = -1;
2411         goto fail;
2412     }
2413     if(ic->pb)
2414         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2415
2416     if(seek_by_bytes<0)
2417         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2418
2419     /* if a start time was requested, seek to it */
2420     if (start_time != AV_NOPTS_VALUE) {
2421         int64_t timestamp;
2422
2423         timestamp = start_time;
2424         /* add the stream start time */
2425         if (ic->start_time != AV_NOPTS_VALUE)
2426             timestamp += ic->start_time;
2427         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2428         if (ret < 0) {
2429             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2430                     is->filename, (double)timestamp / AV_TIME_BASE);
2431         }
2432     }
2433
2434     for (i = 0; i < ic->nb_streams; i++)
2435         ic->streams[i]->discard = AVDISCARD_ALL;
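    /* pick the streams to play: av_find_best_stream() honours an explicit
       -ast/-vst/-sst request (wanted_stream >= 0) and otherwise chooses a
       default; the related-stream argument biases the audio/subtitle choice
       towards streams belonging with the already selected video/audio stream */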
2436     if (!video_disable)
2437         st_index[AVMEDIA_TYPE_VIDEO] =
2438             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2439                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2440     if (!audio_disable)
2441         st_index[AVMEDIA_TYPE_AUDIO] =
2442             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2443                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2444                                 st_index[AVMEDIA_TYPE_VIDEO],
2445                                 NULL, 0);
2446     if (!video_disable)
2447         st_index[AVMEDIA_TYPE_SUBTITLE] =
2448             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2449                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2450                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2451                                  st_index[AVMEDIA_TYPE_AUDIO] :
2452                                  st_index[AVMEDIA_TYPE_VIDEO]),
2453                                 NULL, 0);
2454     if (show_status) {
2455         av_dump_format(ic, 0, is->filename, 0);
2456     }
2457
2458     is->show_mode = show_mode;
2459
2460     /* open the streams */
2461     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2462         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2463     }
2464
2465     ret=-1;
2466     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2467         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2468     }
2469     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2470     if (is->show_mode == SHOW_MODE_NONE)
2471         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2472
2473     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2474         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2475     }
2476
2477     if (is->video_stream < 0 && is->audio_stream < 0) {
2478         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2479         ret = -1;
2480         goto fail;
2481     }
2482
2483     for(;;) {
2484         if (is->abort_request)
2485             break;
2486         if (is->paused != is->last_paused) {
2487             is->last_paused = is->paused;
2488             if (is->paused)
2489                 is->read_pause_return= av_read_pause(ic);
2490             else
2491                 av_read_play(ic);
2492         }
2493 #if CONFIG_RTSP_DEMUXER
2494         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2495             /* wait 10 ms to avoid trying to get another packet */
2496             /* XXX: horrible */
2497             SDL_Delay(10);
2498             continue;
2499         }
2500 #endif
2501         if (is->seek_req) {
2502             int64_t seek_target= is->seek_pos;
2503             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2504             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2505 //FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2506 //      of the seek_pos/seek_rel variables
2507
2508             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2509             if (ret < 0) {
2510                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2511             }else{
2512                 if (is->audio_stream >= 0) {
2513                     packet_queue_flush(&is->audioq);
2514                     packet_queue_put(&is->audioq, &flush_pkt);
2515                 }
2516                 if (is->subtitle_stream >= 0) {
2517                     packet_queue_flush(&is->subtitleq);
2518                     packet_queue_put(&is->subtitleq, &flush_pkt);
2519                 }
2520                 if (is->video_stream >= 0) {
2521                     packet_queue_flush(&is->videoq);
2522                     packet_queue_put(&is->videoq, &flush_pkt);
2523                 }
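                /* each flushed queue gets a 'flush' marker packet; the decoder
                   threads recognize it (pkt->data == flush_pkt.data) and call
                   avcodec_flush_buffers() before decoding anything further */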
2524             }
2525             is->seek_req = 0;
2526             eof= 0;
2527         }
2528
2529         /* if the queues are full, no need to read more */
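        /* (the reader sleeps whenever the queues together exceed MAX_QUEUE_SIZE
           bytes of compressed data, or every open stream already has a
           comfortable backlog queued) */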
2530         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2531             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2532                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2533                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2534             /* wait 10 ms */
2535             SDL_Delay(10);
2536             continue;
2537         }
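        /* at end of file a zero-sized packet is sent to the video decoder so it
           flushes its delayed frames; once the queues drain we either loop,
           exit (-autoexit), or keep idling so the user can still seek back */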
2538         if(eof) {
2539             if(is->video_stream >= 0){
2540                 av_init_packet(pkt);
2541                 pkt->data=NULL;
2542                 pkt->size=0;
2543                 pkt->stream_index= is->video_stream;
2544                 packet_queue_put(&is->videoq, pkt);
2545             }
2546             SDL_Delay(10);
2547             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2548                 if(loop!=1 && (!loop || --loop)){
2549                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2550                 }else if(autoexit){
2551                     ret=AVERROR_EOF;
2552                     goto fail;
2553                 }
2554             }
2555             eof=0;
2556             continue;
2557         }
2558         ret = av_read_frame(ic, pkt);
2559         if (ret < 0) {
2560             if (ret == AVERROR_EOF || url_feof(ic->pb))
2561                 eof=1;
2562             if (ic->pb && ic->pb->error)
2563                 break;
2564             SDL_Delay(100); /* wait for user event */
2565             continue;
2566         }
2567         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2568         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2569                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2570                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2571                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2572                 <= ((double)duration/1000000);
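        /* e.g. (illustrative): with -ss 10 -t 5, a packet at 12.3 s into the
           stream gives 12.3 - 10 = 2.3 <= 5 and is queued, while a packet at
           16 s is discarded */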
2573         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2574             packet_queue_put(&is->audioq, pkt);
2575         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2576             packet_queue_put(&is->videoq, pkt);
2577         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2578             packet_queue_put(&is->subtitleq, pkt);
2579         } else {
2580             av_free_packet(pkt);
2581         }
2582     }
2583     /* wait until the end */
2584     while (!is->abort_request) {
2585         SDL_Delay(100);
2586     }
2587
2588     ret = 0;
2589  fail:
2590     /* disable interrupting */
2591     global_video_state = NULL;
2592
2593     /* close each stream */
2594     if (is->audio_stream >= 0)
2595         stream_component_close(is, is->audio_stream);
2596     if (is->video_stream >= 0)
2597         stream_component_close(is, is->video_stream);
2598     if (is->subtitle_stream >= 0)
2599         stream_component_close(is, is->subtitle_stream);
2600     if (is->ic) {
2601         av_close_input_file(is->ic);
2602         is->ic = NULL; /* safety */
2603     }
2604     avio_set_interrupt_cb(NULL);
2605
2606     if (ret != 0) {
2607         SDL_Event event;
2608
2609         event.type = FF_QUIT_EVENT;
2610         event.user.data1 = is;
2611         SDL_PushEvent(&event);
2612     }
2613     return 0;
2614 }
2615
2616 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2617 {
2618     VideoState *is;
2619
2620     is = av_mallocz(sizeof(VideoState));
2621     if (!is)
2622         return NULL;
2623     av_strlcpy(is->filename, filename, sizeof(is->filename));
2624     is->iformat = iformat;
2625     is->ytop = 0;
2626     is->xleft = 0;
2627
2628     /* start video display */
2629     is->pictq_mutex = SDL_CreateMutex();
2630     is->pictq_cond = SDL_CreateCond();
2631
2632     is->subpq_mutex = SDL_CreateMutex();
2633     is->subpq_cond = SDL_CreateCond();
2634
2635     is->av_sync_type = av_sync_type;
2636     is->read_tid = SDL_CreateThread(read_thread, is);
2637     if (!is->read_tid) {
2638         av_free(is);
2639         return NULL;
2640     }
2641     return is;
2642 }
2643
2644 static void stream_cycle_channel(VideoState *is, int codec_type)
2645 {
2646     AVFormatContext *ic = is->ic;
2647     int start_index, stream_index;
2648     AVStream *st;
2649
2650     if (codec_type == AVMEDIA_TYPE_VIDEO)
2651         start_index = is->video_stream;
2652     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2653         start_index = is->audio_stream;
2654     else
2655         start_index = is->subtitle_stream;
2656     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2657         return;
2658     stream_index = start_index;
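    /* scan forward from the current stream, wrapping around, until another
       usable stream of the same type is found; for subtitles the scan may end
       at -1, which simply switches subtitles off */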
2659     for(;;) {
2660         if (++stream_index >= is->ic->nb_streams)
2661         {
2662             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2663             {
2664                 stream_index = -1;
2665                 goto the_end;
2666             } else
2667                 stream_index = 0;
2668         }
2669         if (stream_index == start_index)
2670             return;
2671         st = ic->streams[stream_index];
2672         if (st->codec->codec_type == codec_type) {
2673             /* check that parameters are OK */
2674             switch(codec_type) {
2675             case AVMEDIA_TYPE_AUDIO:
2676                 if (st->codec->sample_rate != 0 &&
2677                     st->codec->channels != 0)
2678                     goto the_end;
2679                 break;
2680             case AVMEDIA_TYPE_VIDEO:
2681             case AVMEDIA_TYPE_SUBTITLE:
2682                 goto the_end;
2683             default:
2684                 break;
2685             }
2686         }
2687     }
2688  the_end:
2689     stream_component_close(is, start_index);
2690     stream_component_open(is, stream_index);
2691 }
2692
2693
2694 static void toggle_full_screen(void)
2695 {
2696     is_full_screen = !is_full_screen;
2697     if (!fs_screen_width) {
2698         /* use default SDL method */
2699 //        SDL_WM_ToggleFullScreen(screen);
2700     }
2701     video_open(cur_stream);
2702 }
2703
2704 static void toggle_pause(void)
2705 {
2706     if (cur_stream)
2707         stream_toggle_pause(cur_stream);
2708     step = 0;
2709 }
2710
2711 static void step_to_next_frame(void)
2712 {
2713     if (cur_stream) {
2714         /* if the stream is paused, unpause it, then step */
2715         if (cur_stream->paused)
2716             stream_toggle_pause(cur_stream);
2717     }
2718     step = 1;
2719 }
2720
2721 static void toggle_audio_display(void)
2722 {
2723     if (cur_stream) {
2724         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2725         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2726         fill_rectangle(screen,
2727                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2728                     bgcolor);
2729         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2730     }
2731 }
2732
2733 /* handle an event sent by the GUI */
2734 static void event_loop(void)
2735 {
2736     SDL_Event event;
2737     double incr, pos, frac;
2738
2739     for(;;) {
2740         double x;
2741         SDL_WaitEvent(&event);
2742         switch(event.type) {
2743         case SDL_KEYDOWN:
2744             if (exit_on_keydown) {
2745                 do_exit();
2746                 break;
2747             }
2748             switch(event.key.keysym.sym) {
2749             case SDLK_ESCAPE:
2750             case SDLK_q:
2751                 do_exit();
2752                 break;
2753             case SDLK_f:
2754                 toggle_full_screen();
2755                 break;
2756             case SDLK_p:
2757             case SDLK_SPACE:
2758                 toggle_pause();
2759                 break;
2760             case SDLK_s: //S: Step to next frame
2761                 step_to_next_frame();
2762                 break;
2763             case SDLK_a:
2764                 if (cur_stream)
2765                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2766                 break;
2767             case SDLK_v:
2768                 if (cur_stream)
2769                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2770                 break;
2771             case SDLK_t:
2772                 if (cur_stream)
2773                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2774                 break;
2775             case SDLK_w:
2776                 toggle_audio_display();
2777                 break;
2778             case SDLK_LEFT:
2779                 incr = -10.0;
2780                 goto do_seek;
2781             case SDLK_RIGHT:
2782                 incr = 10.0;
2783                 goto do_seek;
2784             case SDLK_UP:
2785                 incr = 60.0;
2786                 goto do_seek;
2787             case SDLK_DOWN:
2788                 incr = -60.0;
2789             do_seek:
2790                 if (cur_stream) {
2791                     if (seek_by_bytes) {
2792                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2793                             pos= cur_stream->video_current_pos;
2794                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2795                             pos= cur_stream->audio_pkt.pos;
2796                         }else
2797                             pos = avio_tell(cur_stream->ic->pb);
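                        /* convert the +/- seconds increment into a byte offset:
                           use the container bitrate if known, otherwise a fixed
                           bytes-per-second guess */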
2798                         if (cur_stream->ic->bit_rate)
2799                             incr *= cur_stream->ic->bit_rate / 8.0;
2800                         else
2801                             incr *= 180000.0;
2802                         pos += incr;
2803                         stream_seek(cur_stream, pos, incr, 1);
2804                     } else {
2805                         pos = get_master_clock(cur_stream);
2806                         pos += incr;
2807                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2808                     }
2809                 }
2810                 break;
2811             default:
2812                 break;
2813             }
2814             break;
2815         case SDL_MOUSEBUTTONDOWN:
2816             if (exit_on_mousedown) {
2817                 do_exit();
2818                 break;
2819             }
2820         case SDL_MOUSEMOTION:
2821             if(event.type ==SDL_MOUSEBUTTONDOWN){
2822                 x= event.button.x;
2823             }else{
2824                 if(event.motion.state != SDL_PRESSED)
2825                     break;
2826                 x= event.motion.x;
2827             }
2828             if (cur_stream) {
2829                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2830                     uint64_t size=  avio_size(cur_stream->ic->pb);
2831                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2832                 }else{
2833                     int64_t ts;
2834                     int ns, hh, mm, ss;
2835                     int tns, thh, tmm, tss;
2836                     tns = cur_stream->ic->duration/1000000LL;
2837                     thh = tns/3600;
2838                     tmm = (tns%3600)/60;
2839                     tss = (tns%60);
2840                     frac = x/cur_stream->width;
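                    /* e.g. (illustrative): clicking in the middle of the window
                       gives frac = 0.5, so ts below lands at the midpoint of
                       ic->duration */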
2841                     ns = frac*tns;
2842                     hh = ns/3600;
2843                     mm = (ns%3600)/60;
2844                     ss = (ns%60);
2845                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2846                             hh, mm, ss, thh, tmm, tss);
2847                     ts = frac*cur_stream->ic->duration;
2848                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2849                         ts += cur_stream->ic->start_time;
2850                     stream_seek(cur_stream, ts, 0, 0);
2851                 }
2852             }
2853             break;
2854         case SDL_VIDEORESIZE:
2855             if (cur_stream) {
2856                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2857                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2858                 screen_width = cur_stream->width = event.resize.w;
2859                 screen_height= cur_stream->height= event.resize.h;
2860             }
2861             break;
2862         case SDL_QUIT:
2863         case FF_QUIT_EVENT:
2864             do_exit();
2865             break;
2866         case FF_ALLOC_EVENT:
2867             video_open(event.user.data1);
2868             alloc_picture(event.user.data1);
2869             break;
2870         case FF_REFRESH_EVENT:
2871             video_refresh(event.user.data1);
2872             cur_stream->refresh=0;
2873             break;
2874         default:
2875             break;
2876         }
2877     }
2878 }
2879
2880 static void opt_frame_size(const char *arg)
2881 {
2882     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2883         fprintf(stderr, "Incorrect frame size\n");
2884         exit(1);
2885     }
2886     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2887         fprintf(stderr, "Frame size must be a multiple of 2\n");
2888         exit(1);
2889     }
2890 }
2891
2892 static int opt_width(const char *opt, const char *arg)
2893 {
2894     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2895     return 0;
2896 }
2897
2898 static int opt_height(const char *opt, const char *arg)
2899 {
2900     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2901     return 0;
2902 }
2903
2904 static void opt_format(const char *arg)
2905 {
2906     file_iformat = av_find_input_format(arg);
2907     if (!file_iformat) {
2908         fprintf(stderr, "Unknown input format: %s\n", arg);
2909         exit(1);
2910     }
2911 }
2912
2913 static void opt_frame_pix_fmt(const char *arg)
2914 {
2915     frame_pix_fmt = av_get_pix_fmt(arg);
2916 }
2917
2918 static int opt_sync(const char *opt, const char *arg)
2919 {
2920     if (!strcmp(arg, "audio"))
2921         av_sync_type = AV_SYNC_AUDIO_MASTER;
2922     else if (!strcmp(arg, "video"))
2923         av_sync_type = AV_SYNC_VIDEO_MASTER;
2924     else if (!strcmp(arg, "ext"))
2925         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2926     else {
2927         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2928         exit(1);
2929     }
2930     return 0;
2931 }
2932
2933 static int opt_seek(const char *opt, const char *arg)
2934 {
2935     start_time = parse_time_or_die(opt, arg, 1);
2936     return 0;
2937 }
2938
2939 static int opt_duration(const char *opt, const char *arg)
2940 {
2941     duration = parse_time_or_die(opt, arg, 1);
2942     return 0;
2943 }
2944
2945 static int opt_debug(const char *opt, const char *arg)
2946 {
2947     av_log_set_level(99);
2948     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2949     return 0;
2950 }
2951
2952 static int opt_vismv(const char *opt, const char *arg)
2953 {
2954     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2955     return 0;
2956 }
2957
2958 static int opt_thread_count(const char *opt, const char *arg)
2959 {
2960     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2961 #if !HAVE_THREADS
2962     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2963 #endif
2964     return 0;
2965 }
2966
2967 static int opt_show_mode(const char *opt, const char *arg)
2968 {
2969     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2970                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2971                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2972                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2973     return 0;
2974 }
2975
2976 static const OptionDef options[] = {
2977 #include "cmdutils_common_opts.h"
2978     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2979     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2980     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2981     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2982     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2983     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2984     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2985     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2986     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2987     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2988     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2989     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2990     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2991     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2992     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2993     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2994     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2995     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2996     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2997     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2998     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2999     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3000     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3001     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3002     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3003     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3004     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3005     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3006     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3007     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3008     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3009     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3010     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3011     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3012     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3013     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3014     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3015 #if CONFIG_AVFILTER
3016     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3017 #endif
3018     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3019     { "showmode", HAS_ARG | OPT_FUNC2, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3020     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3021     { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3022     { NULL, },
3023 };
3024
3025 static void show_usage(void)
3026 {
3027     printf("Simple media player\n");
3028     printf("usage: ffplay [options] input_file\n");
3029     printf("\n");
3030 }
3031
3032 static void show_help(void)
3033 {
3034     av_log_set_callback(log_callback_help);
3035     show_usage();
3036     show_help_options(options, "Main options:\n",
3037                       OPT_EXPERT, 0);
3038     show_help_options(options, "\nAdvanced options:\n",
3039                       OPT_EXPERT, OPT_EXPERT);
3040     printf("\n");
3041     av_opt_show2(avcodec_opts[0], NULL,
3042                  AV_OPT_FLAG_DECODING_PARAM, 0);
3043     printf("\n");
3044     av_opt_show2(avformat_opts, NULL,
3045                  AV_OPT_FLAG_DECODING_PARAM, 0);
3046 #if !CONFIG_AVFILTER
3047     printf("\n");
3048     av_opt_show2(sws_opts, NULL,
3049                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3050 #endif
3051     printf("\nWhile playing:\n"
3052            "q, ESC              quit\n"
3053            "f                   toggle full screen\n"
3054            "p, SPC              pause\n"
3055            "a                   cycle audio channel\n"
3056            "v                   cycle video channel\n"
3057            "t                   cycle subtitle channel\n"
3058            "w                   show audio waves\n"
3059            "s                   activate frame-step mode\n"
3060            "left/right          seek backward/forward 10 seconds\n"
3061            "down/up             seek backward/forward 1 minute\n"
3062            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3063            );
3064 }
3065
3066 static void opt_input_file(const char *filename)
3067 {
3068     if (input_filename) {
3069         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3070                 filename, input_filename);
3071         exit(1);
3072     }
3073     if (!strcmp(filename, "-"))
3074         filename = "pipe:";
3075     input_filename = filename;
3076 }
3077
3078 /* Called from the main */
3079 int main(int argc, char **argv)
3080 {
3081     int flags;
3082
3083     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3084
3085     /* register all codecs, demuxers and protocols */
3086     avcodec_register_all();
3087 #if CONFIG_AVDEVICE
3088     avdevice_register_all();
3089 #endif
3090 #if CONFIG_AVFILTER
3091     avfilter_register_all();
3092 #endif
3093     av_register_all();
3094
3095     init_opts();
3096
3097     show_banner();
3098
3099     parse_options(argc, argv, options, opt_input_file);
3100
3101     if (!input_filename) {
3102         show_usage();
3103         fprintf(stderr, "An input file must be specified\n");
3104         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3105         exit(1);
3106     }
3107
3108     if (display_disable) {
3109         video_disable = 1;
3110     }
3111     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3112 #if !defined(__MINGW32__) && !defined(__APPLE__)
3113     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3114 #endif
3115     if (SDL_Init (flags)) {
3116         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3117         exit(1);
3118     }
3119
3120     if (!display_disable) {
3121 #if HAVE_SDL_VIDEO_SIZE
3122         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3123         fs_screen_width = vi->current_w;
3124         fs_screen_height = vi->current_h;
3125 #endif
3126     }
3127
3128     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3129     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3130     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3131
3132     av_init_packet(&flush_pkt);
3133     flush_pkt.data= "FLUSH";
3134
3135     cur_stream = stream_open(input_filename, file_iformat);
3136
3137     event_loop();
3138
3139     /* never returns */
3140
3141     return 0;
3142 }